Column      Type          Length range / classes
file_name   large_string  lengths 4 - 140
prefix      large_string  lengths 0 - 39k
suffix      large_string  lengths 0 - 36.1k
middle      large_string  lengths 0 - 29.4k
fim_type    large_string  4 classes
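These five columns follow the usual fill-in-the-middle (FIM) layout: each row holds the code before a masked span (prefix), the masked span itself (middle), the code after it (suffix), the source file name, and a label for what kind of span was masked (fim_type). The sketch below shows one way rows like these could be loaded and spliced back into full source files; it assumes the rows sit in a Parquet file, and the file name "fim_rows.parquet" is only a placeholder.

# Minimal sketch, assuming the rows are stored in a Parquet file with exactly
# the five columns listed above; "fim_rows.parquet" is a placeholder path.
import pandas as pd

df = pd.read_parquet("fim_rows.parquet")

def reassemble(row) -> str:
    # The original file text is recovered by splicing the masked span back in.
    return row["prefix"] + row["middle"] + row["suffix"]

for _, row in df.head(3).iterrows():
    print(row["file_name"], row["fim_type"], len(reassemble(row)), "chars")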
pipeline.py
import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import cv2 import glob import time from sklearn.svm import LinearSVC from sklearn.preprocessing import StandardScaler from skimage.feature import hog from helper_func import * from sklearn.model_selection import GridSearchCV import pickle import random # NOTE: the next import is only valid for scikit-learn version <= 0.17 # for scikit-learn >= 0.18 use: from sklearn.model_selection import train_test_split from scipy.ndimage.measurements import label from collections import deque from moviepy.editor import VideoFileClip from IPython.display import HTML #from sklearn.cross_validation import train_test_split # Define a function to extract features from a single image window # This function is very similar to extract_features() # just for a single image rather than list of images def
(img): new_img = cv2.GaussianBlur(img, (3,3), 0) #new_img = cv2.cvtColor(new_img, cv2.COLOR_YUV2RGB) new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2HSV) new_img = np.array(new_img, dtype = np.float64) #Generate new random brightness random_bright = .5+random.uniform(0.3,1.0) new_img[:,:,2] = random_bright*new_img[:,:,2] new_img[:,:,2][new_img[:,:,2]>255] = 255 new_img = np.array(new_img, dtype = np.uint8) #Convert back to RGB colorspace new_img = cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB) #new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2YUV) return new_img # Read in cars and notcars images = glob.glob('./dataset/*.png') cars = [] notcars = [] for image in images: cars.append(image) images = glob.glob('./dataset_nonv/*.png') for image in images: notcars.append(image) # Reduce the sample size because # The quiz evaluator times out after 13s of CPU time #sample_size = 500 #cars = cars[0:sample_size] #notcars = notcars[0:sample_size] color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb orient = 32 # HOG orientations pix_per_cell = 8 # HOG pixels per cell cell_per_block = 2 # HOG cells per block hog_channel = "ALL" # Can be 0, 1, 2, or "ALL" spatial_size = (32, 32) # Spatial binning dimensions hist_bins = 32 # Number of histogram bins spatial_feat = True # Spatial features on or off hist_feat = True # Histogram features on or off hog_feat = True # HOG features on or off y_start_stop = [400, 656] # Min and max in y to search in slide_window() def train_model(cars, notcars): car_features = extract_features(cars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) notcar_features = extract_features(notcars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) X = np.vstack((car_features, notcar_features)).astype(np.float64) # Fit a per-column scaler X_scaler = StandardScaler().fit(X) # Apply the scaler to X scaled_X = X_scaler.transform(X) # Define the labels vector y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features)))) # Split up data into randomized training and test sets rand_state = np.random.randint(0, 100) X_train, X_test, y_train, y_test = train_test_split( scaled_X, y, test_size=0.2, random_state=rand_state) print('Using:',orient,'orientations',pix_per_cell, 'pixels per cell and', cell_per_block,'cells per block') print('Feature vector length:', len(X_train[0])) # Use a linear SVC parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} svr = svm.SVC() svc = GridSearchCV(svr, parameters) #svc = LinearSVC() # Check the training time for the SVC t=time.time() svc.fit(X_train, y_train) t2 = time.time() print(round(t2-t, 2), 'Seconds to train SVC...') # Check the score of the SVC print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4)) # Check the prediction time for a single sample t=time.time() #model = pickle.dump(svc, 'model.pkl') with open('model.p', 'wb') as f: pickle.dump((svc, X_scaler), f) #return svc, X_scaler #image = mpimg.imread('test1.jpg') #draw_image = np.copy(image) # Uncomment the following line if you extracted training # data from .png images (scaled 0 to 1 by mpimg) and the # image you are searching is a .jpg (scaled 0 to 255) #image = image.astype(np.float32)/255 #windows 
= slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop, # xy_window=(96, 96), xy_overlap=(0.5, 0.5)) #hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space, # spatial_size=spatial_size, hist_bins=hist_bins, # orient=orient, pix_per_cell=pix_per_cell, # cell_per_block=cell_per_block, # hog_channel=hog_channel, spatial_feat=spatial_feat, # hist_feat=hist_feat, hog_feat=hog_feat) #window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6) #plt.imshow(window_img) count =6 last_labels = [] history = deque(maxlen=5) def find_vehicles_in_frame(image): global count global last_labels #if count < 2: # count = count + 1 # draw_img = draw_labeled_bboxes(np.copy(image), last_labels) # return draw_img #draw_img = draw_labeled_bboxes(np.copy(image), labels) #return draw_img #else: #print(count) ystart = 400 ystop = 656 scale = 1.5 box_list = [] #image = mpimg.imread('test1.jpg') svc, X_scaler = pickle.load( open("model.p", "rb" ) ) box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 464, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 416, 480, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 500, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 430, 530, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 530, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 430, 560, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 600, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 464, 656, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) last_box_list = box_list #ystart = 355 #ystop = 550 #scale = 1.5 #box_list2 = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) #box_list = box_list1 + box_list2 heat = np.zeros_like(image[:,:,0]).astype(np.float) # Add heat to each box in box list heat = add_heat(heat,box_list) history.append(heat) if count >=6: #print('draw') #print(history) hist1 = 0 hist2 = 0 #NULL hist3 = 0 #NULL hist4 = 0 #NULL hist5 = 0 #NULL hist6 = 0 #NULL hist7 = 0 #NULL hist1 = history.popleft() if history: hist2 = history.popleft() if history: hist3 = history.popleft() if history: hist4 = history.popleft() if history: hist5 = history.popleft() if history: hist6 = history.popleft() if history: hist7 = history.popleft() heat = hist1 + hist2 + hist3 + hist4 + hist5 + hist6 + hist7 # Apply threshold to help remove false positives heat = apply_threshold(heat,7) # Visualize the heatmap when displaying heatmap = np.clip(heat, 0, 255) # Find final boxes from heatmap using label function labels = label(heatmap) last_labels = labels #return labels, heatmap count = 0 draw_img = draw_labeled_bboxes(np.copy(image), labels) return draw_img else: #print('skip') count = count + 1 #heatmap = np.clip(heat, 0, 255) #labels = label(heatmap) #last_labels = labels draw_img = draw_labeled_bboxes(np.copy(image), last_labels) return draw_img def find_vehicles_in_video(video): output = "tracked2_" + video input_clip 
= VideoFileClip(video) clip = input_clip.fl_image(find_vehicles_in_frame) #clip = input_clip.fl_image(save_image) clip.write_videofile(output, audio=False) def main(): ystart = 400 ystop = 656 scale = 1.5 ### TRAINING ##### print(len(cars)) #train_model(cars, notcars) ### INFERENCE ##### #myimage = mpimg.imread('./test1.jpg') myvid = 'project_video.mp4' find_vehicles_in_video(myvid) #new_img =find_vehicles_in_frame(myimage) #plt.imshow(new_img) #out_img, box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) #heat = np.zeros_like(image[:,:,0]).astype(np.float) # Add heat to each box in box list #heat = add_heat(heat,box_list) # Apply threshold to help remove false positives #heat = apply_threshold(heat,1) # Visualize the heatmap when displaying #heatmap = np.clip(heat, 0, 255) # Find final boxes from heatmap using label function #labels = label(heatmap) #draw_img = draw_labeled_bboxes(np.copy(image), labels) #fig = plt.figure() #plt.subplot(121) #plt.imshow(image) #plt.title('Original') #plt.subplot(121) #plt.imshow(draw_img) #plt.title('Car Positions') #plt.subplot(122) #plt.imshow(heatmap, cmap='hot') #plt.title('Heat Map') #fig.tight_layout() if __name__ == '__main__': main()
augment_image
identifier_name
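In this first row the mask lands on an identifier: the prefix ends with "def ", the middle is the function name augment_image, and the suffix resumes at "(img):", which is what the fim_type label identifier_name describes. A toy check of that splice, with the long field values abbreviated to short stand-ins, could look like this.

# Toy check of the identifier_name splice for the first pipeline.py row;
# the three field values below are abbreviated stand-ins, not the full strings.
prefix = "... a single image rather than list of images\ndef "
middle = "augment_image"
suffix = "(img):\n    new_img = cv2.GaussianBlur(img, (3, 3), 0)\n    ..."

reassembled = prefix + middle + suffix
assert "def augment_image(img):" in reassembled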
pipeline.py
import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import cv2 import glob import time from sklearn.svm import LinearSVC from sklearn.preprocessing import StandardScaler from skimage.feature import hog from helper_func import * from sklearn.model_selection import GridSearchCV import pickle import random # NOTE: the next import is only valid for scikit-learn version <= 0.17 # for scikit-learn >= 0.18 use: from sklearn.model_selection import train_test_split from scipy.ndimage.measurements import label from collections import deque from moviepy.editor import VideoFileClip from IPython.display import HTML #from sklearn.cross_validation import train_test_split # Define a function to extract features from a single image window # This function is very similar to extract_features() # just for a single image rather than list of images def augment_image(img): new_img = cv2.GaussianBlur(img, (3,3), 0) #new_img = cv2.cvtColor(new_img, cv2.COLOR_YUV2RGB) new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2HSV) new_img = np.array(new_img, dtype = np.float64) #Generate new random brightness random_bright = .5+random.uniform(0.3,1.0) new_img[:,:,2] = random_bright*new_img[:,:,2] new_img[:,:,2][new_img[:,:,2]>255] = 255 new_img = np.array(new_img, dtype = np.uint8) #Convert back to RGB colorspace new_img = cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB) #new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2YUV) return new_img # Read in cars and notcars images = glob.glob('./dataset/*.png') cars = [] notcars = [] for image in images: cars.append(image) images = glob.glob('./dataset_nonv/*.png') for image in images: notcars.append(image) # Reduce the sample size because # The quiz evaluator times out after 13s of CPU time #sample_size = 500 #cars = cars[0:sample_size] #notcars = notcars[0:sample_size] color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb orient = 32 # HOG orientations pix_per_cell = 8 # HOG pixels per cell cell_per_block = 2 # HOG cells per block hog_channel = "ALL" # Can be 0, 1, 2, or "ALL" spatial_size = (32, 32) # Spatial binning dimensions hist_bins = 32 # Number of histogram bins spatial_feat = True # Spatial features on or off hist_feat = True # Histogram features on or off hog_feat = True # HOG features on or off y_start_stop = [400, 656] # Min and max in y to search in slide_window() def train_model(cars, notcars): car_features = extract_features(cars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) notcar_features = extract_features(notcars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) X = np.vstack((car_features, notcar_features)).astype(np.float64) # Fit a per-column scaler X_scaler = StandardScaler().fit(X) # Apply the scaler to X scaled_X = X_scaler.transform(X) # Define the labels vector y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features)))) # Split up data into randomized training and test sets rand_state = np.random.randint(0, 100) X_train, X_test, y_train, y_test = train_test_split( scaled_X, y, test_size=0.2, random_state=rand_state) print('Using:',orient,'orientations',pix_per_cell, 'pixels per cell and', cell_per_block,'cells per block') print('Feature 
vector length:', len(X_train[0])) # Use a linear SVC parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} svr = svm.SVC() svc = GridSearchCV(svr, parameters) #svc = LinearSVC() # Check the training time for the SVC t=time.time() svc.fit(X_train, y_train) t2 = time.time() print(round(t2-t, 2), 'Seconds to train SVC...') # Check the score of the SVC print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4)) # Check the prediction time for a single sample t=time.time() #model = pickle.dump(svc, 'model.pkl') with open('model.p', 'wb') as f: pickle.dump((svc, X_scaler), f) #return svc, X_scaler #image = mpimg.imread('test1.jpg') #draw_image = np.copy(image) # Uncomment the following line if you extracted training # data from .png images (scaled 0 to 1 by mpimg) and the # image you are searching is a .jpg (scaled 0 to 255) #image = image.astype(np.float32)/255 #windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop, # xy_window=(96, 96), xy_overlap=(0.5, 0.5)) #hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space, # spatial_size=spatial_size, hist_bins=hist_bins, # orient=orient, pix_per_cell=pix_per_cell, # cell_per_block=cell_per_block, # hog_channel=hog_channel, spatial_feat=spatial_feat, # hist_feat=hist_feat, hog_feat=hog_feat) #window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6) #plt.imshow(window_img) count =6 last_labels = [] history = deque(maxlen=5) def find_vehicles_in_frame(image): global count global last_labels #if count < 2: # count = count + 1 # draw_img = draw_labeled_bboxes(np.copy(image), last_labels) # return draw_img #draw_img = draw_labeled_bboxes(np.copy(image), labels) #return draw_img #else: #print(count) ystart = 400 ystop = 656 scale = 1.5 box_list = [] #image = mpimg.imread('test1.jpg') svc, X_scaler = pickle.load( open("model.p", "rb" ) ) box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 464, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 416, 480, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 500, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 430, 530, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 530, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 430, 560, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 600, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 464, 656, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) last_box_list = box_list #ystart = 355 #ystop = 550 #scale = 1.5 #box_list2 = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) #box_list = box_list1 + box_list2 heat = np.zeros_like(image[:,:,0]).astype(np.float) # Add heat to each box in box list heat = add_heat(heat,box_list) history.append(heat) if count >=6: #print('draw') #print(history) hist1 = 0 hist2 = 0 #NULL hist3 = 0 #NULL hist4 = 0 #NULL hist5 = 0 #NULL hist6 = 0 #NULL hist7 = 0 #NULL hist1 = history.popleft() if history: hist2 = 
history.popleft() if history: hist3 = history.popleft() if history: hist4 = history.popleft() if history: hist5 = history.popleft() if history: hist6 = history.popleft() if history: hist7 = history.popleft() heat = hist1 + hist2 + hist3 + hist4 + hist5 + hist6 + hist7 # Apply threshold to help remove false positives heat = apply_threshold(heat,7) # Visualize the heatmap when displaying heatmap = np.clip(heat, 0, 255) # Find final boxes from heatmap using label function labels = label(heatmap) last_labels = labels #return labels, heatmap count = 0 draw_img = draw_labeled_bboxes(np.copy(image), labels) return draw_img else: #print('skip') count = count + 1 #heatmap = np.clip(heat, 0, 255) #labels = label(heatmap) #last_labels = labels draw_img = draw_labeled_bboxes(np.copy(image), last_labels) return draw_img def find_vehicles_in_video(video):
def main(): ystart = 400 ystop = 656 scale = 1.5 ### TRAINING ##### print(len(cars)) #train_model(cars, notcars) ### INFERENCE ##### #myimage = mpimg.imread('./test1.jpg') myvid = 'project_video.mp4' find_vehicles_in_video(myvid) #new_img =find_vehicles_in_frame(myimage) #plt.imshow(new_img) #out_img, box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) #heat = np.zeros_like(image[:,:,0]).astype(np.float) # Add heat to each box in box list #heat = add_heat(heat,box_list) # Apply threshold to help remove false positives #heat = apply_threshold(heat,1) # Visualize the heatmap when displaying #heatmap = np.clip(heat, 0, 255) # Find final boxes from heatmap using label function #labels = label(heatmap) #draw_img = draw_labeled_bboxes(np.copy(image), labels) #fig = plt.figure() #plt.subplot(121) #plt.imshow(image) #plt.title('Original') #plt.subplot(121) #plt.imshow(draw_img) #plt.title('Car Positions') #plt.subplot(122) #plt.imshow(heatmap, cmap='hot') #plt.title('Heat Map') #fig.tight_layout() if __name__ == '__main__': main()
output = "tracked2_" + video input_clip = VideoFileClip(video) clip = input_clip.fl_image(find_vehicles_in_frame) #clip = input_clip.fl_image(save_image) clip.write_videofile(output, audio=False)
identifier_body
pipeline.py
import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import cv2 import glob import time from sklearn.svm import LinearSVC from sklearn.preprocessing import StandardScaler from skimage.feature import hog from helper_func import * from sklearn.model_selection import GridSearchCV import pickle import random # NOTE: the next import is only valid for scikit-learn version <= 0.17 # for scikit-learn >= 0.18 use: from sklearn.model_selection import train_test_split from scipy.ndimage.measurements import label from collections import deque from moviepy.editor import VideoFileClip from IPython.display import HTML #from sklearn.cross_validation import train_test_split # Define a function to extract features from a single image window # This function is very similar to extract_features() # just for a single image rather than list of images def augment_image(img): new_img = cv2.GaussianBlur(img, (3,3), 0) #new_img = cv2.cvtColor(new_img, cv2.COLOR_YUV2RGB) new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2HSV) new_img = np.array(new_img, dtype = np.float64) #Generate new random brightness random_bright = .5+random.uniform(0.3,1.0) new_img[:,:,2] = random_bright*new_img[:,:,2] new_img[:,:,2][new_img[:,:,2]>255] = 255 new_img = np.array(new_img, dtype = np.uint8) #Convert back to RGB colorspace new_img = cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB) #new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2YUV) return new_img # Read in cars and notcars images = glob.glob('./dataset/*.png') cars = [] notcars = [] for image in images: cars.append(image) images = glob.glob('./dataset_nonv/*.png') for image in images: notcars.append(image) # Reduce the sample size because # The quiz evaluator times out after 13s of CPU time #sample_size = 500 #cars = cars[0:sample_size] #notcars = notcars[0:sample_size] color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb orient = 32 # HOG orientations pix_per_cell = 8 # HOG pixels per cell cell_per_block = 2 # HOG cells per block hog_channel = "ALL" # Can be 0, 1, 2, or "ALL" spatial_size = (32, 32) # Spatial binning dimensions hist_bins = 32 # Number of histogram bins spatial_feat = True # Spatial features on or off hist_feat = True # Histogram features on or off hog_feat = True # HOG features on or off y_start_stop = [400, 656] # Min and max in y to search in slide_window() def train_model(cars, notcars): car_features = extract_features(cars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) notcar_features = extract_features(notcars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) X = np.vstack((car_features, notcar_features)).astype(np.float64) # Fit a per-column scaler X_scaler = StandardScaler().fit(X) # Apply the scaler to X scaled_X = X_scaler.transform(X) # Define the labels vector y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features)))) # Split up data into randomized training and test sets rand_state = np.random.randint(0, 100) X_train, X_test, y_train, y_test = train_test_split( scaled_X, y, test_size=0.2, random_state=rand_state) print('Using:',orient,'orientations',pix_per_cell, 'pixels per cell and', cell_per_block,'cells per block') print('Feature 
vector length:', len(X_train[0])) # Use a linear SVC parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} svr = svm.SVC() svc = GridSearchCV(svr, parameters) #svc = LinearSVC() # Check the training time for the SVC t=time.time() svc.fit(X_train, y_train) t2 = time.time() print(round(t2-t, 2), 'Seconds to train SVC...') # Check the score of the SVC print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4)) # Check the prediction time for a single sample t=time.time() #model = pickle.dump(svc, 'model.pkl') with open('model.p', 'wb') as f: pickle.dump((svc, X_scaler), f) #return svc, X_scaler #image = mpimg.imread('test1.jpg') #draw_image = np.copy(image) # Uncomment the following line if you extracted training # data from .png images (scaled 0 to 1 by mpimg) and the # image you are searching is a .jpg (scaled 0 to 255) #image = image.astype(np.float32)/255 #windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop, # xy_window=(96, 96), xy_overlap=(0.5, 0.5)) #hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space, # spatial_size=spatial_size, hist_bins=hist_bins, # orient=orient, pix_per_cell=pix_per_cell, # cell_per_block=cell_per_block, # hog_channel=hog_channel, spatial_feat=spatial_feat, # hist_feat=hist_feat, hog_feat=hog_feat) #window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6) #plt.imshow(window_img) count =6 last_labels = [] history = deque(maxlen=5) def find_vehicles_in_frame(image): global count global last_labels #if count < 2: # count = count + 1 # draw_img = draw_labeled_bboxes(np.copy(image), last_labels) # return draw_img #draw_img = draw_labeled_bboxes(np.copy(image), labels) #return draw_img #else: #print(count) ystart = 400 ystop = 656 scale = 1.5 box_list = [] #image = mpimg.imread('test1.jpg') svc, X_scaler = pickle.load( open("model.p", "rb" ) ) box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 464, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 416, 480, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 500, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 430, 530, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 530, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 430, 560, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 600, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 464, 656, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) last_box_list = box_list #ystart = 355 #ystop = 550 #scale = 1.5 #box_list2 = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) #box_list = box_list1 + box_list2 heat = np.zeros_like(image[:,:,0]).astype(np.float) # Add heat to each box in box list heat = add_heat(heat,box_list) history.append(heat) if count >=6: #print('draw') #print(history) hist1 = 0 hist2 = 0 #NULL hist3 = 0 #NULL hist4 = 0 #NULL hist5 = 0 #NULL hist6 = 0 #NULL hist7 = 0 #NULL hist1 = history.popleft() if history:
if history: hist3 = history.popleft() if history: hist4 = history.popleft() if history: hist5 = history.popleft() if history: hist6 = history.popleft() if history: hist7 = history.popleft() heat = hist1 + hist2 + hist3 + hist4 + hist5 + hist6 + hist7 # Apply threshold to help remove false positives heat = apply_threshold(heat,7) # Visualize the heatmap when displaying heatmap = np.clip(heat, 0, 255) # Find final boxes from heatmap using label function labels = label(heatmap) last_labels = labels #return labels, heatmap count = 0 draw_img = draw_labeled_bboxes(np.copy(image), labels) return draw_img else: #print('skip') count = count + 1 #heatmap = np.clip(heat, 0, 255) #labels = label(heatmap) #last_labels = labels draw_img = draw_labeled_bboxes(np.copy(image), last_labels) return draw_img def find_vehicles_in_video(video): output = "tracked2_" + video input_clip = VideoFileClip(video) clip = input_clip.fl_image(find_vehicles_in_frame) #clip = input_clip.fl_image(save_image) clip.write_videofile(output, audio=False) def main(): ystart = 400 ystop = 656 scale = 1.5 ### TRAINING ##### print(len(cars)) #train_model(cars, notcars) ### INFERENCE ##### #myimage = mpimg.imread('./test1.jpg') myvid = 'project_video.mp4' find_vehicles_in_video(myvid) #new_img =find_vehicles_in_frame(myimage) #plt.imshow(new_img) #out_img, box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) #heat = np.zeros_like(image[:,:,0]).astype(np.float) # Add heat to each box in box list #heat = add_heat(heat,box_list) # Apply threshold to help remove false positives #heat = apply_threshold(heat,1) # Visualize the heatmap when displaying #heatmap = np.clip(heat, 0, 255) # Find final boxes from heatmap using label function #labels = label(heatmap) #draw_img = draw_labeled_bboxes(np.copy(image), labels) #fig = plt.figure() #plt.subplot(121) #plt.imshow(image) #plt.title('Original') #plt.subplot(121) #plt.imshow(draw_img) #plt.title('Car Positions') #plt.subplot(122) #plt.imshow(heatmap, cmap='hot') #plt.title('Heat Map') #fig.tight_layout() if __name__ == '__main__': main()
hist2 = history.popleft()
conditional_block
pipeline.py
import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import cv2 import glob import time from sklearn.svm import LinearSVC from sklearn.preprocessing import StandardScaler from skimage.feature import hog from helper_func import * from sklearn.model_selection import GridSearchCV import pickle import random # NOTE: the next import is only valid for scikit-learn version <= 0.17 # for scikit-learn >= 0.18 use: from sklearn.model_selection import train_test_split from scipy.ndimage.measurements import label from collections import deque from moviepy.editor import VideoFileClip from IPython.display import HTML #from sklearn.cross_validation import train_test_split # Define a function to extract features from a single image window # This function is very similar to extract_features() # just for a single image rather than list of images def augment_image(img): new_img = cv2.GaussianBlur(img, (3,3), 0) #new_img = cv2.cvtColor(new_img, cv2.COLOR_YUV2RGB) new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2HSV) new_img = np.array(new_img, dtype = np.float64) #Generate new random brightness random_bright = .5+random.uniform(0.3,1.0) new_img[:,:,2] = random_bright*new_img[:,:,2] new_img[:,:,2][new_img[:,:,2]>255] = 255 new_img = np.array(new_img, dtype = np.uint8) #Convert back to RGB colorspace new_img = cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB) #new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2YUV) return new_img # Read in cars and notcars images = glob.glob('./dataset/*.png') cars = [] notcars = [] for image in images: cars.append(image) images = glob.glob('./dataset_nonv/*.png') for image in images: notcars.append(image) # Reduce the sample size because # The quiz evaluator times out after 13s of CPU time #sample_size = 500 #cars = cars[0:sample_size] #notcars = notcars[0:sample_size] color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb orient = 32 # HOG orientations pix_per_cell = 8 # HOG pixels per cell cell_per_block = 2 # HOG cells per block hog_channel = "ALL" # Can be 0, 1, 2, or "ALL" spatial_size = (32, 32) # Spatial binning dimensions hist_bins = 32 # Number of histogram bins spatial_feat = True # Spatial features on or off hist_feat = True # Histogram features on or off hog_feat = True # HOG features on or off y_start_stop = [400, 656] # Min and max in y to search in slide_window() def train_model(cars, notcars): car_features = extract_features(cars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) notcar_features = extract_features(notcars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) X = np.vstack((car_features, notcar_features)).astype(np.float64) # Fit a per-column scaler X_scaler = StandardScaler().fit(X) # Apply the scaler to X scaled_X = X_scaler.transform(X) # Define the labels vector y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features)))) # Split up data into randomized training and test sets rand_state = np.random.randint(0, 100) X_train, X_test, y_train, y_test = train_test_split( scaled_X, y, test_size=0.2, random_state=rand_state) print('Using:',orient,'orientations',pix_per_cell, 'pixels per cell and', cell_per_block,'cells per block') print('Feature 
vector length:', len(X_train[0])) # Use a linear SVC parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} svr = svm.SVC() svc = GridSearchCV(svr, parameters) #svc = LinearSVC() # Check the training time for the SVC t=time.time() svc.fit(X_train, y_train) t2 = time.time() print(round(t2-t, 2), 'Seconds to train SVC...') # Check the score of the SVC print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4)) # Check the prediction time for a single sample t=time.time() #model = pickle.dump(svc, 'model.pkl') with open('model.p', 'wb') as f: pickle.dump((svc, X_scaler), f) #return svc, X_scaler #image = mpimg.imread('test1.jpg') #draw_image = np.copy(image) # Uncomment the following line if you extracted training # data from .png images (scaled 0 to 1 by mpimg) and the
#hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space, # spatial_size=spatial_size, hist_bins=hist_bins, # orient=orient, pix_per_cell=pix_per_cell, # cell_per_block=cell_per_block, # hog_channel=hog_channel, spatial_feat=spatial_feat, # hist_feat=hist_feat, hog_feat=hog_feat) #window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6) #plt.imshow(window_img) count =6 last_labels = [] history = deque(maxlen=5) def find_vehicles_in_frame(image): global count global last_labels #if count < 2: # count = count + 1 # draw_img = draw_labeled_bboxes(np.copy(image), last_labels) # return draw_img #draw_img = draw_labeled_bboxes(np.copy(image), labels) #return draw_img #else: #print(count) ystart = 400 ystop = 656 scale = 1.5 box_list = [] #image = mpimg.imread('test1.jpg') svc, X_scaler = pickle.load( open("model.p", "rb" ) ) box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 464, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 416, 480, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 500, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 430, 530, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 530, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 430, 560, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 400, 600, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list += find_cars(image, 464, 656, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) last_box_list = box_list #ystart = 355 #ystop = 550 #scale = 1.5 #box_list2 = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) #box_list = box_list1 + box_list2 heat = np.zeros_like(image[:,:,0]).astype(np.float) # Add heat to each box in box list heat = add_heat(heat,box_list) history.append(heat) if count >=6: #print('draw') #print(history) hist1 = 0 hist2 = 0 #NULL hist3 = 0 #NULL hist4 = 0 #NULL hist5 = 0 #NULL hist6 = 0 #NULL hist7 = 0 #NULL hist1 = history.popleft() if history: hist2 = history.popleft() if history: hist3 = history.popleft() if history: hist4 = history.popleft() if history: hist5 = history.popleft() if history: hist6 = history.popleft() if history: hist7 = history.popleft() heat = hist1 + hist2 + hist3 + hist4 + hist5 + hist6 + hist7 # Apply threshold to help remove false positives heat = apply_threshold(heat,7) # Visualize the heatmap when displaying heatmap = np.clip(heat, 0, 255) # Find final boxes from heatmap using label function labels = label(heatmap) last_labels = labels #return labels, heatmap count = 0 draw_img = draw_labeled_bboxes(np.copy(image), labels) return draw_img else: #print('skip') count = count + 1 #heatmap = np.clip(heat, 0, 255) #labels = label(heatmap) #last_labels = labels draw_img = draw_labeled_bboxes(np.copy(image), last_labels) return draw_img def find_vehicles_in_video(video): output = "tracked2_" + video input_clip = VideoFileClip(video) clip = input_clip.fl_image(find_vehicles_in_frame) #clip = input_clip.fl_image(save_image) 
clip.write_videofile(output, audio=False) def main(): ystart = 400 ystop = 656 scale = 1.5 ### TRAINING ##### print(len(cars)) #train_model(cars, notcars) ### INFERENCE ##### #myimage = mpimg.imread('./test1.jpg') myvid = 'project_video.mp4' find_vehicles_in_video(myvid) #new_img =find_vehicles_in_frame(myimage) #plt.imshow(new_img) #out_img, box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) #heat = np.zeros_like(image[:,:,0]).astype(np.float) # Add heat to each box in box list #heat = add_heat(heat,box_list) # Apply threshold to help remove false positives #heat = apply_threshold(heat,1) # Visualize the heatmap when displaying #heatmap = np.clip(heat, 0, 255) # Find final boxes from heatmap using label function #labels = label(heatmap) #draw_img = draw_labeled_bboxes(np.copy(image), labels) #fig = plt.figure() #plt.subplot(121) #plt.imshow(image) #plt.title('Original') #plt.subplot(121) #plt.imshow(draw_img) #plt.title('Car Positions') #plt.subplot(122) #plt.imshow(heatmap, cmap='hot') #plt.title('Heat Map') #fig.tight_layout() if __name__ == '__main__': main()
# image you are searching is a .jpg (scaled 0 to 255) #image = image.astype(np.float32)/255 #windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop, # xy_window=(96, 96), xy_overlap=(0.5, 0.5))
random_line_split
app.rs
use std::cell::RefCell; use std::error; use gio::{self, prelude::*}; use gtk::{self, prelude::*}; use crate::utils::*; use crate::header_bar::*; use crate::about_dialog::*; #[derive(Clone)] pub struct App { main_window: gtk::ApplicationWindow, pub header_bar: HeaderBar, url_input: gtk::Entry } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Action { About, Quit, ClickToggle(ToggleButtonState) } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ToggleButtonState { State1, State2, } impl<'a> From<&'a glib::Variant> for ToggleButtonState { fn from(v: &glib::Variant) -> ToggleButtonState { v.get::<bool>().expect("Invalid record state type").into() } } impl From<bool> for ToggleButtonState { fn from(v: bool) -> ToggleButtonState { match v { false => ToggleButtonState::State1, true => ToggleButtonState::State2, } } } impl From<ToggleButtonState> for glib::Variant { fn from(v: ToggleButtonState) -> glib::Variant { match v { ToggleButtonState::State1 => false.to_variant(), ToggleButtonState::State2 => true.to_variant(), } } } trait GtkComboBoxTrait { fn get_text(self: &Self) -> String; } impl GtkComboBoxTrait for gtk::ComboBoxText { fn get_text(&self) -> String { self.get_active_text() .expect("Failed to get widget text") .to_string() } } impl App { fn new(application: &gtk::Application) -> Result<App, Box<dyn error::Error>> { let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT); // Here build the UI but don't show it yet let main_window = gtk::ApplicationWindow::new(application); main_window.set_title("(poor) Postman"); main_window.set_border_width(5); main_window.set_position(gtk::WindowPosition::Center); main_window.set_default_size(840, 480); // Create headerbar for the application window let header_bar = HeaderBar::new(&main_window); // create a widget container, let layout = gtk::Box::new(gtk::Orientation::Vertical, 5); // Create a title label let url_title = gtk::Label::new(None); url_title.set_markup("<big>Type in your URL</big>"); // Pressing Alt+T will activate this button let button = gtk::Button::new(); let btn_label = gtk::Label::new_with_mnemonic( Some("_Click to trigger request") ); button.add(&btn_label); // Trigger request button let trigger_btn_row = gtk::Box::new(gtk::Orientation::Horizontal, 5); trigger_btn_row.pack_start(&button, false, true, 10); let url_input = gtk::Entry::new(); url_input.set_placeholder_text("(poor) Postman"); url_input.insert_text("http://httpbin.org/get", &mut 0); let verb_selector = gtk::ComboBoxText::new(); verb_selector.insert(0, "ID0", "GET"); verb_selector.insert(1, "ID1", "POST"); verb_selector.set_active(Some(0)); let verb_url_row = gtk::Box::new(gtk::Orientation::Horizontal, 5); verb_url_row.add(&verb_selector); // http://gtk-rs.org/docs/gtk/prelude/trait.BoxExt.html#tymethod.pack_start // params: child, expand, fill, padding (px) verb_url_row.pack_start(&url_input, true, true, 0); // Payload horizontal block let payload_title = gtk::Label::new(None); payload_title.set_markup("<big>Payload</big>"); let payload_input = gtk::Entry::new(); payload_input.insert_text(r#"ex. {"k": "key","v": "val"}"#, &mut 0); payload_input.set_sensitive(false); let payload_row = gtk::Box::new(gtk::Orientation::Horizontal, 5); payload_row.set_sensitive(false); payload_row.add(&payload_title); payload_row.pack_start(&payload_input, true, true, 0); // when POST is selected, activate the payload input box // TODO: why don't I need to also clone "payload_input"? 
verb_selector.connect_changed(clone!(payload_row, payload_input => move |verb_selector| { let txt = gtk::ComboBoxText::get_text(&verb_selector); match txt.as_ref() { "POST" => { payload_row.set_sensitive(true); payload_input.set_sensitive(true); } _ => { payload_row.set_sensitive(false); payload_input.set_sensitive(false); } } })); // connect the Button click to the callback button.connect_clicked(clone!(button, verb_selector, url_input, payload_input, tx => move |_| { button.set_sensitive(false); // and trigger HTTP thread spawn_thread( &tx, gtk::ComboBoxText::get_text(&verb_selector), url_input.get_buffer().get_text().to_owned(), Some(json!(payload_input.get_buffer().get_text().to_owned())) ); })); // connect the <Return> keypress to the callback url_input.connect_activate(clone!(button, verb_selector, payload_input, tx => move |_entry| { button.set_sensitive(false); spawn_thread( &tx, gtk::ComboBoxText::get_text(&verb_selector), _entry.get_buffer().get_text().to_owned(), Some(json!(payload_input.get_buffer().get_text().to_owned())) ); })); // container for the response let response_container = gtk::TextView::new(); response_container.set_editable(false); response_container.set_wrap_mode(gtk::WrapMode::Word); let buf = response_container.get_buffer().expect("I thought it could work..."); buf.set_text("The response will appear here..."); // add all widgets layout.add(&url_title); layout.add(&verb_url_row); layout.pack_start(&payload_row, false, true, 10); layout.add(&trigger_btn_row); layout.pack_start(&response_container, true, true, 10); // add the widget container to the window main_window.add(&layout); let app = App { main_window, url_input, header_bar, }; // Create the application actions Action::create(&app, &application); // attach thread receiver rx.attach(None, move |text| { // let text = format_response(text); buf.set_text(&text); // enable the button again button.set_sensitive(true); // keeps the channel open glib::Continue(true) }); Ok(app) } pub fn on_startup(application: &gtk::Application) { let app = match App::new(application) { Ok(app) => app, Err(err) => { eprintln!("Error creating app: {}",err); return; } }; application.connect_activate(clone!(app => move |_| { app.on_activate(); })); // cant get rid of this RefCell wrapping ... let app_container = RefCell::new(Some(app)); application.connect_shutdown(move |_| { let app = app_container .borrow_mut() .take() .expect("Shutdown called multiple times"); app.on_shutdown(); }); } fn on_activate(&self) { // Show our window and bring it to the foreground self.main_window.show_all(); self.main_window .present_with_time((glib::get_monotonic_time() / 1000) as u32); } // Called when the application shuts down. We drop our app struct here fn on_shutdown(self)
} impl Action { // The full action name as is used in e.g. menu models pub fn full_name(self) -> &'static str { match self { Action::About => "app.about", Action::Quit => "app.quit", Action::ClickToggle(_) => "app.toggle", } } // Create our application actions here fn create(app: &App, application: &gtk::Application) { eprintln!("Creating actions!"); // about action: when activated it will show an about dialog let about = gio::SimpleAction::new("about", None); about.connect_activate(clone!(application => move |_action, _parameter| { show_about_dialog(&application); })); application.add_action(&about); // switch button action // credits: https://github.com/gtk-rs/examples/blob/master/src/bin/menu_bar_system.rs let switch_action = gio::SimpleAction::new_stateful("switch", None, &false.to_variant()); let switch_btn = &app.header_bar.switch_btn; switch_btn.connect_property_active_notify(clone!(switch_action => move |s| { eprintln!("The switch is now {}", &s.get_active().to_variant()); switch_action.change_state(&s.get_active().to_variant()); })); application.add_action(&switch_action); // toggle button action let toggle_action = gio::SimpleAction::new_stateful("toggle", None, &false.to_variant()); let toggle_btn = &app.header_bar.toggle_button; toggle_btn.connect_toggled(|btn| { eprintln!("Button state is {}", btn.get_active()); let app = gio::Application::get_default().expect("No default application"); Action::ClickToggle(ToggleButtonState::from(btn.get_active())).trigger(&app); }); application.add_action(&toggle_action); // When activated, shuts down the application let quit = gio::SimpleAction::new("quit", None); quit.connect_activate(clone!(application => move |_action, _parameter| { application.quit(); })); application.set_accels_for_action(Action::Quit.full_name(), &["<Primary>Q"]); application.add_action(&quit); } pub fn trigger<A: IsA<gio::Application> + gio::ActionGroupExt>(self, app: &A) { match self { Action::Quit => app.activate_action("quit", None), Action::About => app.activate_action("about", None), Action::ClickToggle(new_state) => app.change_action_state("toggle", &new_state.into()), } } }
{ eprintln!("Shutting down the whole thing"); }
identifier_body
app.rs
use std::cell::RefCell; use std::error; use gio::{self, prelude::*}; use gtk::{self, prelude::*}; use crate::utils::*; use crate::header_bar::*; use crate::about_dialog::*; #[derive(Clone)] pub struct App { main_window: gtk::ApplicationWindow, pub header_bar: HeaderBar, url_input: gtk::Entry } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Action { About, Quit, ClickToggle(ToggleButtonState) } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ToggleButtonState { State1, State2, } impl<'a> From<&'a glib::Variant> for ToggleButtonState { fn from(v: &glib::Variant) -> ToggleButtonState { v.get::<bool>().expect("Invalid record state type").into() } } impl From<bool> for ToggleButtonState { fn from(v: bool) -> ToggleButtonState { match v { false => ToggleButtonState::State1, true => ToggleButtonState::State2, } } } impl From<ToggleButtonState> for glib::Variant { fn from(v: ToggleButtonState) -> glib::Variant { match v { ToggleButtonState::State1 => false.to_variant(), ToggleButtonState::State2 => true.to_variant(), } } } trait GtkComboBoxTrait { fn get_text(self: &Self) -> String; } impl GtkComboBoxTrait for gtk::ComboBoxText { fn get_text(&self) -> String { self.get_active_text() .expect("Failed to get widget text") .to_string() } } impl App { fn new(application: &gtk::Application) -> Result<App, Box<dyn error::Error>> { let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT); // Here build the UI but don't show it yet let main_window = gtk::ApplicationWindow::new(application); main_window.set_title("(poor) Postman"); main_window.set_border_width(5); main_window.set_position(gtk::WindowPosition::Center); main_window.set_default_size(840, 480); // Create headerbar for the application window let header_bar = HeaderBar::new(&main_window); // create a widget container, let layout = gtk::Box::new(gtk::Orientation::Vertical, 5); // Create a title label let url_title = gtk::Label::new(None); url_title.set_markup("<big>Type in your URL</big>"); // Pressing Alt+T will activate this button let button = gtk::Button::new(); let btn_label = gtk::Label::new_with_mnemonic( Some("_Click to trigger request") ); button.add(&btn_label); // Trigger request button let trigger_btn_row = gtk::Box::new(gtk::Orientation::Horizontal, 5); trigger_btn_row.pack_start(&button, false, true, 10); let url_input = gtk::Entry::new(); url_input.set_placeholder_text("(poor) Postman"); url_input.insert_text("http://httpbin.org/get", &mut 0); let verb_selector = gtk::ComboBoxText::new(); verb_selector.insert(0, "ID0", "GET"); verb_selector.insert(1, "ID1", "POST"); verb_selector.set_active(Some(0)); let verb_url_row = gtk::Box::new(gtk::Orientation::Horizontal, 5); verb_url_row.add(&verb_selector); // http://gtk-rs.org/docs/gtk/prelude/trait.BoxExt.html#tymethod.pack_start // params: child, expand, fill, padding (px) verb_url_row.pack_start(&url_input, true, true, 0); // Payload horizontal block
let payload_row = gtk::Box::new(gtk::Orientation::Horizontal, 5); payload_row.set_sensitive(false); payload_row.add(&payload_title); payload_row.pack_start(&payload_input, true, true, 0); // when POST is selected, activate the payload input box // TODO: why don't I need to also clone "payload_input"? verb_selector.connect_changed(clone!(payload_row, payload_input => move |verb_selector| { let txt = gtk::ComboBoxText::get_text(&verb_selector); match txt.as_ref() { "POST" => { payload_row.set_sensitive(true); payload_input.set_sensitive(true); } _ => { payload_row.set_sensitive(false); payload_input.set_sensitive(false); } } })); // connect the Button click to the callback button.connect_clicked(clone!(button, verb_selector, url_input, payload_input, tx => move |_| { button.set_sensitive(false); // and trigger HTTP thread spawn_thread( &tx, gtk::ComboBoxText::get_text(&verb_selector), url_input.get_buffer().get_text().to_owned(), Some(json!(payload_input.get_buffer().get_text().to_owned())) ); })); // connect the <Return> keypress to the callback url_input.connect_activate(clone!(button, verb_selector, payload_input, tx => move |_entry| { button.set_sensitive(false); spawn_thread( &tx, gtk::ComboBoxText::get_text(&verb_selector), _entry.get_buffer().get_text().to_owned(), Some(json!(payload_input.get_buffer().get_text().to_owned())) ); })); // container for the response let response_container = gtk::TextView::new(); response_container.set_editable(false); response_container.set_wrap_mode(gtk::WrapMode::Word); let buf = response_container.get_buffer().expect("I thought it could work..."); buf.set_text("The response will appear here..."); // add all widgets layout.add(&url_title); layout.add(&verb_url_row); layout.pack_start(&payload_row, false, true, 10); layout.add(&trigger_btn_row); layout.pack_start(&response_container, true, true, 10); // add the widget container to the window main_window.add(&layout); let app = App { main_window, url_input, header_bar, }; // Create the application actions Action::create(&app, &application); // attach thread receiver rx.attach(None, move |text| { // let text = format_response(text); buf.set_text(&text); // enable the button again button.set_sensitive(true); // keeps the channel open glib::Continue(true) }); Ok(app) } pub fn on_startup(application: &gtk::Application) { let app = match App::new(application) { Ok(app) => app, Err(err) => { eprintln!("Error creating app: {}",err); return; } }; application.connect_activate(clone!(app => move |_| { app.on_activate(); })); // cant get rid of this RefCell wrapping ... let app_container = RefCell::new(Some(app)); application.connect_shutdown(move |_| { let app = app_container .borrow_mut() .take() .expect("Shutdown called multiple times"); app.on_shutdown(); }); } fn on_activate(&self) { // Show our window and bring it to the foreground self.main_window.show_all(); self.main_window .present_with_time((glib::get_monotonic_time() / 1000) as u32); } // Called when the application shuts down. We drop our app struct here fn on_shutdown(self) { eprintln!("Shutting down the whole thing"); } } impl Action { // The full action name as is used in e.g. 
menu models pub fn full_name(self) -> &'static str { match self { Action::About => "app.about", Action::Quit => "app.quit", Action::ClickToggle(_) => "app.toggle", } } // Create our application actions here fn create(app: &App, application: &gtk::Application) { eprintln!("Creating actions!"); // about action: when activated it will show an about dialog let about = gio::SimpleAction::new("about", None); about.connect_activate(clone!(application => move |_action, _parameter| { show_about_dialog(&application); })); application.add_action(&about); // switch button action // credits: https://github.com/gtk-rs/examples/blob/master/src/bin/menu_bar_system.rs let switch_action = gio::SimpleAction::new_stateful("switch", None, &false.to_variant()); let switch_btn = &app.header_bar.switch_btn; switch_btn.connect_property_active_notify(clone!(switch_action => move |s| { eprintln!("The switch is now {}", &s.get_active().to_variant()); switch_action.change_state(&s.get_active().to_variant()); })); application.add_action(&switch_action); // toggle button action let toggle_action = gio::SimpleAction::new_stateful("toggle", None, &false.to_variant()); let toggle_btn = &app.header_bar.toggle_button; toggle_btn.connect_toggled(|btn| { eprintln!("Button state is {}", btn.get_active()); let app = gio::Application::get_default().expect("No default application"); Action::ClickToggle(ToggleButtonState::from(btn.get_active())).trigger(&app); }); application.add_action(&toggle_action); // When activated, shuts down the application let quit = gio::SimpleAction::new("quit", None); quit.connect_activate(clone!(application => move |_action, _parameter| { application.quit(); })); application.set_accels_for_action(Action::Quit.full_name(), &["<Primary>Q"]); application.add_action(&quit); } pub fn trigger<A: IsA<gio::Application> + gio::ActionGroupExt>(self, app: &A) { match self { Action::Quit => app.activate_action("quit", None), Action::About => app.activate_action("about", None), Action::ClickToggle(new_state) => app.change_action_state("toggle", &new_state.into()), } } }
let payload_title = gtk::Label::new(None); payload_title.set_markup("<big>Payload</big>"); let payload_input = gtk::Entry::new(); payload_input.insert_text(r#"ex. {"k": "key","v": "val"}"#, &mut 0); payload_input.set_sensitive(false);
random_line_split
app.rs
use std::cell::RefCell; use std::error; use gio::{self, prelude::*}; use gtk::{self, prelude::*}; use crate::utils::*; use crate::header_bar::*; use crate::about_dialog::*; #[derive(Clone)] pub struct App { main_window: gtk::ApplicationWindow, pub header_bar: HeaderBar, url_input: gtk::Entry } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Action { About, Quit, ClickToggle(ToggleButtonState) } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ToggleButtonState { State1, State2, } impl<'a> From<&'a glib::Variant> for ToggleButtonState { fn
(v: &glib::Variant) -> ToggleButtonState { v.get::<bool>().expect("Invalid record state type").into() } } impl From<bool> for ToggleButtonState { fn from(v: bool) -> ToggleButtonState { match v { false => ToggleButtonState::State1, true => ToggleButtonState::State2, } } } impl From<ToggleButtonState> for glib::Variant { fn from(v: ToggleButtonState) -> glib::Variant { match v { ToggleButtonState::State1 => false.to_variant(), ToggleButtonState::State2 => true.to_variant(), } } } trait GtkComboBoxTrait { fn get_text(self: &Self) -> String; } impl GtkComboBoxTrait for gtk::ComboBoxText { fn get_text(&self) -> String { self.get_active_text() .expect("Failed to get widget text") .to_string() } } impl App { fn new(application: &gtk::Application) -> Result<App, Box<dyn error::Error>> { let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT); // Here build the UI but don't show it yet let main_window = gtk::ApplicationWindow::new(application); main_window.set_title("(poor) Postman"); main_window.set_border_width(5); main_window.set_position(gtk::WindowPosition::Center); main_window.set_default_size(840, 480); // Create headerbar for the application window let header_bar = HeaderBar::new(&main_window); // create a widget container, let layout = gtk::Box::new(gtk::Orientation::Vertical, 5); // Create a title label let url_title = gtk::Label::new(None); url_title.set_markup("<big>Type in your URL</big>"); // Pressing Alt+T will activate this button let button = gtk::Button::new(); let btn_label = gtk::Label::new_with_mnemonic( Some("_Click to trigger request") ); button.add(&btn_label); // Trigger request button let trigger_btn_row = gtk::Box::new(gtk::Orientation::Horizontal, 5); trigger_btn_row.pack_start(&button, false, true, 10); let url_input = gtk::Entry::new(); url_input.set_placeholder_text("(poor) Postman"); url_input.insert_text("http://httpbin.org/get", &mut 0); let verb_selector = gtk::ComboBoxText::new(); verb_selector.insert(0, "ID0", "GET"); verb_selector.insert(1, "ID1", "POST"); verb_selector.set_active(Some(0)); let verb_url_row = gtk::Box::new(gtk::Orientation::Horizontal, 5); verb_url_row.add(&verb_selector); // http://gtk-rs.org/docs/gtk/prelude/trait.BoxExt.html#tymethod.pack_start // params: child, expand, fill, padding (px) verb_url_row.pack_start(&url_input, true, true, 0); // Payload horizontal block let payload_title = gtk::Label::new(None); payload_title.set_markup("<big>Payload</big>"); let payload_input = gtk::Entry::new(); payload_input.insert_text(r#"ex. {"k": "key","v": "val"}"#, &mut 0); payload_input.set_sensitive(false); let payload_row = gtk::Box::new(gtk::Orientation::Horizontal, 5); payload_row.set_sensitive(false); payload_row.add(&payload_title); payload_row.pack_start(&payload_input, true, true, 0); // when POST is selected, activate the payload input box // TODO: why don't I need to also clone "payload_input"? 
verb_selector.connect_changed(clone!(payload_row, payload_input => move |verb_selector| { let txt = gtk::ComboBoxText::get_text(&verb_selector); match txt.as_ref() { "POST" => { payload_row.set_sensitive(true); payload_input.set_sensitive(true); } _ => { payload_row.set_sensitive(false); payload_input.set_sensitive(false); } } })); // connect the Button click to the callback button.connect_clicked(clone!(button, verb_selector, url_input, payload_input, tx => move |_| { button.set_sensitive(false); // and trigger HTTP thread spawn_thread( &tx, gtk::ComboBoxText::get_text(&verb_selector), url_input.get_buffer().get_text().to_owned(), Some(json!(payload_input.get_buffer().get_text().to_owned())) ); })); // connect the <Return> keypress to the callback url_input.connect_activate(clone!(button, verb_selector, payload_input, tx => move |_entry| { button.set_sensitive(false); spawn_thread( &tx, gtk::ComboBoxText::get_text(&verb_selector), _entry.get_buffer().get_text().to_owned(), Some(json!(payload_input.get_buffer().get_text().to_owned())) ); })); // container for the response let response_container = gtk::TextView::new(); response_container.set_editable(false); response_container.set_wrap_mode(gtk::WrapMode::Word); let buf = response_container.get_buffer().expect("I thought it could work..."); buf.set_text("The response will appear here..."); // add all widgets layout.add(&url_title); layout.add(&verb_url_row); layout.pack_start(&payload_row, false, true, 10); layout.add(&trigger_btn_row); layout.pack_start(&response_container, true, true, 10); // add the widget container to the window main_window.add(&layout); let app = App { main_window, url_input, header_bar, }; // Create the application actions Action::create(&app, &application); // attach thread receiver rx.attach(None, move |text| { // let text = format_response(text); buf.set_text(&text); // enable the button again button.set_sensitive(true); // keeps the channel open glib::Continue(true) }); Ok(app) } pub fn on_startup(application: &gtk::Application) { let app = match App::new(application) { Ok(app) => app, Err(err) => { eprintln!("Error creating app: {}",err); return; } }; application.connect_activate(clone!(app => move |_| { app.on_activate(); })); // cant get rid of this RefCell wrapping ... let app_container = RefCell::new(Some(app)); application.connect_shutdown(move |_| { let app = app_container .borrow_mut() .take() .expect("Shutdown called multiple times"); app.on_shutdown(); }); } fn on_activate(&self) { // Show our window and bring it to the foreground self.main_window.show_all(); self.main_window .present_with_time((glib::get_monotonic_time() / 1000) as u32); } // Called when the application shuts down. We drop our app struct here fn on_shutdown(self) { eprintln!("Shutting down the whole thing"); } } impl Action { // The full action name as is used in e.g. 
menu models pub fn full_name(self) -> &'static str { match self { Action::About => "app.about", Action::Quit => "app.quit", Action::ClickToggle(_) => "app.toggle", } } // Create our application actions here fn create(app: &App, application: &gtk::Application) { eprintln!("Creating actions!"); // about action: when activated it will show an about dialog let about = gio::SimpleAction::new("about", None); about.connect_activate(clone!(application => move |_action, _parameter| { show_about_dialog(&application); })); application.add_action(&about); // switch button action // credits: https://github.com/gtk-rs/examples/blob/master/src/bin/menu_bar_system.rs let switch_action = gio::SimpleAction::new_stateful("switch", None, &false.to_variant()); let switch_btn = &app.header_bar.switch_btn; switch_btn.connect_property_active_notify(clone!(switch_action => move |s| { eprintln!("The switch is now {}", &s.get_active().to_variant()); switch_action.change_state(&s.get_active().to_variant()); })); application.add_action(&switch_action); // toggle button action let toggle_action = gio::SimpleAction::new_stateful("toggle", None, &false.to_variant()); let toggle_btn = &app.header_bar.toggle_button; toggle_btn.connect_toggled(|btn| { eprintln!("Button state is {}", btn.get_active()); let app = gio::Application::get_default().expect("No default application"); Action::ClickToggle(ToggleButtonState::from(btn.get_active())).trigger(&app); }); application.add_action(&toggle_action); // When activated, shuts down the application let quit = gio::SimpleAction::new("quit", None); quit.connect_activate(clone!(application => move |_action, _parameter| { application.quit(); })); application.set_accels_for_action(Action::Quit.full_name(), &["<Primary>Q"]); application.add_action(&quit); } pub fn trigger<A: IsA<gio::Application> + gio::ActionGroupExt>(self, app: &A) { match self { Action::Quit => app.activate_action("quit", None), Action::About => app.activate_action("about", None), Action::ClickToggle(new_state) => app.change_action_state("toggle", &new_state.into()), } } }
from
identifier_name
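The masked identifier in the app.rs record above is the `from` method of `impl From<&glib::Variant> for ToggleButtonState`. A minimal, GTK-free analogue of the same two-way conversion pattern, shown here with an illustrative `Mode` enum that is not part of app.rs:

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum Mode { State1, State2 }

// bool -> Mode, mirroring From<bool> for ToggleButtonState.
impl From<bool> for Mode {
    fn from(v: bool) -> Mode {
        if v { Mode::State2 } else { Mode::State1 }
    }
}

// Mode -> bool, mirroring the conversion back into a variant payload.
impl From<Mode> for bool {
    fn from(m: Mode) -> bool {
        matches!(m, Mode::State2)
    }
}

fn main() {
    let toggled = Mode::from(true);
    assert_eq!(toggled, Mode::State2);
    // Into<bool> comes for free from the From impl above.
    let raw: bool = toggled.into();
    assert!(raw);
}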
footprint_analysis.rs
// BSD 2-Clause License // // Copyright (c) 2020 Alasdair Armstrong // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //! This module implements footprint analysis for the concurrency tool //! //! The axiomatic memory model requires deriving (syntactic) address, //! data, and control dependencies. As such, we need to know what //! registers could be touched by each instruction based purely on its //! concrete opcode. For this we analyse all the traces from a litmus //! test run, and use symbolic execution on each opcode again. 
use crossbeam::queue::SegQueue; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::fmt; use std::io::Write; use std::path::Path; use std::sync::Arc; use std::time::Instant; use isla_lib::cache::{Cacheable, Cachekey}; use isla_lib::concrete::BV; use isla_lib::config::ISAConfig; use isla_lib::executor; use isla_lib::executor::LocalFrame; use isla_lib::ir::*; use isla_lib::log; use isla_lib::simplify::{EventReferences, Taints}; use isla_lib::smt::{Accessor, EvPath, Event, Sym}; use isla_lib::zencode; #[derive(Debug, Serialize, Deserialize)] pub struct Footprint { /// Tracks which (symbolic) registers / memory reads can feed into /// a memory write within an instruction write_data_taints: (Taints, bool), /// Tracks with (symbolic) registers / memory reads can feed into /// a memory operator (read/write) address within an instruction mem_addr_taints: (Taints, bool), /// Tracks which (symbolic) registers / memory reads can feed into /// the address of a branch branch_addr_taints: (Taints, bool), /// The set of register reads (with subfield granularity) register_reads: HashSet<(Name, Vec<Accessor>)>, /// The set of register writes (also with subfield granularity) register_writes: HashSet<(Name, Vec<Accessor>)>, /// The set of register writes where the value was tainted by a memory read register_writes_tainted: HashSet<(Name, Vec<Accessor>)>, /// All register writes to the following registers are ignored for /// tracking dependencies within an instruction register_writes_ignored: HashSet<Name>, /// A store is any instruction with a WriteMem event is_store: bool, /// A load is any instruction with a ReadMem event is_load: bool, /// A branch is any instruction with a Branch event is_branch: bool, /// An exclusive is any event with an exclusive read or write kind. is_exclusive: bool, /// A cache-op is any event with a CacheOp event is_cache_op: bool, } pub struct Footprintkey { opcode: String, } impl Cachekey for Footprintkey { fn key(&self) -> String
} impl Cacheable for Footprint { type Key = Footprintkey; } impl Footprint { fn new() -> Self { Footprint { write_data_taints: (HashSet::new(), false), mem_addr_taints: (HashSet::new(), false), branch_addr_taints: (HashSet::new(), false), register_reads: HashSet::new(), register_writes: HashSet::new(), register_writes_tainted: HashSet::new(), register_writes_ignored: HashSet::new(), is_store: false, is_load: false, is_branch: false, is_exclusive: false, is_cache_op: false, } } /// This just prints the footprint information in a human-readable /// form for debugging. pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> { write!(buf, "Footprint:\n Memory write data:")?; for (reg, accessor) in &self.write_data_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Memory address:")?; for (reg, accessor) in &self.mem_addr_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Branch address:")?; for (reg, accessor) in &self.branch_addr_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Register reads:")?; for (reg, accessor) in &self.register_reads { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Register writes:")?; for (reg, accessor) in &self.register_writes { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Register writes (tainted):")?; for (reg, accessor) in &self.register_writes_tainted { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Is store: {}", self.is_store)?; write!(buf, "\n Is load: {}", self.is_load)?; write!(buf, "\n Is exclusive: {}", self.is_exclusive)?; write!(buf, "\n Is branch: {}", self.is_branch)?; writeln!(buf)?; Ok(()) } } // There is an rmw dependency from `from` to `to` if `from` is a // load-exclusive and `to` is a store-exclusive and there are no // intervening exclusives. #[allow(clippy::needless_range_loop)] pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { if from > to { return false; } let from_footprint = footprints.get(&instrs[from]).unwrap(); if !(from_footprint.is_exclusive && from_footprint.is_load) { return false; } for i in (from + 1)..to { if footprints.get(&instrs[i]).unwrap().is_exclusive { return false; } } let to_footprint = footprints.get(&instrs[to]).unwrap(); to_footprint.is_exclusive && to_footprint.is_store } /// The set of registers that could be (syntactically) touched by the /// first instruction before reaching the second. 
#[allow(clippy::needless_range_loop)] fn touched_by<B: BV>( from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>, ) -> HashSet<(Name, Vec<Accessor>)> { let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone(); let mut new_touched = HashSet::new(); for i in (from + 1)..to { let footprint = footprints.get(&instrs[i]).unwrap(); for rreg in &touched { if footprint.register_reads.contains(rreg) { for wreg in &footprint.register_writes { if !footprint.register_writes_ignored.contains(&wreg.0) { new_touched.insert(wreg.clone()); } } } } if new_touched.is_empty() { for wreg in &footprint.register_writes { touched.remove(wreg); } } else { new_touched.drain().for_each(|wreg| { touched.insert(wreg); }) } } touched } /// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// Panics if either `from` or `to` are out-of-bounds in `instrs`, or /// if an instruction does not have a footprint. pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { // `to` must be po-order-later than `from` for the dependency to exist. if from >= to { return false; } let touched = touched_by(from, to, instrs, footprints); // If any of the registers transitively touched by the first // instruction's register writes can feed into a memory address // used by the last we have an address dependency. for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 { if touched.contains(reg) { return true; } } false } /// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// See `addr_dep` pub fn data_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { if from >= to { return false; } let touched = touched_by(from, to, instrs, footprints); for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 { if touched.contains(reg) { return true; } } false } /// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// See `addr_dep` #[allow(clippy::needless_range_loop)] pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { // `to` must be a program-order later load or store let to_footprint = footprints.get(&instrs[from]).unwrap(); if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) { return false; } let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone(); let mut new_touched = Vec::new(); for i in (from + 1)..to { let footprint = footprints.get(&instrs[i]).unwrap(); if footprint.is_branch { for reg in &footprint.branch_addr_taints.0 { if touched.contains(&reg) { return true; } } } for rreg in &touched { if footprint.register_reads.contains(rreg) { for wreg in &footprint.register_writes { if !footprint.register_writes_ignored.contains(&wreg.0) { new_touched.push(wreg.clone()); } } } } new_touched.drain(..).for_each(|wreg| { touched.insert(wreg); }) } false } #[derive(Debug)] pub enum FootprintError { NoIslaFootprintFn, SymbolicInstruction, ExecutionError(String), } impl fmt::Display for FootprintError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use FootprintError::*; match self { NoIslaFootprintFn => write!( f, "Footprint analysis failed. 
To calculate the syntactic\n\ register footprint, isla expects a sail function\n\ `isla_footprint' to be available in the model, which\n\ can be used to decode and execute an instruction" ), SymbolicInstruction => write!(f, "Instruction opcode found during footprint analysis was symbolic"), ExecutionError(msg) => write!(f, "{}", msg), } } } impl Error for FootprintError { fn source(&self) -> Option<&(dyn Error + 'static)> { None } } /// # Arguments /// /// * `num_threads` - How many threads to use for analysing footprints /// * `thread_buckets` - A vector of paths (event vectors) for each thread in the litmus test /// * `lets` - The initial state of all top-level letbindings in the Sail specification /// * `regs` - The initial register state /// * `shared_state` - The state shared between all symbolic execution runs /// * `isa_config` - The architecture specific configuration information /// * `cache_dir` - A directory to cache footprint results pub fn footprint_analysis<'ir, B, P>( num_threads: usize, thread_buckets: &[Vec<EvPath<B>>], lets: &Bindings<'ir, B>, regs: &Bindings<'ir, B>, shared_state: &SharedState<B>, isa_config: &ISAConfig<B>, cache_dir: P, ) -> Result<HashMap<B, Footprint>, FootprintError> where B: BV, P: AsRef<Path>, { use FootprintError::*; let mut concrete_opcodes: HashSet<B> = HashSet::new(); let mut footprints = HashMap::new(); for thread in thread_buckets { for path in thread { for event in path { match event { Event::Instr(Val::Bits(bv)) => { if let Some(footprint) = Footprint::from_cache(Footprintkey { opcode: bv.to_string() }, cache_dir.as_ref()) { footprints.insert(*bv, footprint); } else { concrete_opcodes.insert(*bv); } } Event::Instr(_) => return Err(SymbolicInstruction), _ => (), } } } } log!(log::VERBOSE, &format!("Got {} uncached concrete opcodes for footprint analysis", concrete_opcodes.len())); let function_id = match shared_state.symtab.get("zisla_footprint") { Some(id) => id, None => return Err(NoIslaFootprintFn), }; let (args, _, instrs) = shared_state.functions.get(&function_id).expect("isla_footprint function not in shared state!"); let (task_opcodes, tasks): (Vec<B>, Vec<_>) = concrete_opcodes .iter() .enumerate() .map(|(i, opcode)| { ( opcode, LocalFrame::new(function_id, args, Some(&[Val::Bits(*opcode)]), instrs) .add_lets(lets) .add_regs(regs) .task(i), ) }) .unzip(); let mut footprint_buckets: Vec<Vec<EvPath<B>>> = vec![Vec::new(); tasks.len()]; let queue = Arc::new(SegQueue::new()); let now = Instant::now(); executor::start_multi(num_threads, None, tasks, &shared_state, queue.clone(), &executor::footprint_collector); log!(log::VERBOSE, &format!("Footprint analysis symbolic execution took: {}ms", now.elapsed().as_millis())); loop { match queue.pop() { Ok(Ok((task_id, mut events))) => { let mut events: Vec<Event<B>> = events .drain(..) 
.rev() // The first cycle is reserved for initialization .skip_while(|ev| !ev.is_cycle()) .filter(|ev| ev.is_reg() || ev.is_memory() || ev.is_branch() || ev.is_smt() || ev.is_fork()) .collect(); isla_lib::simplify::remove_unused(&mut events); footprint_buckets[task_id].push(events) } // Error during execution Ok(Err(msg)) => return Err(ExecutionError(msg)), // Empty queue Err(_) => break, } } let num_footprints: usize = footprint_buckets.iter().map(|instr_paths| instr_paths.len()).sum(); log!(log::VERBOSE, &format!("There are {} footprints", num_footprints)); let read_exclusives: Vec<usize> = isa_config.read_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect(); let write_exclusives: Vec<usize> = isa_config.write_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect(); for (i, paths) in footprint_buckets.iter().enumerate() { let opcode = task_opcodes[i]; log!(log::VERBOSE, &format!("{:?}", opcode)); let mut footprint = Footprint::new(); for events in paths { let evrefs = EventReferences::from_events(events); let mut forks: Vec<Sym> = Vec::new(); for event in events { match event { Event::Fork(_, v, _) => forks.push(*v), Event::ReadReg(reg, accessor, _) if !isa_config.ignored_registers.contains(reg) => { footprint.register_reads.insert((*reg, accessor.clone())); } Event::WriteReg(reg, accessor, data) if !isa_config.ignored_registers.contains(reg) => { footprint.register_writes.insert((*reg, accessor.clone())); // If the data written to the register is tainted by a value read // from memory record this fact. if evrefs.value_taints(data, events).1 { footprint.register_writes_tainted.insert((*reg, accessor.clone())); } } Event::MarkReg { reg, mark } => { if mark == "ignore_write" { footprint.register_writes_ignored.insert(*reg); } } Event::ReadMem { address, .. } => { footprint.is_load = true; if read_exclusives.iter().any(|rk| event.has_read_kind(*rk)) { footprint.is_exclusive = true; } evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ) } Event::WriteMem { address, data, .. } => { footprint.is_store = true; if write_exclusives.iter().any(|wk| event.has_write_kind(*wk)) { footprint.is_exclusive = true; } evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ); evrefs.collect_value_taints( data, events, &mut footprint.write_data_taints.0, &mut footprint.write_data_taints.1, ); } Event::CacheOp { address, .. } => { footprint.is_cache_op = true; evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ) } Event::Branch { address } => { footprint.is_branch = true; evrefs.collect_value_taints( address, events, &mut footprint.branch_addr_taints.0, &mut footprint.branch_addr_taints.1, ); for v in &forks { evrefs.collect_taints( *v, events, &mut footprint.branch_addr_taints.0, &mut footprint.branch_addr_taints.1, ) } } _ => (), } } } footprint.cache(Footprintkey { opcode: opcode.to_string() }, cache_dir.as_ref()); footprints.insert(opcode, footprint); } Ok(footprints) }
{ format!("opcode_{}", self.opcode) }
identifier_body
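The hole in this record is the body of `Footprintkey::key`, which simply formats the opcode into a stable cache-key string. A self-contained sketch of the same keyed-cache idea; the `CacheKey` trait and the in-memory `HashMap` are illustrative stand-ins, not isla_lib's actual `Cachekey`/`Cacheable` API, which caches to files under `cache_dir`:

use std::collections::HashMap;

trait CacheKey {
    fn key(&self) -> String;
}

struct OpcodeKey { opcode: String }

impl CacheKey for OpcodeKey {
    // Same shape as Footprintkey::key: a stable, printable cache key.
    fn key(&self) -> String {
        format!("opcode_{}", self.opcode)
    }
}

fn main() {
    // In isla the footprint cache lives on disk; a HashMap stands in here.
    let mut cache: HashMap<String, &'static str> = HashMap::new();
    let k = OpcodeKey { opcode: "d503201f".to_string() };
    cache.entry(k.key()).or_insert("footprint for NOP");
    assert_eq!(cache.get(&k.key()), Some(&"footprint for NOP"));
}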
footprint_analysis.rs
// BSD 2-Clause License // // Copyright (c) 2020 Alasdair Armstrong // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //! This module implements footprint analysis for the concurrency tool //! //! The axiomatic memory model requires deriving (syntactic) address, //! data, and control dependencies. As such, we need to know what //! registers could be touched by each instruction based purely on its //! concrete opcode. For this we analyse all the traces from a litmus //! test run, and use symbolic execution on each opcode again. 
use crossbeam::queue::SegQueue; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::fmt; use std::io::Write; use std::path::Path; use std::sync::Arc; use std::time::Instant; use isla_lib::cache::{Cacheable, Cachekey}; use isla_lib::concrete::BV; use isla_lib::config::ISAConfig; use isla_lib::executor; use isla_lib::executor::LocalFrame; use isla_lib::ir::*; use isla_lib::log; use isla_lib::simplify::{EventReferences, Taints}; use isla_lib::smt::{Accessor, EvPath, Event, Sym}; use isla_lib::zencode; #[derive(Debug, Serialize, Deserialize)] pub struct Footprint { /// Tracks which (symbolic) registers / memory reads can feed into /// a memory write within an instruction write_data_taints: (Taints, bool), /// Tracks with (symbolic) registers / memory reads can feed into /// a memory operator (read/write) address within an instruction mem_addr_taints: (Taints, bool), /// Tracks which (symbolic) registers / memory reads can feed into /// the address of a branch branch_addr_taints: (Taints, bool), /// The set of register reads (with subfield granularity) register_reads: HashSet<(Name, Vec<Accessor>)>, /// The set of register writes (also with subfield granularity) register_writes: HashSet<(Name, Vec<Accessor>)>, /// The set of register writes where the value was tainted by a memory read register_writes_tainted: HashSet<(Name, Vec<Accessor>)>, /// All register writes to the following registers are ignored for /// tracking dependencies within an instruction register_writes_ignored: HashSet<Name>, /// A store is any instruction with a WriteMem event is_store: bool, /// A load is any instruction with a ReadMem event is_load: bool, /// A branch is any instruction with a Branch event is_branch: bool, /// An exclusive is any event with an exclusive read or write kind. is_exclusive: bool, /// A cache-op is any event with a CacheOp event is_cache_op: bool, } pub struct Footprintkey { opcode: String, } impl Cachekey for Footprintkey { fn key(&self) -> String { format!("opcode_{}", self.opcode) } } impl Cacheable for Footprint { type Key = Footprintkey; } impl Footprint { fn new() -> Self { Footprint { write_data_taints: (HashSet::new(), false), mem_addr_taints: (HashSet::new(), false), branch_addr_taints: (HashSet::new(), false), register_reads: HashSet::new(), register_writes: HashSet::new(), register_writes_tainted: HashSet::new(), register_writes_ignored: HashSet::new(), is_store: false, is_load: false, is_branch: false, is_exclusive: false, is_cache_op: false, } } /// This just prints the footprint information in a human-readable /// form for debugging. pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> { write!(buf, "Footprint:\n Memory write data:")?; for (reg, accessor) in &self.write_data_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Memory address:")?; for (reg, accessor) in &self.mem_addr_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Branch address:")?; for (reg, accessor) in &self.branch_addr_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? 
} } write!(buf, "\n Register reads:")?; for (reg, accessor) in &self.register_reads { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Register writes:")?; for (reg, accessor) in &self.register_writes { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Register writes (tainted):")?; for (reg, accessor) in &self.register_writes_tainted { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Is store: {}", self.is_store)?; write!(buf, "\n Is load: {}", self.is_load)?; write!(buf, "\n Is exclusive: {}", self.is_exclusive)?; write!(buf, "\n Is branch: {}", self.is_branch)?; writeln!(buf)?; Ok(()) } } // There is an rmw dependency from `from` to `to` if `from` is a // load-exclusive and `to` is a store-exclusive and there are no // intervening exclusives. #[allow(clippy::needless_range_loop)] pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { if from > to { return false; } let from_footprint = footprints.get(&instrs[from]).unwrap(); if !(from_footprint.is_exclusive && from_footprint.is_load) { return false; } for i in (from + 1)..to { if footprints.get(&instrs[i]).unwrap().is_exclusive { return false; } } let to_footprint = footprints.get(&instrs[to]).unwrap(); to_footprint.is_exclusive && to_footprint.is_store } /// The set of registers that could be (syntactically) touched by the /// first instruction before reaching the second. #[allow(clippy::needless_range_loop)] fn touched_by<B: BV>( from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>, ) -> HashSet<(Name, Vec<Accessor>)> { let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone(); let mut new_touched = HashSet::new(); for i in (from + 1)..to { let footprint = footprints.get(&instrs[i]).unwrap(); for rreg in &touched { if footprint.register_reads.contains(rreg) { for wreg in &footprint.register_writes { if !footprint.register_writes_ignored.contains(&wreg.0) { new_touched.insert(wreg.clone()); } } } } if new_touched.is_empty() { for wreg in &footprint.register_writes { touched.remove(wreg); } } else { new_touched.drain().for_each(|wreg| { touched.insert(wreg); }) } } touched } /// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// Panics if either `from` or `to` are out-of-bounds in `instrs`, or /// if an instruction does not have a footprint. pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { // `to` must be po-order-later than `from` for the dependency to exist. if from >= to { return false; } let touched = touched_by(from, to, instrs, footprints); // If any of the registers transitively touched by the first // instruction's register writes can feed into a memory address // used by the last we have an address dependency. for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 { if touched.contains(reg) { return true; } } false } /// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// See `addr_dep` pub fn
<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { if from >= to { return false; } let touched = touched_by(from, to, instrs, footprints); for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 { if touched.contains(reg) { return true; } } false } /// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// See `addr_dep` #[allow(clippy::needless_range_loop)] pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { // `to` must be a program-order later load or store let to_footprint = footprints.get(&instrs[from]).unwrap(); if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) { return false; } let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone(); let mut new_touched = Vec::new(); for i in (from + 1)..to { let footprint = footprints.get(&instrs[i]).unwrap(); if footprint.is_branch { for reg in &footprint.branch_addr_taints.0 { if touched.contains(&reg) { return true; } } } for rreg in &touched { if footprint.register_reads.contains(rreg) { for wreg in &footprint.register_writes { if !footprint.register_writes_ignored.contains(&wreg.0) { new_touched.push(wreg.clone()); } } } } new_touched.drain(..).for_each(|wreg| { touched.insert(wreg); }) } false } #[derive(Debug)] pub enum FootprintError { NoIslaFootprintFn, SymbolicInstruction, ExecutionError(String), } impl fmt::Display for FootprintError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use FootprintError::*; match self { NoIslaFootprintFn => write!( f, "Footprint analysis failed. To calculate the syntactic\n\ register footprint, isla expects a sail function\n\ `isla_footprint' to be available in the model, which\n\ can be used to decode and execute an instruction" ), SymbolicInstruction => write!(f, "Instruction opcode found during footprint analysis was symbolic"), ExecutionError(msg) => write!(f, "{}", msg), } } } impl Error for FootprintError { fn source(&self) -> Option<&(dyn Error + 'static)> { None } } /// # Arguments /// /// * `num_threads` - How many threads to use for analysing footprints /// * `thread_buckets` - A vector of paths (event vectors) for each thread in the litmus test /// * `lets` - The initial state of all top-level letbindings in the Sail specification /// * `regs` - The initial register state /// * `shared_state` - The state shared between all symbolic execution runs /// * `isa_config` - The architecture specific configuration information /// * `cache_dir` - A directory to cache footprint results pub fn footprint_analysis<'ir, B, P>( num_threads: usize, thread_buckets: &[Vec<EvPath<B>>], lets: &Bindings<'ir, B>, regs: &Bindings<'ir, B>, shared_state: &SharedState<B>, isa_config: &ISAConfig<B>, cache_dir: P, ) -> Result<HashMap<B, Footprint>, FootprintError> where B: BV, P: AsRef<Path>, { use FootprintError::*; let mut concrete_opcodes: HashSet<B> = HashSet::new(); let mut footprints = HashMap::new(); for thread in thread_buckets { for path in thread { for event in path { match event { Event::Instr(Val::Bits(bv)) => { if let Some(footprint) = Footprint::from_cache(Footprintkey { opcode: bv.to_string() }, cache_dir.as_ref()) { footprints.insert(*bv, footprint); } else { concrete_opcodes.insert(*bv); } } Event::Instr(_) => return Err(SymbolicInstruction), _ => (), } } } } log!(log::VERBOSE, &format!("Got {} uncached concrete opcodes for footprint analysis", concrete_opcodes.len())); let 
function_id = match shared_state.symtab.get("zisla_footprint") { Some(id) => id, None => return Err(NoIslaFootprintFn), }; let (args, _, instrs) = shared_state.functions.get(&function_id).expect("isla_footprint function not in shared state!"); let (task_opcodes, tasks): (Vec<B>, Vec<_>) = concrete_opcodes .iter() .enumerate() .map(|(i, opcode)| { ( opcode, LocalFrame::new(function_id, args, Some(&[Val::Bits(*opcode)]), instrs) .add_lets(lets) .add_regs(regs) .task(i), ) }) .unzip(); let mut footprint_buckets: Vec<Vec<EvPath<B>>> = vec![Vec::new(); tasks.len()]; let queue = Arc::new(SegQueue::new()); let now = Instant::now(); executor::start_multi(num_threads, None, tasks, &shared_state, queue.clone(), &executor::footprint_collector); log!(log::VERBOSE, &format!("Footprint analysis symbolic execution took: {}ms", now.elapsed().as_millis())); loop { match queue.pop() { Ok(Ok((task_id, mut events))) => { let mut events: Vec<Event<B>> = events .drain(..) .rev() // The first cycle is reserved for initialization .skip_while(|ev| !ev.is_cycle()) .filter(|ev| ev.is_reg() || ev.is_memory() || ev.is_branch() || ev.is_smt() || ev.is_fork()) .collect(); isla_lib::simplify::remove_unused(&mut events); footprint_buckets[task_id].push(events) } // Error during execution Ok(Err(msg)) => return Err(ExecutionError(msg)), // Empty queue Err(_) => break, } } let num_footprints: usize = footprint_buckets.iter().map(|instr_paths| instr_paths.len()).sum(); log!(log::VERBOSE, &format!("There are {} footprints", num_footprints)); let read_exclusives: Vec<usize> = isa_config.read_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect(); let write_exclusives: Vec<usize> = isa_config.write_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect(); for (i, paths) in footprint_buckets.iter().enumerate() { let opcode = task_opcodes[i]; log!(log::VERBOSE, &format!("{:?}", opcode)); let mut footprint = Footprint::new(); for events in paths { let evrefs = EventReferences::from_events(events); let mut forks: Vec<Sym> = Vec::new(); for event in events { match event { Event::Fork(_, v, _) => forks.push(*v), Event::ReadReg(reg, accessor, _) if !isa_config.ignored_registers.contains(reg) => { footprint.register_reads.insert((*reg, accessor.clone())); } Event::WriteReg(reg, accessor, data) if !isa_config.ignored_registers.contains(reg) => { footprint.register_writes.insert((*reg, accessor.clone())); // If the data written to the register is tainted by a value read // from memory record this fact. if evrefs.value_taints(data, events).1 { footprint.register_writes_tainted.insert((*reg, accessor.clone())); } } Event::MarkReg { reg, mark } => { if mark == "ignore_write" { footprint.register_writes_ignored.insert(*reg); } } Event::ReadMem { address, .. } => { footprint.is_load = true; if read_exclusives.iter().any(|rk| event.has_read_kind(*rk)) { footprint.is_exclusive = true; } evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ) } Event::WriteMem { address, data, .. } => { footprint.is_store = true; if write_exclusives.iter().any(|wk| event.has_write_kind(*wk)) { footprint.is_exclusive = true; } evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ); evrefs.collect_value_taints( data, events, &mut footprint.write_data_taints.0, &mut footprint.write_data_taints.1, ); } Event::CacheOp { address, .. 
} => { footprint.is_cache_op = true; evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ) } Event::Branch { address } => { footprint.is_branch = true; evrefs.collect_value_taints( address, events, &mut footprint.branch_addr_taints.0, &mut footprint.branch_addr_taints.1, ); for v in &forks { evrefs.collect_taints( *v, events, &mut footprint.branch_addr_taints.0, &mut footprint.branch_addr_taints.1, ) } } _ => (), } } } footprint.cache(Footprintkey { opcode: opcode.to_string() }, cache_dir.as_ref()); footprints.insert(opcode, footprint); } Ok(footprints) }
data_dep
identifier_name
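The masked identifier here is the function name `data_dep`. To make the register-propagation idea behind `touched_by` and `data_dep` concrete, the following is a deliberately simplified, self-contained sketch over string register names; the toy types below are not the real `Footprint`, which also tracks accessors, taint flags, and ignored registers:

use std::collections::HashSet;

// Toy footprint: which registers an instruction reads and writes,
// and which registers feed the data of its memory write.
struct ToyFootprint {
    reads: HashSet<&'static str>,
    writes: HashSet<&'static str>,
    write_data: HashSet<&'static str>,
}

// Registers transitively touched by fps[from]'s writes before reaching fps[to].
fn touched_by(from: usize, to: usize, fps: &[ToyFootprint]) -> HashSet<&'static str> {
    let mut touched = fps[from].writes.clone();
    for fp in &fps[from + 1..to] {
        if fp.reads.iter().any(|r| touched.contains(r)) {
            touched.extend(fp.writes.iter().copied());
        } else {
            // An intervening write that reads nothing touched shadows the register.
            for w in &fp.writes {
                touched.remove(w);
            }
        }
    }
    touched
}

fn data_dep(from: usize, to: usize, fps: &[ToyFootprint]) -> bool {
    from < to && touched_by(from, to, fps).iter().any(|r| fps[to].write_data.contains(r))
}

fn main() {
    let set = |xs: &[&'static str]| xs.iter().copied().collect::<HashSet<_>>();
    let fps = vec![
        // 0: load into x1
        ToyFootprint { reads: set(&["x0"]), writes: set(&["x1"]), write_data: set(&[]) },
        // 1: x2 = x1 + 1
        ToyFootprint { reads: set(&["x1"]), writes: set(&["x2"]), write_data: set(&[]) },
        // 2: store x2 to memory
        ToyFootprint { reads: set(&["x2"]), writes: set(&[]), write_data: set(&["x2"]) },
    ];
    assert!(data_dep(0, 2, &fps));  // the loaded value flows into the stored data
    assert!(!data_dep(1, 0, &fps)); // dependencies only go forward in program order
}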
footprint_analysis.rs
// BSD 2-Clause License // // Copyright (c) 2020 Alasdair Armstrong // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //! This module implements footprint analysis for the concurrency tool //! //! The axiomatic memory model requires deriving (syntactic) address, //! data, and control dependencies. As such, we need to know what //! registers could be touched by each instruction based purely on its //! concrete opcode. For this we analyse all the traces from a litmus //! test run, and use symbolic execution on each opcode again. 
use crossbeam::queue::SegQueue; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::fmt; use std::io::Write; use std::path::Path; use std::sync::Arc; use std::time::Instant; use isla_lib::cache::{Cacheable, Cachekey}; use isla_lib::concrete::BV; use isla_lib::config::ISAConfig; use isla_lib::executor; use isla_lib::executor::LocalFrame; use isla_lib::ir::*; use isla_lib::log; use isla_lib::simplify::{EventReferences, Taints}; use isla_lib::smt::{Accessor, EvPath, Event, Sym}; use isla_lib::zencode; #[derive(Debug, Serialize, Deserialize)] pub struct Footprint { /// Tracks which (symbolic) registers / memory reads can feed into /// a memory write within an instruction write_data_taints: (Taints, bool), /// Tracks with (symbolic) registers / memory reads can feed into /// a memory operator (read/write) address within an instruction mem_addr_taints: (Taints, bool), /// Tracks which (symbolic) registers / memory reads can feed into /// the address of a branch branch_addr_taints: (Taints, bool), /// The set of register reads (with subfield granularity) register_reads: HashSet<(Name, Vec<Accessor>)>, /// The set of register writes (also with subfield granularity) register_writes: HashSet<(Name, Vec<Accessor>)>, /// The set of register writes where the value was tainted by a memory read register_writes_tainted: HashSet<(Name, Vec<Accessor>)>, /// All register writes to the following registers are ignored for /// tracking dependencies within an instruction register_writes_ignored: HashSet<Name>, /// A store is any instruction with a WriteMem event is_store: bool, /// A load is any instruction with a ReadMem event is_load: bool, /// A branch is any instruction with a Branch event is_branch: bool, /// An exclusive is any event with an exclusive read or write kind. is_exclusive: bool, /// A cache-op is any event with a CacheOp event is_cache_op: bool, } pub struct Footprintkey { opcode: String, } impl Cachekey for Footprintkey { fn key(&self) -> String { format!("opcode_{}", self.opcode) } } impl Cacheable for Footprint { type Key = Footprintkey; } impl Footprint { fn new() -> Self { Footprint { write_data_taints: (HashSet::new(), false), mem_addr_taints: (HashSet::new(), false), branch_addr_taints: (HashSet::new(), false), register_reads: HashSet::new(), register_writes: HashSet::new(), register_writes_tainted: HashSet::new(), register_writes_ignored: HashSet::new(), is_store: false, is_load: false, is_branch: false, is_exclusive: false, is_cache_op: false, } } /// This just prints the footprint information in a human-readable /// form for debugging. pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> { write!(buf, "Footprint:\n Memory write data:")?; for (reg, accessor) in &self.write_data_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Memory address:")?; for (reg, accessor) in &self.mem_addr_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Branch address:")?; for (reg, accessor) in &self.branch_addr_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? 
} } write!(buf, "\n Register reads:")?; for (reg, accessor) in &self.register_reads { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Register writes:")?; for (reg, accessor) in &self.register_writes { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Register writes (tainted):")?; for (reg, accessor) in &self.register_writes_tainted { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Is store: {}", self.is_store)?; write!(buf, "\n Is load: {}", self.is_load)?; write!(buf, "\n Is exclusive: {}", self.is_exclusive)?; write!(buf, "\n Is branch: {}", self.is_branch)?; writeln!(buf)?; Ok(()) } } // There is an rmw dependency from `from` to `to` if `from` is a // load-exclusive and `to` is a store-exclusive and there are no // intervening exclusives. #[allow(clippy::needless_range_loop)] pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { if from > to { return false; } let from_footprint = footprints.get(&instrs[from]).unwrap(); if !(from_footprint.is_exclusive && from_footprint.is_load) { return false; } for i in (from + 1)..to { if footprints.get(&instrs[i]).unwrap().is_exclusive { return false; } }
let to_footprint = footprints.get(&instrs[to]).unwrap(); to_footprint.is_exclusive && to_footprint.is_store } /// The set of registers that could be (syntactically) touched by the /// first instruction before reaching the second. #[allow(clippy::needless_range_loop)] fn touched_by<B: BV>( from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>, ) -> HashSet<(Name, Vec<Accessor>)> { let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone(); let mut new_touched = HashSet::new(); for i in (from + 1)..to { let footprint = footprints.get(&instrs[i]).unwrap(); for rreg in &touched { if footprint.register_reads.contains(rreg) { for wreg in &footprint.register_writes { if !footprint.register_writes_ignored.contains(&wreg.0) { new_touched.insert(wreg.clone()); } } } } if new_touched.is_empty() { for wreg in &footprint.register_writes { touched.remove(wreg); } } else { new_touched.drain().for_each(|wreg| { touched.insert(wreg); }) } } touched } /// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// Panics if either `from` or `to` are out-of-bounds in `instrs`, or /// if an instruction does not have a footprint. pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { // `to` must be po-order-later than `from` for the dependency to exist. if from >= to { return false; } let touched = touched_by(from, to, instrs, footprints); // If any of the registers transitively touched by the first // instruction's register writes can feed into a memory address // used by the last we have an address dependency. for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 { if touched.contains(reg) { return true; } } false } /// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// See `addr_dep` pub fn data_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { if from >= to { return false; } let touched = touched_by(from, to, instrs, footprints); for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 { if touched.contains(reg) { return true; } } false } /// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`. 
/// /// # Panics /// /// See `addr_dep` #[allow(clippy::needless_range_loop)] pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { // `to` must be a program-order later load or store let to_footprint = footprints.get(&instrs[from]).unwrap(); if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) { return false; } let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone(); let mut new_touched = Vec::new(); for i in (from + 1)..to { let footprint = footprints.get(&instrs[i]).unwrap(); if footprint.is_branch { for reg in &footprint.branch_addr_taints.0 { if touched.contains(&reg) { return true; } } } for rreg in &touched { if footprint.register_reads.contains(rreg) { for wreg in &footprint.register_writes { if !footprint.register_writes_ignored.contains(&wreg.0) { new_touched.push(wreg.clone()); } } } } new_touched.drain(..).for_each(|wreg| { touched.insert(wreg); }) } false } #[derive(Debug)] pub enum FootprintError { NoIslaFootprintFn, SymbolicInstruction, ExecutionError(String), } impl fmt::Display for FootprintError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use FootprintError::*; match self { NoIslaFootprintFn => write!( f, "Footprint analysis failed. To calculate the syntactic\n\ register footprint, isla expects a sail function\n\ `isla_footprint' to be available in the model, which\n\ can be used to decode and execute an instruction" ), SymbolicInstruction => write!(f, "Instruction opcode found during footprint analysis was symbolic"), ExecutionError(msg) => write!(f, "{}", msg), } } } impl Error for FootprintError { fn source(&self) -> Option<&(dyn Error + 'static)> { None } } /// # Arguments /// /// * `num_threads` - How many threads to use for analysing footprints /// * `thread_buckets` - A vector of paths (event vectors) for each thread in the litmus test /// * `lets` - The initial state of all top-level letbindings in the Sail specification /// * `regs` - The initial register state /// * `shared_state` - The state shared between all symbolic execution runs /// * `isa_config` - The architecture specific configuration information /// * `cache_dir` - A directory to cache footprint results pub fn footprint_analysis<'ir, B, P>( num_threads: usize, thread_buckets: &[Vec<EvPath<B>>], lets: &Bindings<'ir, B>, regs: &Bindings<'ir, B>, shared_state: &SharedState<B>, isa_config: &ISAConfig<B>, cache_dir: P, ) -> Result<HashMap<B, Footprint>, FootprintError> where B: BV, P: AsRef<Path>, { use FootprintError::*; let mut concrete_opcodes: HashSet<B> = HashSet::new(); let mut footprints = HashMap::new(); for thread in thread_buckets { for path in thread { for event in path { match event { Event::Instr(Val::Bits(bv)) => { if let Some(footprint) = Footprint::from_cache(Footprintkey { opcode: bv.to_string() }, cache_dir.as_ref()) { footprints.insert(*bv, footprint); } else { concrete_opcodes.insert(*bv); } } Event::Instr(_) => return Err(SymbolicInstruction), _ => (), } } } } log!(log::VERBOSE, &format!("Got {} uncached concrete opcodes for footprint analysis", concrete_opcodes.len())); let function_id = match shared_state.symtab.get("zisla_footprint") { Some(id) => id, None => return Err(NoIslaFootprintFn), }; let (args, _, instrs) = shared_state.functions.get(&function_id).expect("isla_footprint function not in shared state!"); let (task_opcodes, tasks): (Vec<B>, Vec<_>) = concrete_opcodes .iter() .enumerate() .map(|(i, opcode)| { ( opcode, LocalFrame::new(function_id, args, 
Some(&[Val::Bits(*opcode)]), instrs) .add_lets(lets) .add_regs(regs) .task(i), ) }) .unzip(); let mut footprint_buckets: Vec<Vec<EvPath<B>>> = vec![Vec::new(); tasks.len()]; let queue = Arc::new(SegQueue::new()); let now = Instant::now(); executor::start_multi(num_threads, None, tasks, &shared_state, queue.clone(), &executor::footprint_collector); log!(log::VERBOSE, &format!("Footprint analysis symbolic execution took: {}ms", now.elapsed().as_millis())); loop { match queue.pop() { Ok(Ok((task_id, mut events))) => { let mut events: Vec<Event<B>> = events .drain(..) .rev() // The first cycle is reserved for initialization .skip_while(|ev| !ev.is_cycle()) .filter(|ev| ev.is_reg() || ev.is_memory() || ev.is_branch() || ev.is_smt() || ev.is_fork()) .collect(); isla_lib::simplify::remove_unused(&mut events); footprint_buckets[task_id].push(events) } // Error during execution Ok(Err(msg)) => return Err(ExecutionError(msg)), // Empty queue Err(_) => break, } } let num_footprints: usize = footprint_buckets.iter().map(|instr_paths| instr_paths.len()).sum(); log!(log::VERBOSE, &format!("There are {} footprints", num_footprints)); let read_exclusives: Vec<usize> = isa_config.read_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect(); let write_exclusives: Vec<usize> = isa_config.write_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect(); for (i, paths) in footprint_buckets.iter().enumerate() { let opcode = task_opcodes[i]; log!(log::VERBOSE, &format!("{:?}", opcode)); let mut footprint = Footprint::new(); for events in paths { let evrefs = EventReferences::from_events(events); let mut forks: Vec<Sym> = Vec::new(); for event in events { match event { Event::Fork(_, v, _) => forks.push(*v), Event::ReadReg(reg, accessor, _) if !isa_config.ignored_registers.contains(reg) => { footprint.register_reads.insert((*reg, accessor.clone())); } Event::WriteReg(reg, accessor, data) if !isa_config.ignored_registers.contains(reg) => { footprint.register_writes.insert((*reg, accessor.clone())); // If the data written to the register is tainted by a value read // from memory record this fact. if evrefs.value_taints(data, events).1 { footprint.register_writes_tainted.insert((*reg, accessor.clone())); } } Event::MarkReg { reg, mark } => { if mark == "ignore_write" { footprint.register_writes_ignored.insert(*reg); } } Event::ReadMem { address, .. } => { footprint.is_load = true; if read_exclusives.iter().any(|rk| event.has_read_kind(*rk)) { footprint.is_exclusive = true; } evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ) } Event::WriteMem { address, data, .. } => { footprint.is_store = true; if write_exclusives.iter().any(|wk| event.has_write_kind(*wk)) { footprint.is_exclusive = true; } evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ); evrefs.collect_value_taints( data, events, &mut footprint.write_data_taints.0, &mut footprint.write_data_taints.1, ); } Event::CacheOp { address, .. 
} => { footprint.is_cache_op = true; evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ) } Event::Branch { address } => { footprint.is_branch = true; evrefs.collect_value_taints( address, events, &mut footprint.branch_addr_taints.0, &mut footprint.branch_addr_taints.1, ); for v in &forks { evrefs.collect_taints( *v, events, &mut footprint.branch_addr_taints.0, &mut footprint.branch_addr_taints.1, ) } } _ => (), } } } footprint.cache(Footprintkey { opcode: opcode.to_string() }, cache_dir.as_ref()); footprints.insert(opcode, footprint); } Ok(footprints) }
random_line_split
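This record splits footprint_analysis.rs at an arbitrary line. One rule worth isolating is the exclusive-pair condition in `rmw_dep`: a read-modify-write dependency needs a load-exclusive, a later store-exclusive, and no exclusive accesses in between. A toy restatement with plain flags rather than the real `Footprint` struct:

#[derive(Clone, Copy)]
struct Flags {
    is_load: bool,
    is_store: bool,
    is_exclusive: bool,
}

fn rmw_dep(from: usize, to: usize, flags: &[Flags]) -> bool {
    if from >= to || !(flags[from].is_load && flags[from].is_exclusive) {
        return false;
    }
    // Any exclusive access strictly between the pair breaks the dependency.
    if flags[from + 1..to].iter().any(|f| f.is_exclusive) {
        return false;
    }
    flags[to].is_store && flags[to].is_exclusive
}

fn main() {
    let ldxr = Flags { is_load: true,  is_store: false, is_exclusive: true };
    let add  = Flags { is_load: false, is_store: false, is_exclusive: false };
    let stxr = Flags { is_load: false, is_store: true,  is_exclusive: true };
    assert!(rmw_dep(0, 2, &[ldxr, add, stxr]));
    assert!(!rmw_dep(0, 3, &[ldxr, stxr, add, stxr])); // an intervening exclusive breaks it
}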
footprint_analysis.rs
// BSD 2-Clause License // // Copyright (c) 2020 Alasdair Armstrong // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //! This module implements footprint analysis for the concurrency tool //! //! The axiomatic memory model requires deriving (syntactic) address, //! data, and control dependencies. As such, we need to know what //! registers could be touched by each instruction based purely on its //! concrete opcode. For this we analyse all the traces from a litmus //! test run, and use symbolic execution on each opcode again. 
use crossbeam::queue::SegQueue; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::fmt; use std::io::Write; use std::path::Path; use std::sync::Arc; use std::time::Instant; use isla_lib::cache::{Cacheable, Cachekey}; use isla_lib::concrete::BV; use isla_lib::config::ISAConfig; use isla_lib::executor; use isla_lib::executor::LocalFrame; use isla_lib::ir::*; use isla_lib::log; use isla_lib::simplify::{EventReferences, Taints}; use isla_lib::smt::{Accessor, EvPath, Event, Sym}; use isla_lib::zencode; #[derive(Debug, Serialize, Deserialize)] pub struct Footprint { /// Tracks which (symbolic) registers / memory reads can feed into /// a memory write within an instruction write_data_taints: (Taints, bool), /// Tracks with (symbolic) registers / memory reads can feed into /// a memory operator (read/write) address within an instruction mem_addr_taints: (Taints, bool), /// Tracks which (symbolic) registers / memory reads can feed into /// the address of a branch branch_addr_taints: (Taints, bool), /// The set of register reads (with subfield granularity) register_reads: HashSet<(Name, Vec<Accessor>)>, /// The set of register writes (also with subfield granularity) register_writes: HashSet<(Name, Vec<Accessor>)>, /// The set of register writes where the value was tainted by a memory read register_writes_tainted: HashSet<(Name, Vec<Accessor>)>, /// All register writes to the following registers are ignored for /// tracking dependencies within an instruction register_writes_ignored: HashSet<Name>, /// A store is any instruction with a WriteMem event is_store: bool, /// A load is any instruction with a ReadMem event is_load: bool, /// A branch is any instruction with a Branch event is_branch: bool, /// An exclusive is any event with an exclusive read or write kind. is_exclusive: bool, /// A cache-op is any event with a CacheOp event is_cache_op: bool, } pub struct Footprintkey { opcode: String, } impl Cachekey for Footprintkey { fn key(&self) -> String { format!("opcode_{}", self.opcode) } } impl Cacheable for Footprint { type Key = Footprintkey; } impl Footprint { fn new() -> Self { Footprint { write_data_taints: (HashSet::new(), false), mem_addr_taints: (HashSet::new(), false), branch_addr_taints: (HashSet::new(), false), register_reads: HashSet::new(), register_writes: HashSet::new(), register_writes_tainted: HashSet::new(), register_writes_ignored: HashSet::new(), is_store: false, is_load: false, is_branch: false, is_exclusive: false, is_cache_op: false, } } /// This just prints the footprint information in a human-readable /// form for debugging. pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> { write!(buf, "Footprint:\n Memory write data:")?; for (reg, accessor) in &self.write_data_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Memory address:")?; for (reg, accessor) in &self.mem_addr_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Branch address:")?; for (reg, accessor) in &self.branch_addr_taints.0 { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? 
} } write!(buf, "\n Register reads:")?; for (reg, accessor) in &self.register_reads { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Register writes:")?; for (reg, accessor) in &self.register_writes { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Register writes (tainted):")?; for (reg, accessor) in &self.register_writes_tainted { write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?; for component in accessor { component.pretty(buf, symtab)? } } write!(buf, "\n Is store: {}", self.is_store)?; write!(buf, "\n Is load: {}", self.is_load)?; write!(buf, "\n Is exclusive: {}", self.is_exclusive)?; write!(buf, "\n Is branch: {}", self.is_branch)?; writeln!(buf)?; Ok(()) } } // There is an rmw dependency from `from` to `to` if `from` is a // load-exclusive and `to` is a store-exclusive and there are no // intervening exclusives. #[allow(clippy::needless_range_loop)] pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { if from > to { return false; } let from_footprint = footprints.get(&instrs[from]).unwrap(); if !(from_footprint.is_exclusive && from_footprint.is_load) { return false; } for i in (from + 1)..to { if footprints.get(&instrs[i]).unwrap().is_exclusive
} let to_footprint = footprints.get(&instrs[to]).unwrap(); to_footprint.is_exclusive && to_footprint.is_store } /// The set of registers that could be (syntactically) touched by the /// first instruction before reaching the second. #[allow(clippy::needless_range_loop)] fn touched_by<B: BV>( from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>, ) -> HashSet<(Name, Vec<Accessor>)> { let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone(); let mut new_touched = HashSet::new(); for i in (from + 1)..to { let footprint = footprints.get(&instrs[i]).unwrap(); for rreg in &touched { if footprint.register_reads.contains(rreg) { for wreg in &footprint.register_writes { if !footprint.register_writes_ignored.contains(&wreg.0) { new_touched.insert(wreg.clone()); } } } } if new_touched.is_empty() { for wreg in &footprint.register_writes { touched.remove(wreg); } } else { new_touched.drain().for_each(|wreg| { touched.insert(wreg); }) } } touched } /// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// Panics if either `from` or `to` are out-of-bounds in `instrs`, or /// if an instruction does not have a footprint. pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { // `to` must be po-order-later than `from` for the dependency to exist. if from >= to { return false; } let touched = touched_by(from, to, instrs, footprints); // If any of the registers transitively touched by the first // instruction's register writes can feed into a memory address // used by the last we have an address dependency. for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 { if touched.contains(reg) { return true; } } false } /// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`. /// /// # Panics /// /// See `addr_dep` pub fn data_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { if from >= to { return false; } let touched = touched_by(from, to, instrs, footprints); for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 { if touched.contains(reg) { return true; } } false } /// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`. 
/// /// # Panics /// /// See `addr_dep` #[allow(clippy::needless_range_loop)] pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool { // `to` must be a program-order later load or store let to_footprint = footprints.get(&instrs[from]).unwrap(); if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) { return false; } let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone(); let mut new_touched = Vec::new(); for i in (from + 1)..to { let footprint = footprints.get(&instrs[i]).unwrap(); if footprint.is_branch { for reg in &footprint.branch_addr_taints.0 { if touched.contains(&reg) { return true; } } } for rreg in &touched { if footprint.register_reads.contains(rreg) { for wreg in &footprint.register_writes { if !footprint.register_writes_ignored.contains(&wreg.0) { new_touched.push(wreg.clone()); } } } } new_touched.drain(..).for_each(|wreg| { touched.insert(wreg); }) } false } #[derive(Debug)] pub enum FootprintError { NoIslaFootprintFn, SymbolicInstruction, ExecutionError(String), } impl fmt::Display for FootprintError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use FootprintError::*; match self { NoIslaFootprintFn => write!( f, "Footprint analysis failed. To calculate the syntactic\n\ register footprint, isla expects a sail function\n\ `isla_footprint' to be available in the model, which\n\ can be used to decode and execute an instruction" ), SymbolicInstruction => write!(f, "Instruction opcode found during footprint analysis was symbolic"), ExecutionError(msg) => write!(f, "{}", msg), } } } impl Error for FootprintError { fn source(&self) -> Option<&(dyn Error + 'static)> { None } } /// # Arguments /// /// * `num_threads` - How many threads to use for analysing footprints /// * `thread_buckets` - A vector of paths (event vectors) for each thread in the litmus test /// * `lets` - The initial state of all top-level letbindings in the Sail specification /// * `regs` - The initial register state /// * `shared_state` - The state shared between all symbolic execution runs /// * `isa_config` - The architecture specific configuration information /// * `cache_dir` - A directory to cache footprint results pub fn footprint_analysis<'ir, B, P>( num_threads: usize, thread_buckets: &[Vec<EvPath<B>>], lets: &Bindings<'ir, B>, regs: &Bindings<'ir, B>, shared_state: &SharedState<B>, isa_config: &ISAConfig<B>, cache_dir: P, ) -> Result<HashMap<B, Footprint>, FootprintError> where B: BV, P: AsRef<Path>, { use FootprintError::*; let mut concrete_opcodes: HashSet<B> = HashSet::new(); let mut footprints = HashMap::new(); for thread in thread_buckets { for path in thread { for event in path { match event { Event::Instr(Val::Bits(bv)) => { if let Some(footprint) = Footprint::from_cache(Footprintkey { opcode: bv.to_string() }, cache_dir.as_ref()) { footprints.insert(*bv, footprint); } else { concrete_opcodes.insert(*bv); } } Event::Instr(_) => return Err(SymbolicInstruction), _ => (), } } } } log!(log::VERBOSE, &format!("Got {} uncached concrete opcodes for footprint analysis", concrete_opcodes.len())); let function_id = match shared_state.symtab.get("zisla_footprint") { Some(id) => id, None => return Err(NoIslaFootprintFn), }; let (args, _, instrs) = shared_state.functions.get(&function_id).expect("isla_footprint function not in shared state!"); let (task_opcodes, tasks): (Vec<B>, Vec<_>) = concrete_opcodes .iter() .enumerate() .map(|(i, opcode)| { ( opcode, LocalFrame::new(function_id, args, 
Some(&[Val::Bits(*opcode)]), instrs) .add_lets(lets) .add_regs(regs) .task(i), ) }) .unzip(); let mut footprint_buckets: Vec<Vec<EvPath<B>>> = vec![Vec::new(); tasks.len()]; let queue = Arc::new(SegQueue::new()); let now = Instant::now(); executor::start_multi(num_threads, None, tasks, &shared_state, queue.clone(), &executor::footprint_collector); log!(log::VERBOSE, &format!("Footprint analysis symbolic execution took: {}ms", now.elapsed().as_millis())); loop { match queue.pop() { Ok(Ok((task_id, mut events))) => { let mut events: Vec<Event<B>> = events .drain(..) .rev() // The first cycle is reserved for initialization .skip_while(|ev| !ev.is_cycle()) .filter(|ev| ev.is_reg() || ev.is_memory() || ev.is_branch() || ev.is_smt() || ev.is_fork()) .collect(); isla_lib::simplify::remove_unused(&mut events); footprint_buckets[task_id].push(events) } // Error during execution Ok(Err(msg)) => return Err(ExecutionError(msg)), // Empty queue Err(_) => break, } } let num_footprints: usize = footprint_buckets.iter().map(|instr_paths| instr_paths.len()).sum(); log!(log::VERBOSE, &format!("There are {} footprints", num_footprints)); let read_exclusives: Vec<usize> = isa_config.read_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect(); let write_exclusives: Vec<usize> = isa_config.write_exclusives.iter().map(|k| shared_state.enum_member(*k).unwrap()).collect(); for (i, paths) in footprint_buckets.iter().enumerate() { let opcode = task_opcodes[i]; log!(log::VERBOSE, &format!("{:?}", opcode)); let mut footprint = Footprint::new(); for events in paths { let evrefs = EventReferences::from_events(events); let mut forks: Vec<Sym> = Vec::new(); for event in events { match event { Event::Fork(_, v, _) => forks.push(*v), Event::ReadReg(reg, accessor, _) if !isa_config.ignored_registers.contains(reg) => { footprint.register_reads.insert((*reg, accessor.clone())); } Event::WriteReg(reg, accessor, data) if !isa_config.ignored_registers.contains(reg) => { footprint.register_writes.insert((*reg, accessor.clone())); // If the data written to the register is tainted by a value read // from memory record this fact. if evrefs.value_taints(data, events).1 { footprint.register_writes_tainted.insert((*reg, accessor.clone())); } } Event::MarkReg { reg, mark } => { if mark == "ignore_write" { footprint.register_writes_ignored.insert(*reg); } } Event::ReadMem { address, .. } => { footprint.is_load = true; if read_exclusives.iter().any(|rk| event.has_read_kind(*rk)) { footprint.is_exclusive = true; } evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ) } Event::WriteMem { address, data, .. } => { footprint.is_store = true; if write_exclusives.iter().any(|wk| event.has_write_kind(*wk)) { footprint.is_exclusive = true; } evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ); evrefs.collect_value_taints( data, events, &mut footprint.write_data_taints.0, &mut footprint.write_data_taints.1, ); } Event::CacheOp { address, .. 
} => { footprint.is_cache_op = true; evrefs.collect_value_taints( address, events, &mut footprint.mem_addr_taints.0, &mut footprint.mem_addr_taints.1, ) } Event::Branch { address } => { footprint.is_branch = true; evrefs.collect_value_taints( address, events, &mut footprint.branch_addr_taints.0, &mut footprint.branch_addr_taints.1, ); for v in &forks { evrefs.collect_taints( *v, events, &mut footprint.branch_addr_taints.0, &mut footprint.branch_addr_taints.1, ) } } _ => (), } } } footprint.cache(Footprintkey { opcode: opcode.to_string() }, cache_dir.as_ref()); footprints.insert(opcode, footprint); } Ok(footprints) }
{ return false; }
conditional_block
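The Rust record above derives syntactic address, data, and control dependencies from per-opcode footprints. As a rough illustration of the `touched_by`/`addr_dep` idea (not isla's actual API), the Go sketch below propagates tainted register writes forward through intervening instructions and then checks whether any touched register feeds the final instruction's memory address. The `footprint` struct and the register names are hypothetical simplifications.

```go
package main

import "fmt"

// footprint is a hypothetical, simplified stand-in for the Footprint struct above.
type footprint struct {
	regReads       map[string]bool // registers read by the instruction
	regWrites      map[string]bool // registers written by the instruction
	regWritesTaint map[string]bool // register writes tainted by a memory read
	memAddrTaints  map[string]bool // registers feeding a memory address
}

// addrDep mirrors the touched_by/addr_dep logic: start from the tainted
// register writes of instrs[from], propagate them through intervening
// instructions, then test whether a touched register feeds the address
// used by instrs[to].
func addrDep(from, to int, instrs []footprint) bool {
	if from >= to {
		return false
	}
	touched := map[string]bool{}
	for r := range instrs[from].regWritesTaint {
		touched[r] = true
	}
	for i := from + 1; i < to; i++ {
		fp := instrs[i]
		newTouched := map[string]bool{}
		for r := range touched {
			if fp.regReads[r] {
				for w := range fp.regWrites {
					newTouched[w] = true
				}
			}
		}
		if len(newTouched) == 0 {
			// The dependency chain was overwritten without being read.
			for w := range fp.regWrites {
				delete(touched, w)
			}
		} else {
			for w := range newTouched {
				touched[w] = true
			}
		}
	}
	for r := range instrs[to].memAddrTaints {
		if touched[r] {
			return true
		}
	}
	return false
}

func main() {
	// e.g. ldr x1,[x2]; eor x3,x1,x1; ldr x4,[x5,x3] -- a classic address dependency.
	instrs := []footprint{
		{regWritesTaint: map[string]bool{"x1": true}},
		{regReads: map[string]bool{"x1": true}, regWrites: map[string]bool{"x3": true}},
		{memAddrTaints: map[string]bool{"x3": true}},
	}
	fmt.Println(addrDep(0, 2, instrs)) // true
}
```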
express.js
// file: express.js // start: node express.js // install: npm i -S express // to see: in browser, url=localhost:3000 // thx to https://flaviocopes.com/ /** Install * ----------------- * * npm init * npm i -S express * yarn init * yarn add express */ const express = require('express'); const app = express(); var cors = require('cors'); const session = require('express-session'); const { check } = require('express-validator/check') // for url like localhost:3000 eg1_hello_word = () => { app.get('/', (req, res) => res.send('Hello World!')) app.listen(3000, () => console.log('Server ready')) }; // for url like localhost:3000/?name=flavio&age=35 eg2_query_parameter = () => { app.get('/', (req, res) => { console.log('query: all') console.log('--------------------') console.log(req.query) console.log('query: one by one') console.log('--------------------') for (const key in req.query) { console.log(key, ': ', req.query[key]) } console.log(`req.query.name: ${req.query.name}`) console.log('--------------------') res.end() }); app.listen(3000) }; eg3_post_query = () => { // for Content-Type: application/json // if header = app.use(express.json()); // for Content-Type: application/x-www-form-urlencoded // if header = app.use(express.urlencoded()); app.post('/form', (req, res) => { const name = req.body.name; }); }; eg4_response = () => { app.get('/answer', (req, res) => { // if give text Content-Type: text/html // if give object Content-Type: application/json res.send({"coucou": "walou"}); }); app.get('/200', (req, res) => { res.sendStatus(200); // <=> res.status(200).send('Ok'); }); app.get('/403', (req, res) => { res.sendStatus(403); // <=> res.status(403).send('Forbidden'); }); app.get('/404', (req, res) => { res.sendStatus(404); // <=> res.status(404).send('File not found'); }); app.get('/500', (req, res) => { res.sendStatus(500) // === res.status(500).send('Internal Server Error') }); app.get('/json', (req, res) => { res.json({"coucou": "walou"}); }); app.listen(3000, () => console.log('Server ready')) }; eg5_header = () => { app.get('/json', (req, res) => { res.set('content-type', 'application/json'); res.type('json'); res.end(); }); app.listen(3000, () => console.log('Server ready')) }; eg6_redirect = () => { // made a 302 app.get('/1', (req, res) => { res.redirect('/2'); }); app.get('/2', (req, res) => { res.redirect(301, '/3/hey'); }); app.get('/3/hey', (req, res) => { res.redirect(301, '/..'); }); app.get('/3', (req, res) => { res.end(); }); app.get('/back', (req, res) => { res.redirect('back'); }); app.listen(3000, () => console.log('Server ready')) }; eg7_routing_parameters = () => { app.get('/uppercase/:theValue', (req, res) => { res.send(req.params.theValue.toUpperCase()); }); // regex will match /post , /post/first , /thepost , /posting/something , and so on. app.get(/post/, (req, res) => res.end()) app.listen(3000, () => console.log('Server ready')) }; eg8_cors = () => { const whitelist = ['http://example1.com', 'http://example2.com'] const corsOptions = { origin: function(origin, callback) { if (whitelist.indexOf(origin) !== -1)
else { callback(new Error('Not allowed by CORS')) } } } app.get('/with-cors', cors(corsOptions), (req, res, next) => { res.json({ msg: 'WHOAH with CORS it works!' }); }); app.listen(3000, () => console.log('Server ready')) }; eg9_prefligth = () => { //allow OPTIONS on just one resource app.options('/the/resource/you/request', cors()); //allow OPTIONS on all resources app.options('*', cors()); }; eg10_middleware = () => { const myMiddleware = (req, res, next) => { next() }; app.get('/', myMiddleware, (req, res) => res.send('Hello World!')); }; eg11_static_file = () => { app.get('/', (req, res) => res.download('./yarn.lock')); app.get('/', (req, res) => res.download('./yarn.lock', './dat_spam.txt')); app.listen(3000, () => console.log('Server ready')); }; eg12_session = () => { app.use(session({ 'secret': '343ji43j4n3jn4jk3n', })); app.get('/', (req, res, next) => { req.session.name = 'Flavio' res.send(req.session); console.log(req.session); }); app.listen(3000, () => console.log('Server ready')); }; eg13_validating_input = () => { app.post('/form', [ check('name') .isAlpha() .withMessage('Must be only alphabetical chars') .isLength({ min: 10 }) .withMessage('Must be at least 10 chars long'), check('email') .isEmail() .custom(email => { if (alreadyHaveEmail(email)) { throw new Error('Email already registered') } }), check('age').isNumeric() ], (req, res) => { const name = req.body.name; const email = req.body.email; const age = req.body.age; }); app.listen(3000, () => console.log('Server ready')); }; eg14_sanitizing = () => { app.use(express.json()); // trim() trims characters (whitespace by default) at the beginning and at the end of a string // escape() replaces < , > , & , ' , " and / with their corresponding HTML entities // normalizeEmail() canonicalizes an email address. Accepts several options to lowercase email addresses or subaddresses (e.g. [email protected] ) // blacklist() remove characters that appear in the blacklist // whitelist() remove characters that do not appear in the whitelist // unescape() replaces HTML encoded entities with < , > , & , ' , " and / // ltrim() like trim(), but only trims characters at the start of the string // rtrim() like trim(), but only trims characters at the end of the string // stripLow() remove ASCII control characters, which are normally invisible const sanitizeValue = value => { return value; }; app.post('/form', [ check('name') .isAlpha() .withMessage('Must be only alphabetical chars') .isLength({ min: 10 }) .withMessage('Must be at least 10 chars long') .trim().escape(), check('email') .isEmail() .custom(email => { if (alreadyHaveEmail(email)) { throw new Error('Email already registered') } }) .normalizeEmail(), check('age').isNumeric() .trim().escape(), check('value').customSanitizer(value => sanitizeValue(value)), ], (req, res) => { const name = req.body.name; const email = req.body.email; const age = req.body.age; }); app.listen(3000, () => console.log('Server ready')); }; eg15_handling_form = () => { // wtf ... 
don t understand app.listen(3000, () => console.log('Server ready')); }; eg16_file_upload = () => { app.listen(3000, () => console.log('Server ready')); }; eg17_https = () => { /** * with openssl generate certs * `openssl req -nodes -new -x509 -keyout server.key -out server.cert` * Just remember to set this to localhost */ const https = require('https'); const fs = require('fs'); app.get('/', (req, res) => { res.send('Hello HTTPS!'); }); https.createServer({ key: fs.readFileSync('server.key'), cert: fs.readFileSync('server.cert') }, app).listen(3000, () => { console.log('Listening...'); }); }; eg18_let_s_encrypt = () => { /** * install certbot * ``` * sudo add-apt repository ppa:certbot/certbot * sudo apt-get update * sudo apt-get install certbot * ``` * * generate cert * certbot certonly --manual * * active renewal * 0 */12 * * * root /usr/local/bin/certbot renew >/dev/null 2>&1 */ app.use(express.static(__dirname + '/static', { dotfiles: 'allow' } )) const fs = require('fs') const https = require('https') const app = express() app.get('/', (req, res) => { res.send('Hello HTTPS!') }); https.createServer({ key: fs.readFileSync('/etc/letsencrypt/path/to/key.pem'), cert: fs.readFileSync('/etc/letsencrypt/path/to/cert.pem'), ca: fs.readFileSync('/etc/letsencrypt/path/to/chain.pem') }, app).listen(443, () => { console.log('Listening...') }); } exemple_sumary = (sumary_number) => { switch (sumary_number) { case 1: { eg1_hello_word(); break; } case 2: { eg2_query_parameter(); break; } case 3: { eg3_post_query(); break; } case 4: { eg4_response(); break; } case 5: { eg5_header(); break; } case 6: { eg6_redirect(); break; } case 7: { eg7_routing_parameters(); break; } case 8: { eg8_cors(); break; } case 9: { eg9_prefligth(); break; } case 10: { eg10_middleware(); break; } case 11: { eg11_static_file(); break; } case 12: { eg12_session(); break; } case 13: { eg13_validating_input(); break; } case 14: { eg14_sanitizing(); break; } case 15: { eg15_handling_form(); break; } case 16: { eg16_file_upload(); break; } case 17: { eg17_https(); break; } case 18: { eg18_let_s_encrypt(); break; } } } exemple_sumary(12);
{ callback(null, true) }
conditional_block
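The removed span in this express.js record is the success branch of the `eg8_cors` whitelist: the `origin` callback only accepts origins found in the whitelist. For comparison, here is a rough Go (`net/http`) equivalent of the same origin check; the handler names and the 403 response are my own choices, not part of the express example.

```go
package main

import "net/http"

var whitelist = map[string]bool{
	"http://example1.com": true,
	"http://example2.com": true,
}

// withCORS allows the request only when its Origin header is whitelisted,
// mirroring the corsOptions.origin callback: accept, or reject with an error.
func withCORS(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		origin := r.Header.Get("Origin")
		if !whitelist[origin] {
			http.Error(w, "Not allowed by CORS", http.StatusForbidden)
			return
		}
		w.Header().Set("Access-Control-Allow-Origin", origin)
		next(w, r)
	}
}

func main() {
	http.HandleFunc("/with-cors", withCORS(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"msg": "WHOAH with CORS it works!"}`))
	}))
	http.ListenAndServe(":3000", nil)
}
```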
express.js
// file: express.js // start: node express.js // install: npm i -S express // to see: in browser, url=localhost:3000 // thx to https://flaviocopes.com/ /** Install * ----------------- * * npm init * npm i -S express * yarn init * yarn add express */ const express = require('express'); const app = express(); var cors = require('cors'); const session = require('express-session'); const { check } = require('express-validator/check') // for url like localhost:3000 eg1_hello_word = () => { app.get('/', (req, res) => res.send('Hello World!')) app.listen(3000, () => console.log('Server ready')) }; // for url like localhost:3000/?name=flavio&age=35 eg2_query_parameter = () => { app.get('/', (req, res) => { console.log('query: all') console.log('--------------------') console.log(req.query) console.log('query: one by one') console.log('--------------------') for (const key in req.query) { console.log(key, ': ', req.query[key]) } console.log(`req.query.name: ${req.query.name}`)
res.end() }); app.listen(3000) }; eg3_post_query = () => { // for Content-Type: application/json // if header = app.use(express.json()); // for Content-Type: application/x-www-form-urlencoded // if header = app.use(express.urlencoded()); app.post('/form', (req, res) => { const name = req.body.name; }); }; eg4_response = () => { app.get('/answer', (req, res) => { // if give text Content-Type: text/html // if give object Content-Type: application/json res.send({"coucou": "walou"}); }); app.get('/200', (req, res) => { res.sendStatus(200); // <=> res.status(200).send('Ok'); }); app.get('/403', (req, res) => { res.sendStatus(403); // <=> res.status(403).send('Forbidden'); }); app.get('/404', (req, res) => { res.sendStatus(404); // <=> res.status(404).send('File not found'); }); app.get('/500', (req, res) => { res.sendStatus(500) // === res.status(500).send('Internal Server Error') }); app.get('/json', (req, res) => { res.json({"coucou": "walou"}); }); app.listen(3000, () => console.log('Server ready')) }; eg5_header = () => { app.get('/json', (req, res) => { res.set('content-type', 'application/json'); res.type('json'); res.end(); }); app.listen(3000, () => console.log('Server ready')) }; eg6_redirect = () => { // made a 302 app.get('/1', (req, res) => { res.redirect('/2'); }); app.get('/2', (req, res) => { res.redirect(301, '/3/hey'); }); app.get('/3/hey', (req, res) => { res.redirect(301, '/..'); }); app.get('/3', (req, res) => { res.end(); }); app.get('/back', (req, res) => { res.redirect('back'); }); app.listen(3000, () => console.log('Server ready')) }; eg7_routing_parameters = () => { app.get('/uppercase/:theValue', (req, res) => { res.send(req.params.theValue.toUpperCase()); }); // regex will match /post , /post/first , /thepost , /posting/something , and so on. app.get(/post/, (req, res) => res.end()) app.listen(3000, () => console.log('Server ready')) }; eg8_cors = () => { const whitelist = ['http://example1.com', 'http://example2.com'] const corsOptions = { origin: function(origin, callback) { if (whitelist.indexOf(origin) !== -1) { callback(null, true) } else { callback(new Error('Not allowed by CORS')) } } } app.get('/with-cors', cors(corsOptions), (req, res, next) => { res.json({ msg: 'WHOAH with CORS it works!' 
}); }); app.listen(3000, () => console.log('Server ready')) }; eg9_prefligth = () => { //allow OPTIONS on just one resource app.options('/the/resource/you/request', cors()); //allow OPTIONS on all resources app.options('*', cors()); }; eg10_middleware = () => { const myMiddleware = (req, res, next) => { next() }; app.get('/', myMiddleware, (req, res) => res.send('Hello World!')); }; eg11_static_file = () => { app.get('/', (req, res) => res.download('./yarn.lock')); app.get('/', (req, res) => res.download('./yarn.lock', './dat_spam.txt')); app.listen(3000, () => console.log('Server ready')); }; eg12_session = () => { app.use(session({ 'secret': '343ji43j4n3jn4jk3n', })); app.get('/', (req, res, next) => { req.session.name = 'Flavio' res.send(req.session); console.log(req.session); }); app.listen(3000, () => console.log('Server ready')); }; eg13_validating_input = () => { app.post('/form', [ check('name') .isAlpha() .withMessage('Must be only alphabetical chars') .isLength({ min: 10 }) .withMessage('Must be at least 10 chars long'), check('email') .isEmail() .custom(email => { if (alreadyHaveEmail(email)) { throw new Error('Email already registered') } }), check('age').isNumeric() ], (req, res) => { const name = req.body.name; const email = req.body.email; const age = req.body.age; }); app.listen(3000, () => console.log('Server ready')); }; eg14_sanitizing = () => { app.use(express.json()); // trim() trims characters (whitespace by default) at the beginning and at the end of a string // escape() replaces < , > , & , ' , " and / with their corresponding HTML entities // normalizeEmail() canonicalizes an email address. Accepts several options to lowercase email addresses or subaddresses (e.g. [email protected] ) // blacklist() remove characters that appear in the blacklist // whitelist() remove characters that do not appear in the whitelist // unescape() replaces HTML encoded entities with < , > , & , ' , " and / // ltrim() like trim(), but only trims characters at the start of the string // rtrim() like trim(), but only trims characters at the end of the string // stripLow() remove ASCII control characters, which are normally invisible const sanitizeValue = value => { return value; }; app.post('/form', [ check('name') .isAlpha() .withMessage('Must be only alphabetical chars') .isLength({ min: 10 }) .withMessage('Must be at least 10 chars long') .trim().escape(), check('email') .isEmail() .custom(email => { if (alreadyHaveEmail(email)) { throw new Error('Email already registered') } }) .normalizeEmail(), check('age').isNumeric() .trim().escape(), check('value').customSanitizer(value => sanitizeValue(value)), ], (req, res) => { const name = req.body.name; const email = req.body.email; const age = req.body.age; }); app.listen(3000, () => console.log('Server ready')); }; eg15_handling_form = () => { // wtf ... 
don t understand app.listen(3000, () => console.log('Server ready')); }; eg16_file_upload = () => { app.listen(3000, () => console.log('Server ready')); }; eg17_https = () => { /** * with openssl generate certs * `openssl req -nodes -new -x509 -keyout server.key -out server.cert` * Just remember to set this to localhost */ const https = require('https'); const fs = require('fs'); app.get('/', (req, res) => { res.send('Hello HTTPS!'); }); https.createServer({ key: fs.readFileSync('server.key'), cert: fs.readFileSync('server.cert') }, app).listen(3000, () => { console.log('Listening...'); }); }; eg18_let_s_encrypt = () => { /** * install certbot * ``` * sudo add-apt repository ppa:certbot/certbot * sudo apt-get update * sudo apt-get install certbot * ``` * * generate cert * certbot certonly --manual * * active renewal * 0 */12 * * * root /usr/local/bin/certbot renew >/dev/null 2>&1 */ app.use(express.static(__dirname + '/static', { dotfiles: 'allow' } )) const fs = require('fs') const https = require('https') const app = express() app.get('/', (req, res) => { res.send('Hello HTTPS!') }); https.createServer({ key: fs.readFileSync('/etc/letsencrypt/path/to/key.pem'), cert: fs.readFileSync('/etc/letsencrypt/path/to/cert.pem'), ca: fs.readFileSync('/etc/letsencrypt/path/to/chain.pem') }, app).listen(443, () => { console.log('Listening...') }); } exemple_sumary = (sumary_number) => { switch (sumary_number) { case 1: { eg1_hello_word(); break; } case 2: { eg2_query_parameter(); break; } case 3: { eg3_post_query(); break; } case 4: { eg4_response(); break; } case 5: { eg5_header(); break; } case 6: { eg6_redirect(); break; } case 7: { eg7_routing_parameters(); break; } case 8: { eg8_cors(); break; } case 9: { eg9_prefligth(); break; } case 10: { eg10_middleware(); break; } case 11: { eg11_static_file(); break; } case 12: { eg12_session(); break; } case 13: { eg13_validating_input(); break; } case 14: { eg14_sanitizing(); break; } case 15: { eg15_handling_form(); break; } case 16: { eg16_file_upload(); break; } case 17: { eg17_https(); break; } case 18: { eg18_let_s_encrypt(); break; } } } exemple_sumary(12);
console.log('--------------------')
random_line_split
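This second express.js record is split inside `eg2_query_parameter`, which walks `req.query` for a URL like `localhost:3000/?name=flavio&age=35`. An analogous (hypothetical) Go handler using `r.URL.Query()` looks like this:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Try: curl 'http://localhost:3000/?name=flavio&age=35'
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		query := r.URL.Query()
		fmt.Println("query: all")
		fmt.Println("--------------------")
		fmt.Println(query)
		fmt.Println("query: one by one")
		fmt.Println("--------------------")
		for key, values := range query {
			fmt.Println(key, ": ", values)
		}
		fmt.Printf("name: %s\n", query.Get("name"))
		fmt.Println("--------------------")
	})
	http.ListenAndServe(":3000", nil)
}
```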
orchestrator.go
// Package orchestrator is an algorithm that manages the work of a cluster of // nodes. It ensures each piece of work has a worker assigned to it. // // The Orchestrator stores a set of expected tasks. Each term, it reaches out // to the cluster to gather what each node is working on. These tasks are // called the actual tasks. The Orchestrator adjusts the nodes workload to // attempt to match the expected tasks. // // The expected workload is stored in memory. Therefore, if the process is // restarted the task list is lost. A system with persistence is required to // ensure the workload is not lost (e.g., database). package orchestrator import ( "context" "io/ioutil" "log" "sync" "time" ) // Communicator manages the internal communication between the Orchestrator and // the node cluster. Each method must be safe to call on many go-routines. // The given context represents the state of the term. Therefore, the // Communicator is expected to cancel immediately if the context is done. type Communicator interface { // List returns the workload from the given worker. List(ctx context.Context) ([]interface{}, error) // Add adds the given task to the worker. The error only logged (for now). // It is assumed that if the worker returns an error trying to update, the // next term will fix the problem and move the task elsewhere. Add(ctx context.Context, taskDefinition interface{}) error // Removes the given task from the worker. The error is only logged (for // now). It is assumed that if the worker is returning an error, then it // is either not doing the task because the worker is down, or there is a // network partition and a future term will fix the problem. Remove(ctx context.Context, taskDefinition interface{}) error } type Worker struct { Identifier interface{} Communicator } // Orchestrator stores the expected workload and reaches out to the cluster // to see what the actual workload is. It then tries to fix the delta. // // The expected task list can be altered via AddTask, RemoveTask and // UpdateTasks. Each method is safe to be called on multiple go-routines. type Orchestrator struct { log Logger s func(TermStats) timeout time.Duration mu sync.Mutex workers []Worker expectedTasks []Task // LastActual is set each term. It is only used for a user who wants to // know the state of the worker cluster from the last term. lastActual []WorkerState } // New creates a new Orchestrator. func New(opts ...OrchestratorOption) *Orchestrator { o := &Orchestrator{ s: func(TermStats) {}, log: log.New(ioutil.Discard, "", 0), timeout: 10 * time.Second, } for _, opt := range opts { opt(o) } return o } // NextTerm reaches out to the cluster to gather to actual workload. It then // attempts to fix the delta between actual and expected. The lifecycle of // the term is managed by the given context. func (o *Orchestrator) NextTerm(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() // Gather the state of the world from the workers. actual := o.collectActual(ctx) toAdd, toRemove := o.delta(actual) // Rebalance tasks among workers. toAdd, toRemove = rebalance(toAdd, toRemove, actual) counts := counts(actual, toRemove) for worker, tasks := range toRemove { for _, task := range tasks { // Remove the task from the workers. 
removeCtx, _ := context.WithTimeout(ctx, o.timeout) worker.Remove(removeCtx, task) } } for taskDefinition, missing := range toAdd { history := make(map[Worker]bool) for i := 0; i < missing; i++ { counts = o.assignTask(ctx, taskDefinition, counts, actual, history, ) } } o.s(TermStats{ WorkerCount: len(actual), }) } // collectActual reaches out to each worker and gets their state of the world. // Each worker is queried in parallel. If a worker returns an error while // trying to list the tasks, it will be logged and not considered for what // workers should be assigned work. func (o *Orchestrator) collectActual(ctx context.Context) map[Worker][]interface{} { type result struct { worker Worker actual []interface{} err error } listCtx, _ := context.WithTimeout(ctx, o.timeout) results := make(chan result, len(o.workers)) errs := make(chan result, len(o.workers)) for _, worker := range o.workers { go func(worker Worker) { listResults, err := worker.List(listCtx) if err != nil { errs <- result{worker: worker, err: err} return } results <- result{worker: worker, actual: listResults} }(worker) } t := time.NewTimer(o.timeout) var state []WorkerState actual := make(map[Worker][]interface{}) for i := 0; i < len(o.workers); i++ { select { case <-ctx.Done(): break case nextResult := <-results: actual[nextResult.worker] = nextResult.actual state = append(state, WorkerState{Worker: nextResult.worker, Tasks: nextResult.actual}) case err := <-errs: o.log.Printf("Error trying to list tasks from %s: %s", err.worker, err.err) case <-t.C: o.log.Printf("Communicator timeout. Using results available...") break } } o.lastActual = state return actual } // delta finds what should be added and removed to make actual match the // expected. func (o *Orchestrator) delta(actual map[Worker][]interface{}) (toAdd map[interface{}]int, toRemove map[Worker][]interface{}) { toAdd = make(map[interface{}]int) toRemove = make(map[Worker][]interface{}) expectedTasks := make([]Task, len(o.expectedTasks)) copy(expectedTasks, o.expectedTasks) for _, task := range o.expectedTasks { needs := hasEnoughInstances(task, actual) if needs == 0 { continue } toAdd[task.Definition] = needs } for worker, tasks := range actual { for _, task := range tasks { if idx := containsTask(task, expectedTasks); idx >= 0 { expectedTasks[idx].Instances-- if expectedTasks[idx].Instances == 0 { expectedTasks = append(expectedTasks[0:idx], expectedTasks[idx+1:]...) } continue } toRemove[worker] = append(toRemove[worker], task) } } return toAdd, toRemove } // assignTask tries to find a worker that does not have too many tasks // assigned. If it encounters a worker with too many tasks, it will remove // it from the pool and try again. func (o *Orchestrator) assignTask( ctx context.Context, taskDefinition interface{}, workerLoads []workerLoad, actual map[Worker][]interface{}, history map[Worker]bool, ) []workerLoad { activeWorkers := len(actual) if activeWorkers == 0 { return workerLoads } totalTasks := o.totalTaskCount() maxTaskCount := totalTasks/activeWorkers + totalTasks%activeWorkers for i, loadInfo := range workerLoads { // Ensure that each worker gets an even amount of work assigned. // Therefore if a worker gets its fair share, remove it from the worker // pool for this term. This also accounts for there being a non-divisible // amount of tasks per workers. loadInfo.taskCount++ if loadInfo.taskCount > maxTaskCount { workerLoads = append(workerLoads[:i], workerLoads[i+1:]...) // Recurse since the worker pool was adjusted and the task was // not assigned. 
return o.assignTask(ctx, taskDefinition, workerLoads, actual, history) } // Ensure we haven't assigned this task to the worker already. if history[loadInfo.worker] || contains(taskDefinition, actual[loadInfo.worker]) >= 0 { continue } history[loadInfo.worker] = true // Assign the task to the worker. o.log.Printf("Adding task %s to %s.", taskDefinition, loadInfo.worker) addCtx, _ := context.WithTimeout(ctx, o.timeout) loadInfo.worker.Add(addCtx, taskDefinition) // Move updated count to end of slice to help with fairness workerLoads = append( append(workerLoads[:i], workerLoads[i+1:]...), workerLoad{ worker: loadInfo.worker, taskCount: loadInfo.taskCount, }, ) break } return workerLoads } // totalTaskCount calculates the total number of expected task instances. func (o *Orchestrator) totalTaskCount() int { var total int for _, t := range o.expectedTasks { total += t.Instances } return total } // AddWorker adds a worker to the known worker cluster. The update will not // take affect until the next term. It is safe to invoke AddWorker, // RemoveWorkers and UpdateWorkers on multiple go-routines. func (o *Orchestrator) AddWorker(worker Worker) { o.mu.Lock() defer o.mu.Unlock() // Ensure we don't already have this worker idx := containsWorker(worker, o.workers) if idx > -1 { return } o.workers = append(o.workers, worker) } // RemoveWorker removes a worker from the known worker cluster. The update // will not take affect until the next term. It is safe to invoke AddWorker, // RemoveWorkers and UpdateWorkers on multiple go-routines. func (o *Orchestrator) RemoveWorker(worker Worker) { o.mu.Lock() defer o.mu.Unlock() idx := containsWorker(worker, o.workers) if idx < 0 { return } o.workers = append(o.workers[:idx], o.workers[idx+1:]...) } // UpdateWorkers overwrites the expected worker list. The update will not take // affect until the next term. It is safe to invoke AddWorker, RemoveWorker // and UpdateWorkers on multiple go-routines. func (o *Orchestrator) UpdateWorkers(workers []Worker) { o.mu.Lock() defer o.mu.Unlock() o.workers = workers } // Task stores the required information for a task. type Task struct { Definition interface{} Instances int } // AddTask adds a new task to the expected workload. The update will not take // affect until the next term. It is safe to invoke AddTask, RemoveTask and // UpdateTasks on multiple go-routines. func (o *Orchestrator) AddTask(taskDefinition interface{}, opts ...TaskOption) { o.mu.Lock() defer o.mu.Unlock() // Ensure we don't already have this task for _, t := range o.expectedTasks { if taskDefinition == t.Definition
} t := Task{Definition: taskDefinition, Instances: 1} for _, opt := range opts { opt(&t) } o.expectedTasks = append(o.expectedTasks, t) } // TaskOption is used to configure a task when it is being added. type TaskOption func(*Task) // WithTaskInstances configures the number of tasks. Defaults to 1. func WithTaskInstances(i int) TaskOption { return func(t *Task) { t.Instances = i } } // RemoveTask removes a task from the expected workload. The update will not // take affect until the next term. It is safe to invoke AddTask, RemoveTask // and UpdateTasks on multiple go-routines. func (o *Orchestrator) RemoveTask(taskDefinition interface{}) { o.mu.Lock() defer o.mu.Unlock() idx := containsTask(taskDefinition, o.expectedTasks) if idx < 0 { return } o.expectedTasks = append(o.expectedTasks[:idx], o.expectedTasks[idx+1:]...) } // UpdateTasks overwrites the expected task list. The update will not take // affect until the next term. It is safe to invoke AddTask, RemoveTask and // UpdateTasks on multiple go-routines. func (o *Orchestrator) UpdateTasks(tasks []Task) { o.mu.Lock() defer o.mu.Unlock() o.expectedTasks = tasks } // ListExpectedTasks returns the current list of the expected tasks. func (o *Orchestrator) ListExpectedTasks() []Task { o.mu.Lock() defer o.mu.Unlock() return o.expectedTasks } // WorkerState stores the state of a worker. type WorkerState struct { Worker Worker // Tasks are the task definitions the worker is servicing. Tasks []interface{} } // LastActual returns the actual from the last term. It will return nil // before the first term. func (o *Orchestrator) LastActual() []WorkerState { o.mu.Lock() defer o.mu.Unlock() return o.lastActual } // rebalance will rebalance tasks across the workers. If any worker has too // many tasks, it will be added to the remove map, and added to the returned // add slice. func rebalance( toAdd map[interface{}]int, toRemove, actual map[Worker][]interface{}, ) (map[interface{}]int, map[Worker][]interface{}) { counts := counts(actual, toRemove) if len(counts) == 0 { return toAdd, toRemove } var total int for _, c := range counts { total += c.taskCount } for _, addCount := range toAdd { total += addCount } maxPerNode := total / len(counts) if maxPerNode == 0 || total%len(counts) != 0 { maxPerNode++ } for _, c := range counts { if c.taskCount > maxPerNode { task := actual[c.worker][0] toRemove[c.worker] = append(toRemove[c.worker], task) toAdd[task]++ } } return toAdd, toRemove } // hasEnoughInstances looks at each task in the given actual list and ensures // a worker node is servicing the task. func hasEnoughInstances(t Task, actual map[Worker][]interface{}) (needs int) { var count int for _, a := range actual { if contains(t.Definition, a) >= 0 { count++ } } return t.Instances - count } // contains returns the index of the given interface{} (x) in the slice y. If the // interface{} is not present in the slice, it returns -1. func contains(x interface{}, y []interface{}) int { for i, t := range y { if t == x { return i } } return -1 } // containsTask returns the index of the given task name in the tasks. If the // task is not found, it returns -1. func containsTask(task interface{}, tasks []Task) int { for i, t := range tasks { if t.Definition == task { return i } } return -1 } // containsWorker returns the index of the given worker name in the workers. If the // worker is not found, it returns -1. func containsWorker(worker Worker, workers []Worker) int { for i, w := range workers { if w.Identifier == worker.Identifier { return i } } return -1 }
{ return }
conditional_block
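The orchestrator package above exposes `New`, `AddWorker`, `AddTask`, `WithTaskInstances`, and `NextTerm`, with each node reachable through the `Communicator` interface. A plausible way to wire it up is sketched below; the `inMemoryNode` Communicator, the task name, and the import path are assumptions for illustration, not part of the package.

```go
package main

import (
	"context"
	"sync"
	"time"

	"example.com/orchestrator" // assumed import path for the package above
)

// inMemoryNode is a toy Communicator that just records its own workload.
type inMemoryNode struct {
	mu    sync.Mutex
	tasks []interface{}
}

func (n *inMemoryNode) List(ctx context.Context) ([]interface{}, error) {
	n.mu.Lock()
	defer n.mu.Unlock()
	return append([]interface{}{}, n.tasks...), nil
}

func (n *inMemoryNode) Add(ctx context.Context, task interface{}) error {
	n.mu.Lock()
	defer n.mu.Unlock()
	n.tasks = append(n.tasks, task)
	return nil
}

func (n *inMemoryNode) Remove(ctx context.Context, task interface{}) error {
	n.mu.Lock()
	defer n.mu.Unlock()
	for i, t := range n.tasks {
		if t == task {
			n.tasks = append(n.tasks[:i], n.tasks[i+1:]...)
			break
		}
	}
	return nil
}

func main() {
	o := orchestrator.New()
	o.AddWorker(orchestrator.Worker{Identifier: "node-1", Communicator: &inMemoryNode{}})
	o.AddWorker(orchestrator.Worker{Identifier: "node-2", Communicator: &inMemoryNode{}})
	o.AddTask("scrape-logs", orchestrator.WithTaskInstances(2))

	// Drive a term periodically; each term reconciles expected vs actual work.
	for range time.Tick(time.Second) {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		o.NextTerm(ctx)
		cancel()
	}
}
```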
orchestrator.go
// Package orchestrator is an algorithm that manages the work of a cluster of // nodes. It ensures each piece of work has a worker assigned to it. // // The Orchestrator stores a set of expected tasks. Each term, it reaches out // to the cluster to gather what each node is working on. These tasks are // called the actual tasks. The Orchestrator adjusts the nodes workload to // attempt to match the expected tasks. // // The expected workload is stored in memory. Therefore, if the process is // restarted the task list is lost. A system with persistence is required to // ensure the workload is not lost (e.g., database). package orchestrator import ( "context" "io/ioutil" "log" "sync" "time" ) // Communicator manages the internal communication between the Orchestrator and // the node cluster. Each method must be safe to call on many go-routines. // The given context represents the state of the term. Therefore, the // Communicator is expected to cancel immediately if the context is done. type Communicator interface { // List returns the workload from the given worker. List(ctx context.Context) ([]interface{}, error) // Add adds the given task to the worker. The error only logged (for now). // It is assumed that if the worker returns an error trying to update, the // next term will fix the problem and move the task elsewhere. Add(ctx context.Context, taskDefinition interface{}) error // Removes the given task from the worker. The error is only logged (for // now). It is assumed that if the worker is returning an error, then it // is either not doing the task because the worker is down, or there is a // network partition and a future term will fix the problem. Remove(ctx context.Context, taskDefinition interface{}) error } type Worker struct { Identifier interface{} Communicator } // Orchestrator stores the expected workload and reaches out to the cluster // to see what the actual workload is. It then tries to fix the delta. // // The expected task list can be altered via AddTask, RemoveTask and // UpdateTasks. Each method is safe to be called on multiple go-routines. type Orchestrator struct { log Logger s func(TermStats) timeout time.Duration mu sync.Mutex workers []Worker expectedTasks []Task // LastActual is set each term. It is only used for a user who wants to // know the state of the worker cluster from the last term. lastActual []WorkerState } // New creates a new Orchestrator. func New(opts ...OrchestratorOption) *Orchestrator { o := &Orchestrator{ s: func(TermStats) {}, log: log.New(ioutil.Discard, "", 0), timeout: 10 * time.Second, } for _, opt := range opts { opt(o) } return o } // NextTerm reaches out to the cluster to gather to actual workload. It then // attempts to fix the delta between actual and expected. The lifecycle of // the term is managed by the given context. func (o *Orchestrator) NextTerm(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() // Gather the state of the world from the workers. actual := o.collectActual(ctx) toAdd, toRemove := o.delta(actual) // Rebalance tasks among workers. toAdd, toRemove = rebalance(toAdd, toRemove, actual) counts := counts(actual, toRemove) for worker, tasks := range toRemove { for _, task := range tasks { // Remove the task from the workers. 
removeCtx, _ := context.WithTimeout(ctx, o.timeout) worker.Remove(removeCtx, task) } } for taskDefinition, missing := range toAdd { history := make(map[Worker]bool) for i := 0; i < missing; i++ { counts = o.assignTask(ctx, taskDefinition, counts, actual, history, ) } } o.s(TermStats{ WorkerCount: len(actual), }) } // collectActual reaches out to each worker and gets their state of the world. // Each worker is queried in parallel. If a worker returns an error while // trying to list the tasks, it will be logged and not considered for what // workers should be assigned work. func (o *Orchestrator) collectActual(ctx context.Context) map[Worker][]interface{} { type result struct { worker Worker actual []interface{} err error } listCtx, _ := context.WithTimeout(ctx, o.timeout) results := make(chan result, len(o.workers)) errs := make(chan result, len(o.workers)) for _, worker := range o.workers { go func(worker Worker) { listResults, err := worker.List(listCtx) if err != nil { errs <- result{worker: worker, err: err} return } results <- result{worker: worker, actual: listResults} }(worker) } t := time.NewTimer(o.timeout) var state []WorkerState actual := make(map[Worker][]interface{}) for i := 0; i < len(o.workers); i++ { select { case <-ctx.Done(): break case nextResult := <-results: actual[nextResult.worker] = nextResult.actual state = append(state, WorkerState{Worker: nextResult.worker, Tasks: nextResult.actual}) case err := <-errs: o.log.Printf("Error trying to list tasks from %s: %s", err.worker, err.err) case <-t.C: o.log.Printf("Communicator timeout. Using results available...") break } } o.lastActual = state return actual } // delta finds what should be added and removed to make actual match the // expected. func (o *Orchestrator) delta(actual map[Worker][]interface{}) (toAdd map[interface{}]int, toRemove map[Worker][]interface{}) { toAdd = make(map[interface{}]int) toRemove = make(map[Worker][]interface{}) expectedTasks := make([]Task, len(o.expectedTasks)) copy(expectedTasks, o.expectedTasks) for _, task := range o.expectedTasks { needs := hasEnoughInstances(task, actual) if needs == 0 { continue } toAdd[task.Definition] = needs } for worker, tasks := range actual { for _, task := range tasks { if idx := containsTask(task, expectedTasks); idx >= 0 { expectedTasks[idx].Instances-- if expectedTasks[idx].Instances == 0 { expectedTasks = append(expectedTasks[0:idx], expectedTasks[idx+1:]...) } continue } toRemove[worker] = append(toRemove[worker], task) } } return toAdd, toRemove } // assignTask tries to find a worker that does not have too many tasks // assigned. If it encounters a worker with too many tasks, it will remove // it from the pool and try again. func (o *Orchestrator) assignTask( ctx context.Context, taskDefinition interface{}, workerLoads []workerLoad, actual map[Worker][]interface{}, history map[Worker]bool, ) []workerLoad { activeWorkers := len(actual) if activeWorkers == 0 { return workerLoads } totalTasks := o.totalTaskCount() maxTaskCount := totalTasks/activeWorkers + totalTasks%activeWorkers for i, loadInfo := range workerLoads { // Ensure that each worker gets an even amount of work assigned. // Therefore if a worker gets its fair share, remove it from the worker // pool for this term. This also accounts for there being a non-divisible // amount of tasks per workers. loadInfo.taskCount++ if loadInfo.taskCount > maxTaskCount { workerLoads = append(workerLoads[:i], workerLoads[i+1:]...) // Recurse since the worker pool was adjusted and the task was // not assigned. 
return o.assignTask(ctx, taskDefinition, workerLoads, actual, history) } // Ensure we haven't assigned this task to the worker already. if history[loadInfo.worker] || contains(taskDefinition, actual[loadInfo.worker]) >= 0 { continue } history[loadInfo.worker] = true // Assign the task to the worker. o.log.Printf("Adding task %s to %s.", taskDefinition, loadInfo.worker) addCtx, _ := context.WithTimeout(ctx, o.timeout) loadInfo.worker.Add(addCtx, taskDefinition) // Move updated count to end of slice to help with fairness workerLoads = append( append(workerLoads[:i], workerLoads[i+1:]...), workerLoad{ worker: loadInfo.worker, taskCount: loadInfo.taskCount, }, ) break } return workerLoads } // totalTaskCount calculates the total number of expected task instances. func (o *Orchestrator) totalTaskCount() int { var total int for _, t := range o.expectedTasks { total += t.Instances } return total } // AddWorker adds a worker to the known worker cluster. The update will not // take affect until the next term. It is safe to invoke AddWorker, // RemoveWorkers and UpdateWorkers on multiple go-routines. func (o *Orchestrator) AddWorker(worker Worker) { o.mu.Lock() defer o.mu.Unlock() // Ensure we don't already have this worker idx := containsWorker(worker, o.workers) if idx > -1 { return } o.workers = append(o.workers, worker) } // RemoveWorker removes a worker from the known worker cluster. The update // will not take affect until the next term. It is safe to invoke AddWorker, // RemoveWorkers and UpdateWorkers on multiple go-routines. func (o *Orchestrator) RemoveWorker(worker Worker) { o.mu.Lock() defer o.mu.Unlock() idx := containsWorker(worker, o.workers) if idx < 0 { return } o.workers = append(o.workers[:idx], o.workers[idx+1:]...) } // UpdateWorkers overwrites the expected worker list. The update will not take // affect until the next term. It is safe to invoke AddWorker, RemoveWorker // and UpdateWorkers on multiple go-routines. func (o *Orchestrator) UpdateWorkers(workers []Worker) { o.mu.Lock() defer o.mu.Unlock() o.workers = workers } // Task stores the required information for a task. type Task struct { Definition interface{} Instances int } // AddTask adds a new task to the expected workload. The update will not take // affect until the next term. It is safe to invoke AddTask, RemoveTask and // UpdateTasks on multiple go-routines. func (o *Orchestrator) AddTask(taskDefinition interface{}, opts ...TaskOption) { o.mu.Lock() defer o.mu.Unlock() // Ensure we don't already have this task for _, t := range o.expectedTasks { if taskDefinition == t.Definition { return } } t := Task{Definition: taskDefinition, Instances: 1} for _, opt := range opts { opt(&t) } o.expectedTasks = append(o.expectedTasks, t) } // TaskOption is used to configure a task when it is being added. type TaskOption func(*Task) // WithTaskInstances configures the number of tasks. Defaults to 1. func WithTaskInstances(i int) TaskOption { return func(t *Task) { t.Instances = i } } // RemoveTask removes a task from the expected workload. The update will not // take affect until the next term. It is safe to invoke AddTask, RemoveTask // and UpdateTasks on multiple go-routines. func (o *Orchestrator) RemoveTask(taskDefinition interface{}) { o.mu.Lock() defer o.mu.Unlock() idx := containsTask(taskDefinition, o.expectedTasks) if idx < 0 { return } o.expectedTasks = append(o.expectedTasks[:idx], o.expectedTasks[idx+1:]...) } // UpdateTasks overwrites the expected task list. The update will not take // affect until the next term. 
It is safe to invoke AddTask, RemoveTask and // UpdateTasks on multiple go-routines. func (o *Orchestrator) UpdateTasks(tasks []Task)
// ListExpectedTasks returns the current list of the expected tasks. func (o *Orchestrator) ListExpectedTasks() []Task { o.mu.Lock() defer o.mu.Unlock() return o.expectedTasks } // WorkerState stores the state of a worker. type WorkerState struct { Worker Worker // Tasks are the task definitions the worker is servicing. Tasks []interface{} } // LastActual returns the actual from the last term. It will return nil // before the first term. func (o *Orchestrator) LastActual() []WorkerState { o.mu.Lock() defer o.mu.Unlock() return o.lastActual } // rebalance will rebalance tasks across the workers. If any worker has too // many tasks, it will be added to the remove map, and added to the returned // add slice. func rebalance( toAdd map[interface{}]int, toRemove, actual map[Worker][]interface{}, ) (map[interface{}]int, map[Worker][]interface{}) { counts := counts(actual, toRemove) if len(counts) == 0 { return toAdd, toRemove } var total int for _, c := range counts { total += c.taskCount } for _, addCount := range toAdd { total += addCount } maxPerNode := total / len(counts) if maxPerNode == 0 || total%len(counts) != 0 { maxPerNode++ } for _, c := range counts { if c.taskCount > maxPerNode { task := actual[c.worker][0] toRemove[c.worker] = append(toRemove[c.worker], task) toAdd[task]++ } } return toAdd, toRemove } // hasEnoughInstances looks at each task in the given actual list and ensures // a worker node is servicing the task. func hasEnoughInstances(t Task, actual map[Worker][]interface{}) (needs int) { var count int for _, a := range actual { if contains(t.Definition, a) >= 0 { count++ } } return t.Instances - count } // contains returns the index of the given interface{} (x) in the slice y. If the // interface{} is not present in the slice, it returns -1. func contains(x interface{}, y []interface{}) int { for i, t := range y { if t == x { return i } } return -1 } // containsTask returns the index of the given task name in the tasks. If the // task is not found, it returns -1. func containsTask(task interface{}, tasks []Task) int { for i, t := range tasks { if t.Definition == task { return i } } return -1 } // containsWorker returns the index of the given worker name in the workers. If the // worker is not found, it returns -1. func containsWorker(worker Worker, workers []Worker) int { for i, w := range workers { if w.Identifier == worker.Identifier { return i } } return -1 }
{ o.mu.Lock() defer o.mu.Unlock() o.expectedTasks = tasks }
identifier_body
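The removed span in this record is just the body of `UpdateTasks`. The more interesting behaviour is the expected-vs-actual reconciliation that `NextTerm` performs through `delta`. The standalone sketch below shows the shape of that computation, simplified to string task names instead of `interface{}` and without the per-worker footprint types; it is an illustration, not the package's actual `delta` method.

```go
package main

import "fmt"

// delta reports how many instances of each expected task are still missing
// (toAdd) and which tasks a worker is servicing that it should drop (toRemove).
func delta(expected map[string]int, actual map[string][]string) (toAdd map[string]int, toRemove map[string][]string) {
	toAdd = map[string]int{}
	toRemove = map[string][]string{}

	// Count how many workers currently service each expected task.
	have := map[string]int{}
	for worker, tasks := range actual {
		for _, task := range tasks {
			if want, ok := expected[task]; ok && have[task] < want {
				have[task]++
			} else {
				// Either an unknown task or a surplus instance.
				toRemove[worker] = append(toRemove[worker], task)
			}
		}
	}
	for task, want := range expected {
		if want > have[task] {
			toAdd[task] = want - have[task]
		}
	}
	return toAdd, toRemove
}

func main() {
	expected := map[string]int{"scrape-logs": 2, "emit-metrics": 1}
	actual := map[string][]string{
		"node-1": {"scrape-logs", "old-task"},
		"node-2": {},
	}
	toAdd, toRemove := delta(expected, actual)
	fmt.Println(toAdd)    // map[emit-metrics:1 scrape-logs:1]
	fmt.Println(toRemove) // map[node-1:[old-task]]
}
```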
orchestrator.go
// Package orchestrator is an algorithm that manages the work of a cluster of // nodes. It ensures each piece of work has a worker assigned to it. // // The Orchestrator stores a set of expected tasks. Each term, it reaches out // to the cluster to gather what each node is working on. These tasks are // called the actual tasks. The Orchestrator adjusts the nodes workload to // attempt to match the expected tasks. // // The expected workload is stored in memory. Therefore, if the process is // restarted the task list is lost. A system with persistence is required to // ensure the workload is not lost (e.g., database). package orchestrator import ( "context" "io/ioutil" "log" "sync" "time" ) // Communicator manages the internal communication between the Orchestrator and // the node cluster. Each method must be safe to call on many go-routines. // The given context represents the state of the term. Therefore, the // Communicator is expected to cancel immediately if the context is done. type Communicator interface { // List returns the workload from the given worker. List(ctx context.Context) ([]interface{}, error) // Add adds the given task to the worker. The error only logged (for now). // It is assumed that if the worker returns an error trying to update, the // next term will fix the problem and move the task elsewhere. Add(ctx context.Context, taskDefinition interface{}) error // Removes the given task from the worker. The error is only logged (for // now). It is assumed that if the worker is returning an error, then it // is either not doing the task because the worker is down, or there is a // network partition and a future term will fix the problem. Remove(ctx context.Context, taskDefinition interface{}) error } type Worker struct { Identifier interface{} Communicator } // Orchestrator stores the expected workload and reaches out to the cluster // to see what the actual workload is. It then tries to fix the delta. // // The expected task list can be altered via AddTask, RemoveTask and // UpdateTasks. Each method is safe to be called on multiple go-routines. type Orchestrator struct { log Logger s func(TermStats) timeout time.Duration mu sync.Mutex workers []Worker expectedTasks []Task // LastActual is set each term. It is only used for a user who wants to // know the state of the worker cluster from the last term. lastActual []WorkerState } // New creates a new Orchestrator. func New(opts ...OrchestratorOption) *Orchestrator { o := &Orchestrator{ s: func(TermStats) {}, log: log.New(ioutil.Discard, "", 0), timeout: 10 * time.Second, } for _, opt := range opts { opt(o) } return o } // NextTerm reaches out to the cluster to gather to actual workload. It then // attempts to fix the delta between actual and expected. The lifecycle of // the term is managed by the given context. func (o *Orchestrator) NextTerm(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() // Gather the state of the world from the workers. actual := o.collectActual(ctx) toAdd, toRemove := o.delta(actual) // Rebalance tasks among workers. toAdd, toRemove = rebalance(toAdd, toRemove, actual) counts := counts(actual, toRemove) for worker, tasks := range toRemove { for _, task := range tasks { // Remove the task from the workers. removeCtx, _ := context.WithTimeout(ctx, o.timeout) worker.Remove(removeCtx, task) } } for taskDefinition, missing := range toAdd { history := make(map[Worker]bool) for i := 0; i < missing; i++ { counts = o.assignTask(ctx, taskDefinition, counts, actual,
} o.s(TermStats{ WorkerCount: len(actual), }) } // collectActual reaches out to each worker and gets their state of the world. // Each worker is queried in parallel. If a worker returns an error while // trying to list the tasks, it will be logged and not considered for what // workers should be assigned work. func (o *Orchestrator) collectActual(ctx context.Context) map[Worker][]interface{} { type result struct { worker Worker actual []interface{} err error } listCtx, _ := context.WithTimeout(ctx, o.timeout) results := make(chan result, len(o.workers)) errs := make(chan result, len(o.workers)) for _, worker := range o.workers { go func(worker Worker) { listResults, err := worker.List(listCtx) if err != nil { errs <- result{worker: worker, err: err} return } results <- result{worker: worker, actual: listResults} }(worker) } t := time.NewTimer(o.timeout) var state []WorkerState actual := make(map[Worker][]interface{}) for i := 0; i < len(o.workers); i++ { select { case <-ctx.Done(): break case nextResult := <-results: actual[nextResult.worker] = nextResult.actual state = append(state, WorkerState{Worker: nextResult.worker, Tasks: nextResult.actual}) case err := <-errs: o.log.Printf("Error trying to list tasks from %s: %s", err.worker, err.err) case <-t.C: o.log.Printf("Communicator timeout. Using results available...") break } } o.lastActual = state return actual } // delta finds what should be added and removed to make actual match the // expected. func (o *Orchestrator) delta(actual map[Worker][]interface{}) (toAdd map[interface{}]int, toRemove map[Worker][]interface{}) { toAdd = make(map[interface{}]int) toRemove = make(map[Worker][]interface{}) expectedTasks := make([]Task, len(o.expectedTasks)) copy(expectedTasks, o.expectedTasks) for _, task := range o.expectedTasks { needs := hasEnoughInstances(task, actual) if needs == 0 { continue } toAdd[task.Definition] = needs } for worker, tasks := range actual { for _, task := range tasks { if idx := containsTask(task, expectedTasks); idx >= 0 { expectedTasks[idx].Instances-- if expectedTasks[idx].Instances == 0 { expectedTasks = append(expectedTasks[0:idx], expectedTasks[idx+1:]...) } continue } toRemove[worker] = append(toRemove[worker], task) } } return toAdd, toRemove } // assignTask tries to find a worker that does not have too many tasks // assigned. If it encounters a worker with too many tasks, it will remove // it from the pool and try again. func (o *Orchestrator) assignTask( ctx context.Context, taskDefinition interface{}, workerLoads []workerLoad, actual map[Worker][]interface{}, history map[Worker]bool, ) []workerLoad { activeWorkers := len(actual) if activeWorkers == 0 { return workerLoads } totalTasks := o.totalTaskCount() maxTaskCount := totalTasks/activeWorkers + totalTasks%activeWorkers for i, loadInfo := range workerLoads { // Ensure that each worker gets an even amount of work assigned. // Therefore if a worker gets its fair share, remove it from the worker // pool for this term. This also accounts for there being a non-divisible // amount of tasks per workers. loadInfo.taskCount++ if loadInfo.taskCount > maxTaskCount { workerLoads = append(workerLoads[:i], workerLoads[i+1:]...) // Recurse since the worker pool was adjusted and the task was // not assigned. return o.assignTask(ctx, taskDefinition, workerLoads, actual, history) } // Ensure we haven't assigned this task to the worker already. 
if history[loadInfo.worker] || contains(taskDefinition, actual[loadInfo.worker]) >= 0 { continue } history[loadInfo.worker] = true // Assign the task to the worker. o.log.Printf("Adding task %s to %s.", taskDefinition, loadInfo.worker) addCtx, _ := context.WithTimeout(ctx, o.timeout) loadInfo.worker.Add(addCtx, taskDefinition) // Move updated count to end of slice to help with fairness workerLoads = append( append(workerLoads[:i], workerLoads[i+1:]...), workerLoad{ worker: loadInfo.worker, taskCount: loadInfo.taskCount, }, ) break } return workerLoads } // totalTaskCount calculates the total number of expected task instances. func (o *Orchestrator) totalTaskCount() int { var total int for _, t := range o.expectedTasks { total += t.Instances } return total } // AddWorker adds a worker to the known worker cluster. The update will not // take affect until the next term. It is safe to invoke AddWorker, // RemoveWorkers and UpdateWorkers on multiple go-routines. func (o *Orchestrator) AddWorker(worker Worker) { o.mu.Lock() defer o.mu.Unlock() // Ensure we don't already have this worker idx := containsWorker(worker, o.workers) if idx > -1 { return } o.workers = append(o.workers, worker) } // RemoveWorker removes a worker from the known worker cluster. The update // will not take affect until the next term. It is safe to invoke AddWorker, // RemoveWorkers and UpdateWorkers on multiple go-routines. func (o *Orchestrator) RemoveWorker(worker Worker) { o.mu.Lock() defer o.mu.Unlock() idx := containsWorker(worker, o.workers) if idx < 0 { return } o.workers = append(o.workers[:idx], o.workers[idx+1:]...) } // UpdateWorkers overwrites the expected worker list. The update will not take // affect until the next term. It is safe to invoke AddWorker, RemoveWorker // and UpdateWorkers on multiple go-routines. func (o *Orchestrator) UpdateWorkers(workers []Worker) { o.mu.Lock() defer o.mu.Unlock() o.workers = workers } // Task stores the required information for a task. type Task struct { Definition interface{} Instances int } // AddTask adds a new task to the expected workload. The update will not take // affect until the next term. It is safe to invoke AddTask, RemoveTask and // UpdateTasks on multiple go-routines. func (o *Orchestrator) AddTask(taskDefinition interface{}, opts ...TaskOption) { o.mu.Lock() defer o.mu.Unlock() // Ensure we don't already have this task for _, t := range o.expectedTasks { if taskDefinition == t.Definition { return } } t := Task{Definition: taskDefinition, Instances: 1} for _, opt := range opts { opt(&t) } o.expectedTasks = append(o.expectedTasks, t) } // TaskOption is used to configure a task when it is being added. type TaskOption func(*Task) // WithTaskInstances configures the number of tasks. Defaults to 1. func WithTaskInstances(i int) TaskOption { return func(t *Task) { t.Instances = i } } // RemoveTask removes a task from the expected workload. The update will not // take affect until the next term. It is safe to invoke AddTask, RemoveTask // and UpdateTasks on multiple go-routines. func (o *Orchestrator) RemoveTask(taskDefinition interface{}) { o.mu.Lock() defer o.mu.Unlock() idx := containsTask(taskDefinition, o.expectedTasks) if idx < 0 { return } o.expectedTasks = append(o.expectedTasks[:idx], o.expectedTasks[idx+1:]...) } // UpdateTasks overwrites the expected task list. The update will not take // affect until the next term. It is safe to invoke AddTask, RemoveTask and // UpdateTasks on multiple go-routines. 
func (o *Orchestrator) UpdateTasks(tasks []Task) { o.mu.Lock() defer o.mu.Unlock() o.expectedTasks = tasks } // ListExpectedTasks returns the current list of the expected tasks. func (o *Orchestrator) ListExpectedTasks() []Task { o.mu.Lock() defer o.mu.Unlock() return o.expectedTasks } // WorkerState stores the state of a worker. type WorkerState struct { Worker Worker // Tasks are the task definitions the worker is servicing. Tasks []interface{} } // LastActual returns the actual from the last term. It will return nil // before the first term. func (o *Orchestrator) LastActual() []WorkerState { o.mu.Lock() defer o.mu.Unlock() return o.lastActual } // rebalance will rebalance tasks across the workers. If any worker has too // many tasks, it will be added to the remove map, and added to the returned // add slice. func rebalance( toAdd map[interface{}]int, toRemove, actual map[Worker][]interface{}, ) (map[interface{}]int, map[Worker][]interface{}) { counts := counts(actual, toRemove) if len(counts) == 0 { return toAdd, toRemove } var total int for _, c := range counts { total += c.taskCount } for _, addCount := range toAdd { total += addCount } maxPerNode := total / len(counts) if maxPerNode == 0 || total%len(counts) != 0 { maxPerNode++ } for _, c := range counts { if c.taskCount > maxPerNode { task := actual[c.worker][0] toRemove[c.worker] = append(toRemove[c.worker], task) toAdd[task]++ } } return toAdd, toRemove } // hasEnoughInstances looks at each task in the given actual list and ensures // a worker node is servicing the task. func hasEnoughInstances(t Task, actual map[Worker][]interface{}) (needs int) { var count int for _, a := range actual { if contains(t.Definition, a) >= 0 { count++ } } return t.Instances - count } // contains returns the index of the given interface{} (x) in the slice y. If the // interface{} is not present in the slice, it returns -1. func contains(x interface{}, y []interface{}) int { for i, t := range y { if t == x { return i } } return -1 } // containsTask returns the index of the given task name in the tasks. If the // task is not found, it returns -1. func containsTask(task interface{}, tasks []Task) int { for i, t := range tasks { if t.Definition == task { return i } } return -1 } // containsWorker returns the index of the given worker name in the workers. If the // worker is not found, it returns -1. func containsWorker(worker Worker, workers []Worker) int { for i, w := range workers { if w.Identifier == worker.Identifier { return i } } return -1 }
history, ) }
random_line_split
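The package comment above describes the intended lifecycle: register workers and expected tasks, then call NextTerm periodically so that each term the delta between expected and actual workload is fixed. Below is a minimal usage sketch, assuming an in-memory nopCommunicator, a hypothetical import path, and a 10-second term interval; none of these are part of the package itself, and a production Communicator must be safe for concurrent use as the interface requires.

package main

import (
	"context"
	"time"

	"example.com/orchestrator" // hypothetical import path for the package above
)

// nopCommunicator is an illustrative worker endpoint that keeps its assigned
// tasks in memory. It is not mutex-protected, which a real implementation
// would need because the Orchestrator may call these methods concurrently.
type nopCommunicator struct{ tasks []interface{} }

func (c *nopCommunicator) List(ctx context.Context) ([]interface{}, error) {
	return c.tasks, nil
}

func (c *nopCommunicator) Add(ctx context.Context, task interface{}) error {
	c.tasks = append(c.tasks, task)
	return nil
}

func (c *nopCommunicator) Remove(ctx context.Context, task interface{}) error {
	for i, t := range c.tasks {
		if t == task {
			c.tasks = append(c.tasks[:i], c.tasks[i+1:]...)
			break
		}
	}
	return nil
}

func main() {
	o := orchestrator.New()

	// Register the worker cluster and the expected workload.
	o.AddWorker(orchestrator.Worker{Identifier: "worker-1", Communicator: &nopCommunicator{}})
	o.AddWorker(orchestrator.Worker{Identifier: "worker-2", Communicator: &nopCommunicator{}})
	o.AddTask("scrape-metrics", orchestrator.WithTaskInstances(2))

	// Each term gathers the actual workload from the workers and fixes the
	// delta against the expected tasks.
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		o.NextTerm(ctx)
		cancel()
	}
}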
orchestrator.go
// Package orchestrator is an algorithm that manages the work of a cluster of // nodes. It ensures each piece of work has a worker assigned to it. // // The Orchestrator stores a set of expected tasks. Each term, it reaches out // to the cluster to gather what each node is working on. These tasks are // called the actual tasks. The Orchestrator adjusts the nodes workload to // attempt to match the expected tasks. // // The expected workload is stored in memory. Therefore, if the process is // restarted the task list is lost. A system with persistence is required to // ensure the workload is not lost (e.g., database). package orchestrator import ( "context" "io/ioutil" "log" "sync" "time" ) // Communicator manages the internal communication between the Orchestrator and // the node cluster. Each method must be safe to call on many go-routines. // The given context represents the state of the term. Therefore, the // Communicator is expected to cancel immediately if the context is done. type Communicator interface { // List returns the workload from the given worker. List(ctx context.Context) ([]interface{}, error) // Add adds the given task to the worker. The error only logged (for now). // It is assumed that if the worker returns an error trying to update, the // next term will fix the problem and move the task elsewhere. Add(ctx context.Context, taskDefinition interface{}) error // Removes the given task from the worker. The error is only logged (for // now). It is assumed that if the worker is returning an error, then it // is either not doing the task because the worker is down, or there is a // network partition and a future term will fix the problem. Remove(ctx context.Context, taskDefinition interface{}) error } type Worker struct { Identifier interface{} Communicator } // Orchestrator stores the expected workload and reaches out to the cluster // to see what the actual workload is. It then tries to fix the delta. // // The expected task list can be altered via AddTask, RemoveTask and // UpdateTasks. Each method is safe to be called on multiple go-routines. type Orchestrator struct { log Logger s func(TermStats) timeout time.Duration mu sync.Mutex workers []Worker expectedTasks []Task // LastActual is set each term. It is only used for a user who wants to // know the state of the worker cluster from the last term. lastActual []WorkerState } // New creates a new Orchestrator. func New(opts ...OrchestratorOption) *Orchestrator { o := &Orchestrator{ s: func(TermStats) {}, log: log.New(ioutil.Discard, "", 0), timeout: 10 * time.Second, } for _, opt := range opts { opt(o) } return o } // NextTerm reaches out to the cluster to gather to actual workload. It then // attempts to fix the delta between actual and expected. The lifecycle of // the term is managed by the given context. func (o *Orchestrator) NextTerm(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() // Gather the state of the world from the workers. actual := o.collectActual(ctx) toAdd, toRemove := o.delta(actual) // Rebalance tasks among workers. toAdd, toRemove = rebalance(toAdd, toRemove, actual) counts := counts(actual, toRemove) for worker, tasks := range toRemove { for _, task := range tasks { // Remove the task from the workers. 
removeCtx, _ := context.WithTimeout(ctx, o.timeout) worker.Remove(removeCtx, task) } } for taskDefinition, missing := range toAdd { history := make(map[Worker]bool) for i := 0; i < missing; i++ { counts = o.assignTask(ctx, taskDefinition, counts, actual, history, ) } } o.s(TermStats{ WorkerCount: len(actual), }) } // collectActual reaches out to each worker and gets their state of the world. // Each worker is queried in parallel. If a worker returns an error while // trying to list the tasks, it will be logged and not considered for what // workers should be assigned work. func (o *Orchestrator) collectActual(ctx context.Context) map[Worker][]interface{} { type result struct { worker Worker actual []interface{} err error } listCtx, _ := context.WithTimeout(ctx, o.timeout) results := make(chan result, len(o.workers)) errs := make(chan result, len(o.workers)) for _, worker := range o.workers { go func(worker Worker) { listResults, err := worker.List(listCtx) if err != nil { errs <- result{worker: worker, err: err} return } results <- result{worker: worker, actual: listResults} }(worker) } t := time.NewTimer(o.timeout) var state []WorkerState actual := make(map[Worker][]interface{}) for i := 0; i < len(o.workers); i++ { select { case <-ctx.Done(): break case nextResult := <-results: actual[nextResult.worker] = nextResult.actual state = append(state, WorkerState{Worker: nextResult.worker, Tasks: nextResult.actual}) case err := <-errs: o.log.Printf("Error trying to list tasks from %s: %s", err.worker, err.err) case <-t.C: o.log.Printf("Communicator timeout. Using results available...") break } } o.lastActual = state return actual } // delta finds what should be added and removed to make actual match the // expected. func (o *Orchestrator) delta(actual map[Worker][]interface{}) (toAdd map[interface{}]int, toRemove map[Worker][]interface{}) { toAdd = make(map[interface{}]int) toRemove = make(map[Worker][]interface{}) expectedTasks := make([]Task, len(o.expectedTasks)) copy(expectedTasks, o.expectedTasks) for _, task := range o.expectedTasks { needs := hasEnoughInstances(task, actual) if needs == 0 { continue } toAdd[task.Definition] = needs } for worker, tasks := range actual { for _, task := range tasks { if idx := containsTask(task, expectedTasks); idx >= 0 { expectedTasks[idx].Instances-- if expectedTasks[idx].Instances == 0 { expectedTasks = append(expectedTasks[0:idx], expectedTasks[idx+1:]...) } continue } toRemove[worker] = append(toRemove[worker], task) } } return toAdd, toRemove } // assignTask tries to find a worker that does not have too many tasks // assigned. If it encounters a worker with too many tasks, it will remove // it from the pool and try again. func (o *Orchestrator) assignTask( ctx context.Context, taskDefinition interface{}, workerLoads []workerLoad, actual map[Worker][]interface{}, history map[Worker]bool, ) []workerLoad { activeWorkers := len(actual) if activeWorkers == 0 { return workerLoads } totalTasks := o.totalTaskCount() maxTaskCount := totalTasks/activeWorkers + totalTasks%activeWorkers for i, loadInfo := range workerLoads { // Ensure that each worker gets an even amount of work assigned. // Therefore if a worker gets its fair share, remove it from the worker // pool for this term. This also accounts for there being a non-divisible // amount of tasks per workers. loadInfo.taskCount++ if loadInfo.taskCount > maxTaskCount { workerLoads = append(workerLoads[:i], workerLoads[i+1:]...) // Recurse since the worker pool was adjusted and the task was // not assigned. 
return o.assignTask(ctx, taskDefinition, workerLoads, actual, history) } // Ensure we haven't assigned this task to the worker already. if history[loadInfo.worker] || contains(taskDefinition, actual[loadInfo.worker]) >= 0 { continue } history[loadInfo.worker] = true // Assign the task to the worker. o.log.Printf("Adding task %s to %s.", taskDefinition, loadInfo.worker) addCtx, _ := context.WithTimeout(ctx, o.timeout) loadInfo.worker.Add(addCtx, taskDefinition) // Move updated count to end of slice to help with fairness workerLoads = append( append(workerLoads[:i], workerLoads[i+1:]...), workerLoad{ worker: loadInfo.worker, taskCount: loadInfo.taskCount, }, ) break } return workerLoads } // totalTaskCount calculates the total number of expected task instances. func (o *Orchestrator) totalTaskCount() int { var total int for _, t := range o.expectedTasks { total += t.Instances } return total } // AddWorker adds a worker to the known worker cluster. The update will not // take affect until the next term. It is safe to invoke AddWorker, // RemoveWorkers and UpdateWorkers on multiple go-routines. func (o *Orchestrator) AddWorker(worker Worker) { o.mu.Lock() defer o.mu.Unlock() // Ensure we don't already have this worker idx := containsWorker(worker, o.workers) if idx > -1 { return } o.workers = append(o.workers, worker) } // RemoveWorker removes a worker from the known worker cluster. The update // will not take affect until the next term. It is safe to invoke AddWorker, // RemoveWorkers and UpdateWorkers on multiple go-routines. func (o *Orchestrator) RemoveWorker(worker Worker) { o.mu.Lock() defer o.mu.Unlock() idx := containsWorker(worker, o.workers) if idx < 0 { return } o.workers = append(o.workers[:idx], o.workers[idx+1:]...) } // UpdateWorkers overwrites the expected worker list. The update will not take // affect until the next term. It is safe to invoke AddWorker, RemoveWorker // and UpdateWorkers on multiple go-routines. func (o *Orchestrator) UpdateWorkers(workers []Worker) { o.mu.Lock() defer o.mu.Unlock() o.workers = workers } // Task stores the required information for a task. type Task struct { Definition interface{} Instances int } // AddTask adds a new task to the expected workload. The update will not take // affect until the next term. It is safe to invoke AddTask, RemoveTask and // UpdateTasks on multiple go-routines. func (o *Orchestrator) AddTask(taskDefinition interface{}, opts ...TaskOption) { o.mu.Lock() defer o.mu.Unlock() // Ensure we don't already have this task for _, t := range o.expectedTasks { if taskDefinition == t.Definition { return } } t := Task{Definition: taskDefinition, Instances: 1} for _, opt := range opts { opt(&t) } o.expectedTasks = append(o.expectedTasks, t) } // TaskOption is used to configure a task when it is being added. type TaskOption func(*Task) // WithTaskInstances configures the number of tasks. Defaults to 1. func WithTaskInstances(i int) TaskOption { return func(t *Task) { t.Instances = i } } // RemoveTask removes a task from the expected workload. The update will not // take affect until the next term. It is safe to invoke AddTask, RemoveTask // and UpdateTasks on multiple go-routines. func (o *Orchestrator) RemoveTask(taskDefinition interface{}) { o.mu.Lock() defer o.mu.Unlock() idx := containsTask(taskDefinition, o.expectedTasks) if idx < 0 { return } o.expectedTasks = append(o.expectedTasks[:idx], o.expectedTasks[idx+1:]...) } // UpdateTasks overwrites the expected task list. The update will not take // affect until the next term. 
It is safe to invoke AddTask, RemoveTask and // UpdateTasks on multiple go-routines. func (o *Orchestrator) UpdateTasks(tasks []Task) { o.mu.Lock() defer o.mu.Unlock() o.expectedTasks = tasks } // ListExpectedTasks returns the current list of the expected tasks. func (o *Orchestrator) ListExpectedTasks() []Task { o.mu.Lock() defer o.mu.Unlock() return o.expectedTasks } // WorkerState stores the state of a worker. type WorkerState struct { Worker Worker // Tasks are the task definitions the worker is servicing. Tasks []interface{} } // LastActual returns the actual from the last term. It will return nil // before the first term. func (o *Orchestrator) LastActual() []WorkerState { o.mu.Lock() defer o.mu.Unlock() return o.lastActual } // rebalance will rebalance tasks across the workers. If any worker has too // many tasks, it will be added to the remove map, and added to the returned // add slice. func rebalance( toAdd map[interface{}]int, toRemove, actual map[Worker][]interface{}, ) (map[interface{}]int, map[Worker][]interface{}) { counts := counts(actual, toRemove) if len(counts) == 0 { return toAdd, toRemove } var total int for _, c := range counts { total += c.taskCount } for _, addCount := range toAdd { total += addCount } maxPerNode := total / len(counts) if maxPerNode == 0 || total%len(counts) != 0 { maxPerNode++ } for _, c := range counts { if c.taskCount > maxPerNode { task := actual[c.worker][0] toRemove[c.worker] = append(toRemove[c.worker], task) toAdd[task]++ } } return toAdd, toRemove } // hasEnoughInstances looks at each task in the given actual list and ensures // a worker node is servicing the task. func hasEnoughInstances(t Task, actual map[Worker][]interface{}) (needs int) { var count int for _, a := range actual { if contains(t.Definition, a) >= 0 { count++ } } return t.Instances - count } // contains returns the index of the given interface{} (x) in the slice y. If the // interface{} is not present in the slice, it returns -1. func
(x interface{}, y []interface{}) int { for i, t := range y { if t == x { return i } } return -1 } // containsTask returns the index of the given task name in the tasks. If the // task is not found, it returns -1. func containsTask(task interface{}, tasks []Task) int { for i, t := range tasks { if t.Definition == task { return i } } return -1 } // containsWorker returns the index of the given worker name in the workers. If the // worker is not found, it returns -1. func containsWorker(worker Worker, workers []Worker) int { for i, w := range workers { if w.Identifier == worker.Identifier { return i } } return -1 }
contains
identifier_name
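assignTask above caps every worker at totalTasks/activeWorkers + totalTasks%activeWorkers assignments per term, which keeps the load roughly even when the task count does not divide evenly across the reachable workers. A self-contained sketch of that arithmetic, using assumed figures:

package main

import "fmt"

func main() {
	// Assumed figures: 7 expected task instances spread over 3 reachable workers.
	totalTasks, activeWorkers := 7, 3

	// Same cap as assignTask: integer quotient plus remainder.
	maxTaskCount := totalTasks/activeWorkers + totalTasks%activeWorkers

	fmt.Println(maxTaskCount) // 3: no worker is assigned more than 3 tasks this term
}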
warnings.go
// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package shoot import ( "context" "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" "github.com/gardener/gardener/pkg/apis/core" "github.com/gardener/gardener/pkg/apis/core/helper" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" versionutils "github.com/gardener/gardener/pkg/utils/version" ) // GetWarnings returns warnings for the provided shoot. func GetWarnings(_ context.Context, shoot, oldShoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if shoot == nil { return nil } var warnings []string if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) { warnings = append(warnings, "you should consider disabling the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_access.md for details") } // TODO(acumino): Drop this warning in v1.78, with dropping of annotation to enable node-local-dns. warnings = append(warnings, getWarningsForDeprecatedNodeLocalDNSLabels(shoot)...) if oldShoot != nil { warnings = append(warnings, getWarningsForDueCredentialsRotations(shoot, credentialsRotationInterval)...) warnings = append(warnings, getWarningsForIncompleteCredentialsRotation(shoot, credentialsRotationInterval)...) // Errors are ignored here because we cannot do anything meaningful with them - variables will default to `false`. k8sLess125, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, "< 1.25") k8sGreaterEqual123, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, ">= 1.23") if k8sLess125 && k8sGreaterEqual123 { if warning := getWarningsForPSPAdmissionPlugin(shoot); warning != "" { warnings = append(warnings, warning) } } } if kubeControllerManager := shoot.Spec.Kubernetes.KubeControllerManager; kubeControllerManager != nil && kubeControllerManager.PodEvictionTimeout != nil { warnings = append(warnings, "you are setting the spec.kubernetes.kubeControllerManager.podEvictionTimeout field. The field does not have effect since Kubernetes 1.13. Instead, use the spec.kubernetes.kubeAPIServer.(defaultNotReadyTolerationSeconds/defaultUnreachableTolerationSeconds) fields.") } return warnings } func getWarningsForDeprecatedNodeLocalDNSLabels(shoot *core.Shoot) []string { var warnings []string if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNS]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.enabled` in Shoot instead. 
Switching on node-local-dns via shoot specification will roll the nodes even if node-local-dns was enabled beforehand via annotation.", v1beta1constants.AnnotationNodeLocalDNS)) } if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToClusterDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns)) } if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToUpstreamDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns)) } return warnings } func getWarningsForDueCredentialsRotations(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if !isOldEnough(shoot.CreationTimestamp.Time, credentialsRotationInterval) { return nil } if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil { return []string{"you should consider rotating the shoot credentials, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#gardener-provided-credentials for details"} } var ( rotation = shoot.Status.Credentials.Rotation warnings []string ) if rotation.CertificateAuthorities == nil || initiationDue(rotation.CertificateAuthorities.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the certificate authorities, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#certificate-authorities for details") } if rotation.ETCDEncryptionKey == nil || initiationDue(rotation.ETCDEncryptionKey.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the ETCD encryption key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#etcd-encryption-key for details") } if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) && (rotation.Kubeconfig == nil || initiationDue(rotation.Kubeconfig.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#kubeconfig for details") } if (shoot.Spec.Purpose == nil || *shoot.Spec.Purpose != core.ShootPurposeTesting) && (rotation.Observability == nil || initiationDue(rotation.Observability.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the observability passwords, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#observability-passwords-for-plutono for details") } if rotation.ServiceAccountKey == nil || initiationDue(rotation.ServiceAccountKey.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the ServiceAccount token signing key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#serviceaccount-token-signing-key for details") } if helper.ShootEnablesSSHAccess(shoot) && (rotation.SSHKeypair == nil || initiationDue(rotation.SSHKeypair.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the 
SSH keypair, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#ssh-key-pair-for-worker-nodes for details") } return warnings } func getWarningsForIncompleteCredentialsRotation(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil { return nil } var ( warnings []string recommendedCompletionInterval = credentialsRotationInterval / 3 rotation = shoot.Status.Credentials.Rotation ) // Only consider credentials for which completion must be triggered explicitly by the user. Credentials which are // rotated in "one phase" are excluded. if rotation.CertificateAuthorities != nil && completionDue(rotation.CertificateAuthorities.LastInitiationFinishedTime, rotation.CertificateAuthorities.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("certificate authorities", recommendedCompletionInterval)) } if rotation.ETCDEncryptionKey != nil && completionDue(rotation.ETCDEncryptionKey.LastInitiationFinishedTime, rotation.ETCDEncryptionKey.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("ETCD encryption key", recommendedCompletionInterval)) } if rotation.ServiceAccountKey != nil && completionDue(rotation.ServiceAccountKey.LastInitiationFinishedTime, rotation.ServiceAccountKey.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("ServiceAccount token signing key", recommendedCompletionInterval)) } return warnings } func initiationDue(lastInitiationTime *metav1.Time, threshold time.Duration) bool { return lastInitiationTime == nil || isOldEnough(lastInitiationTime.Time, threshold) } func completionDue(lastInitiationFinishedTime, lastCompletionTriggeredTime *metav1.Time, threshold time.Duration) bool { if lastInitiationFinishedTime == nil { return false } if lastCompletionTriggeredTime != nil && lastCompletionTriggeredTime.Time.UTC().After(lastInitiationFinishedTime.Time.UTC()) { return false
func isOldEnough(t time.Time, threshold time.Duration) bool { return t.UTC().Add(threshold).Before(time.Now().UTC()) } func completionWarning(credentials string, recommendedCompletionInterval time.Duration) string { return fmt.Sprintf("the %s rotation initiation was finished more than %s ago and should be completed", credentials, recommendedCompletionInterval) } func getWarningsForPSPAdmissionPlugin(shoot *core.Shoot) string { if !helper.IsWorkerless(shoot) && shoot.Spec.Kubernetes.KubeAPIServer != nil { for _, plugin := range shoot.Spec.Kubernetes.KubeAPIServer.AdmissionPlugins { if plugin.Name == "PodSecurityPolicy" && pointer.BoolDeref(plugin.Disabled, false) { return "" } } } return "you should consider migrating to PodSecurity, see https://github.com/gardener/gardener/blob/master/docs/usage/pod-security.md#migrating-from-podsecuritypolicys-to-podsecurity-admission-controller for details" }
} return isOldEnough(lastInitiationFinishedTime.Time, threshold) }
random_line_split
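The rotation warnings above reduce to a single time comparison: a credential is due when its last initiation plus the configured rotation interval lies in the past (isOldEnough), and initiationDue additionally treats a missing timestamp as due. A standalone sketch of that check, with assumed timestamps and interval:

package main

import (
	"fmt"
	"time"
)

// isOldEnough mirrors the helper above: t plus the threshold is already in the past.
func isOldEnough(t time.Time, threshold time.Duration) bool {
	return t.UTC().Add(threshold).Before(time.Now().UTC())
}

func main() {
	// Assumed values: the credential was last rotated 100 days ago and the
	// configured rotation interval is 90 days, so a warning would be emitted.
	lastInitiation := time.Now().Add(-100 * 24 * time.Hour)
	rotationInterval := 90 * 24 * time.Hour

	fmt.Println(isOldEnough(lastInitiation, rotationInterval)) // true
}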
warnings.go
// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package shoot import ( "context" "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" "github.com/gardener/gardener/pkg/apis/core" "github.com/gardener/gardener/pkg/apis/core/helper" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" versionutils "github.com/gardener/gardener/pkg/utils/version" ) // GetWarnings returns warnings for the provided shoot. func GetWarnings(_ context.Context, shoot, oldShoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if shoot == nil { return nil } var warnings []string if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) { warnings = append(warnings, "you should consider disabling the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_access.md for details") } // TODO(acumino): Drop this warning in v1.78, with dropping of annotation to enable node-local-dns. warnings = append(warnings, getWarningsForDeprecatedNodeLocalDNSLabels(shoot)...) if oldShoot != nil { warnings = append(warnings, getWarningsForDueCredentialsRotations(shoot, credentialsRotationInterval)...) warnings = append(warnings, getWarningsForIncompleteCredentialsRotation(shoot, credentialsRotationInterval)...) // Errors are ignored here because we cannot do anything meaningful with them - variables will default to `false`. k8sLess125, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, "< 1.25") k8sGreaterEqual123, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, ">= 1.23") if k8sLess125 && k8sGreaterEqual123 { if warning := getWarningsForPSPAdmissionPlugin(shoot); warning != "" { warnings = append(warnings, warning) } } } if kubeControllerManager := shoot.Spec.Kubernetes.KubeControllerManager; kubeControllerManager != nil && kubeControllerManager.PodEvictionTimeout != nil { warnings = append(warnings, "you are setting the spec.kubernetes.kubeControllerManager.podEvictionTimeout field. The field does not have effect since Kubernetes 1.13. Instead, use the spec.kubernetes.kubeAPIServer.(defaultNotReadyTolerationSeconds/defaultUnreachableTolerationSeconds) fields.") } return warnings } func getWarningsForDeprecatedNodeLocalDNSLabels(shoot *core.Shoot) []string { var warnings []string if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNS]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.enabled` in Shoot instead. 
Switching on node-local-dns via shoot specification will roll the nodes even if node-local-dns was enabled beforehand via annotation.", v1beta1constants.AnnotationNodeLocalDNS)) } if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToClusterDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns)) } if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToUpstreamDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns)) } return warnings } func getWarningsForDueCredentialsRotations(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if !isOldEnough(shoot.CreationTimestamp.Time, credentialsRotationInterval) { return nil } if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil { return []string{"you should consider rotating the shoot credentials, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#gardener-provided-credentials for details"} } var ( rotation = shoot.Status.Credentials.Rotation warnings []string ) if rotation.CertificateAuthorities == nil || initiationDue(rotation.CertificateAuthorities.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the certificate authorities, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#certificate-authorities for details") } if rotation.ETCDEncryptionKey == nil || initiationDue(rotation.ETCDEncryptionKey.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the ETCD encryption key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#etcd-encryption-key for details") } if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) && (rotation.Kubeconfig == nil || initiationDue(rotation.Kubeconfig.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#kubeconfig for details") } if (shoot.Spec.Purpose == nil || *shoot.Spec.Purpose != core.ShootPurposeTesting) && (rotation.Observability == nil || initiationDue(rotation.Observability.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the observability passwords, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#observability-passwords-for-plutono for details") } if rotation.ServiceAccountKey == nil || initiationDue(rotation.ServiceAccountKey.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the ServiceAccount token signing key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#serviceaccount-token-signing-key for details") } if helper.ShootEnablesSSHAccess(shoot) && (rotation.SSHKeypair == nil || initiationDue(rotation.SSHKeypair.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the 
SSH keypair, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#ssh-key-pair-for-worker-nodes for details") } return warnings } func getWarningsForIncompleteCredentialsRotation(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil { return nil } var ( warnings []string recommendedCompletionInterval = credentialsRotationInterval / 3 rotation = shoot.Status.Credentials.Rotation ) // Only consider credentials for which completion must be triggered explicitly by the user. Credentials which are // rotated in "one phase" are excluded. if rotation.CertificateAuthorities != nil && completionDue(rotation.CertificateAuthorities.LastInitiationFinishedTime, rotation.CertificateAuthorities.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("certificate authorities", recommendedCompletionInterval)) } if rotation.ETCDEncryptionKey != nil && completionDue(rotation.ETCDEncryptionKey.LastInitiationFinishedTime, rotation.ETCDEncryptionKey.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("ETCD encryption key", recommendedCompletionInterval)) } if rotation.ServiceAccountKey != nil && completionDue(rotation.ServiceAccountKey.LastInitiationFinishedTime, rotation.ServiceAccountKey.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("ServiceAccount token signing key", recommendedCompletionInterval)) } return warnings } func initiationDue(lastInitiationTime *metav1.Time, threshold time.Duration) bool { return lastInitiationTime == nil || isOldEnough(lastInitiationTime.Time, threshold) } func completionDue(lastInitiationFinishedTime, lastCompletionTriggeredTime *metav1.Time, threshold time.Duration) bool { if lastInitiationFinishedTime == nil { return false } if lastCompletionTriggeredTime != nil && lastCompletionTriggeredTime.Time.UTC().After(lastInitiationFinishedTime.Time.UTC()) { return false } return isOldEnough(lastInitiationFinishedTime.Time, threshold) } func isOldEnough(t time.Time, threshold time.Duration) bool { return t.UTC().Add(threshold).Before(time.Now().UTC()) } func completionWarning(credentials string, recommendedCompletionInterval time.Duration) string { return fmt.Sprintf("the %s rotation initiation was finished more than %s ago and should be completed", credentials, recommendedCompletionInterval) } func getWarningsForPSPAdmissionPlugin(shoot *core.Shoot) string
{
	if !helper.IsWorkerless(shoot) && shoot.Spec.Kubernetes.KubeAPIServer != nil {
		for _, plugin := range shoot.Spec.Kubernetes.KubeAPIServer.AdmissionPlugins {
			if plugin.Name == "PodSecurityPolicy" && pointer.BoolDeref(plugin.Disabled, false) {
				return ""
			}
		}
	}

	return "you should consider migrating to PodSecurity, see https://github.com/gardener/gardener/blob/master/docs/usage/pod-security.md#migrating-from-podsecuritypolicys-to-podsecurity-admission-controller for details"
}
identifier_body
warnings.go
// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package shoot import ( "context" "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" "github.com/gardener/gardener/pkg/apis/core" "github.com/gardener/gardener/pkg/apis/core/helper" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" versionutils "github.com/gardener/gardener/pkg/utils/version" ) // GetWarnings returns warnings for the provided shoot. func GetWarnings(_ context.Context, shoot, oldShoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if shoot == nil { return nil } var warnings []string if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) { warnings = append(warnings, "you should consider disabling the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_access.md for details") } // TODO(acumino): Drop this warning in v1.78, with dropping of annotation to enable node-local-dns. warnings = append(warnings, getWarningsForDeprecatedNodeLocalDNSLabels(shoot)...) if oldShoot != nil { warnings = append(warnings, getWarningsForDueCredentialsRotations(shoot, credentialsRotationInterval)...) warnings = append(warnings, getWarningsForIncompleteCredentialsRotation(shoot, credentialsRotationInterval)...) // Errors are ignored here because we cannot do anything meaningful with them - variables will default to `false`. k8sLess125, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, "< 1.25") k8sGreaterEqual123, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, ">= 1.23") if k8sLess125 && k8sGreaterEqual123 { if warning := getWarningsForPSPAdmissionPlugin(shoot); warning != "" { warnings = append(warnings, warning) } } } if kubeControllerManager := shoot.Spec.Kubernetes.KubeControllerManager; kubeControllerManager != nil && kubeControllerManager.PodEvictionTimeout != nil { warnings = append(warnings, "you are setting the spec.kubernetes.kubeControllerManager.podEvictionTimeout field. The field does not have effect since Kubernetes 1.13. Instead, use the spec.kubernetes.kubeAPIServer.(defaultNotReadyTolerationSeconds/defaultUnreachableTolerationSeconds) fields.") } return warnings } func getWarningsForDeprecatedNodeLocalDNSLabels(shoot *core.Shoot) []string { var warnings []string if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNS]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.enabled` in Shoot instead. 
Switching on node-local-dns via shoot specification will roll the nodes even if node-local-dns was enabled beforehand via annotation.", v1beta1constants.AnnotationNodeLocalDNS)) } if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToClusterDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns)) } if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns]; ok
return warnings } func getWarningsForDueCredentialsRotations(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if !isOldEnough(shoot.CreationTimestamp.Time, credentialsRotationInterval) { return nil } if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil { return []string{"you should consider rotating the shoot credentials, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#gardener-provided-credentials for details"} } var ( rotation = shoot.Status.Credentials.Rotation warnings []string ) if rotation.CertificateAuthorities == nil || initiationDue(rotation.CertificateAuthorities.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the certificate authorities, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#certificate-authorities for details") } if rotation.ETCDEncryptionKey == nil || initiationDue(rotation.ETCDEncryptionKey.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the ETCD encryption key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#etcd-encryption-key for details") } if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) && (rotation.Kubeconfig == nil || initiationDue(rotation.Kubeconfig.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#kubeconfig for details") } if (shoot.Spec.Purpose == nil || *shoot.Spec.Purpose != core.ShootPurposeTesting) && (rotation.Observability == nil || initiationDue(rotation.Observability.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the observability passwords, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#observability-passwords-for-plutono for details") } if rotation.ServiceAccountKey == nil || initiationDue(rotation.ServiceAccountKey.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the ServiceAccount token signing key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#serviceaccount-token-signing-key for details") } if helper.ShootEnablesSSHAccess(shoot) && (rotation.SSHKeypair == nil || initiationDue(rotation.SSHKeypair.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the SSH keypair, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#ssh-key-pair-for-worker-nodes for details") } return warnings } func getWarningsForIncompleteCredentialsRotation(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil { return nil } var ( warnings []string recommendedCompletionInterval = credentialsRotationInterval / 3 rotation = shoot.Status.Credentials.Rotation ) // Only consider credentials for which completion must be triggered explicitly by the user. Credentials which are // rotated in "one phase" are excluded. 
if rotation.CertificateAuthorities != nil && completionDue(rotation.CertificateAuthorities.LastInitiationFinishedTime, rotation.CertificateAuthorities.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("certificate authorities", recommendedCompletionInterval)) } if rotation.ETCDEncryptionKey != nil && completionDue(rotation.ETCDEncryptionKey.LastInitiationFinishedTime, rotation.ETCDEncryptionKey.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("ETCD encryption key", recommendedCompletionInterval)) } if rotation.ServiceAccountKey != nil && completionDue(rotation.ServiceAccountKey.LastInitiationFinishedTime, rotation.ServiceAccountKey.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("ServiceAccount token signing key", recommendedCompletionInterval)) } return warnings } func initiationDue(lastInitiationTime *metav1.Time, threshold time.Duration) bool { return lastInitiationTime == nil || isOldEnough(lastInitiationTime.Time, threshold) } func completionDue(lastInitiationFinishedTime, lastCompletionTriggeredTime *metav1.Time, threshold time.Duration) bool { if lastInitiationFinishedTime == nil { return false } if lastCompletionTriggeredTime != nil && lastCompletionTriggeredTime.Time.UTC().After(lastInitiationFinishedTime.Time.UTC()) { return false } return isOldEnough(lastInitiationFinishedTime.Time, threshold) } func isOldEnough(t time.Time, threshold time.Duration) bool { return t.UTC().Add(threshold).Before(time.Now().UTC()) } func completionWarning(credentials string, recommendedCompletionInterval time.Duration) string { return fmt.Sprintf("the %s rotation initiation was finished more than %s ago and should be completed", credentials, recommendedCompletionInterval) } func getWarningsForPSPAdmissionPlugin(shoot *core.Shoot) string { if !helper.IsWorkerless(shoot) && shoot.Spec.Kubernetes.KubeAPIServer != nil { for _, plugin := range shoot.Spec.Kubernetes.KubeAPIServer.AdmissionPlugins { if plugin.Name == "PodSecurityPolicy" && pointer.BoolDeref(plugin.Disabled, false) { return "" } } } return "you should consider migrating to PodSecurity, see https://github.com/gardener/gardener/blob/master/docs/usage/pod-security.md#migrating-from-podsecuritypolicys-to-podsecurity-admission-controller for details" }
{
	warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToUpstreamDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns))
}
conditional_block
warnings.go
// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package shoot import ( "context" "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" "github.com/gardener/gardener/pkg/apis/core" "github.com/gardener/gardener/pkg/apis/core/helper" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" versionutils "github.com/gardener/gardener/pkg/utils/version" ) // GetWarnings returns warnings for the provided shoot. func GetWarnings(_ context.Context, shoot, oldShoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if shoot == nil { return nil } var warnings []string if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) { warnings = append(warnings, "you should consider disabling the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_access.md for details") } // TODO(acumino): Drop this warning in v1.78, with dropping of annotation to enable node-local-dns. warnings = append(warnings, getWarningsForDeprecatedNodeLocalDNSLabels(shoot)...) if oldShoot != nil { warnings = append(warnings, getWarningsForDueCredentialsRotations(shoot, credentialsRotationInterval)...) warnings = append(warnings, getWarningsForIncompleteCredentialsRotation(shoot, credentialsRotationInterval)...) // Errors are ignored here because we cannot do anything meaningful with them - variables will default to `false`. k8sLess125, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, "< 1.25") k8sGreaterEqual123, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, ">= 1.23") if k8sLess125 && k8sGreaterEqual123 { if warning := getWarningsForPSPAdmissionPlugin(shoot); warning != "" { warnings = append(warnings, warning) } } } if kubeControllerManager := shoot.Spec.Kubernetes.KubeControllerManager; kubeControllerManager != nil && kubeControllerManager.PodEvictionTimeout != nil { warnings = append(warnings, "you are setting the spec.kubernetes.kubeControllerManager.podEvictionTimeout field. The field does not have effect since Kubernetes 1.13. Instead, use the spec.kubernetes.kubeAPIServer.(defaultNotReadyTolerationSeconds/defaultUnreachableTolerationSeconds) fields.") } return warnings } func getWarningsForDeprecatedNodeLocalDNSLabels(shoot *core.Shoot) []string { var warnings []string if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNS]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.enabled` in Shoot instead. 
Switching on node-local-dns via shoot specification will roll the nodes even if node-local-dns was enabled beforehand via annotation.", v1beta1constants.AnnotationNodeLocalDNS)) } if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToClusterDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns)) } if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns]; ok { warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToUpstreamDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns)) } return warnings } func
(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if !isOldEnough(shoot.CreationTimestamp.Time, credentialsRotationInterval) { return nil } if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil { return []string{"you should consider rotating the shoot credentials, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#gardener-provided-credentials for details"} } var ( rotation = shoot.Status.Credentials.Rotation warnings []string ) if rotation.CertificateAuthorities == nil || initiationDue(rotation.CertificateAuthorities.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the certificate authorities, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#certificate-authorities for details") } if rotation.ETCDEncryptionKey == nil || initiationDue(rotation.ETCDEncryptionKey.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the ETCD encryption key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#etcd-encryption-key for details") } if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) && (rotation.Kubeconfig == nil || initiationDue(rotation.Kubeconfig.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#kubeconfig for details") } if (shoot.Spec.Purpose == nil || *shoot.Spec.Purpose != core.ShootPurposeTesting) && (rotation.Observability == nil || initiationDue(rotation.Observability.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the observability passwords, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#observability-passwords-for-plutono for details") } if rotation.ServiceAccountKey == nil || initiationDue(rotation.ServiceAccountKey.LastInitiationTime, credentialsRotationInterval) { warnings = append(warnings, "you should consider rotating the ServiceAccount token signing key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#serviceaccount-token-signing-key for details") } if helper.ShootEnablesSSHAccess(shoot) && (rotation.SSHKeypair == nil || initiationDue(rotation.SSHKeypair.LastInitiationTime, credentialsRotationInterval)) { warnings = append(warnings, "you should consider rotating the SSH keypair, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#ssh-key-pair-for-worker-nodes for details") } return warnings } func getWarningsForIncompleteCredentialsRotation(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string { if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil { return nil } var ( warnings []string recommendedCompletionInterval = credentialsRotationInterval / 3 rotation = shoot.Status.Credentials.Rotation ) // Only consider credentials for which completion must be triggered explicitly by the user. Credentials which are // rotated in "one phase" are excluded. 
if rotation.CertificateAuthorities != nil && completionDue(rotation.CertificateAuthorities.LastInitiationFinishedTime, rotation.CertificateAuthorities.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("certificate authorities", recommendedCompletionInterval)) } if rotation.ETCDEncryptionKey != nil && completionDue(rotation.ETCDEncryptionKey.LastInitiationFinishedTime, rotation.ETCDEncryptionKey.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("ETCD encryption key", recommendedCompletionInterval)) } if rotation.ServiceAccountKey != nil && completionDue(rotation.ServiceAccountKey.LastInitiationFinishedTime, rotation.ServiceAccountKey.LastCompletionTriggeredTime, recommendedCompletionInterval) { warnings = append(warnings, completionWarning("ServiceAccount token signing key", recommendedCompletionInterval)) } return warnings } func initiationDue(lastInitiationTime *metav1.Time, threshold time.Duration) bool { return lastInitiationTime == nil || isOldEnough(lastInitiationTime.Time, threshold) } func completionDue(lastInitiationFinishedTime, lastCompletionTriggeredTime *metav1.Time, threshold time.Duration) bool { if lastInitiationFinishedTime == nil { return false } if lastCompletionTriggeredTime != nil && lastCompletionTriggeredTime.Time.UTC().After(lastInitiationFinishedTime.Time.UTC()) { return false } return isOldEnough(lastInitiationFinishedTime.Time, threshold) } func isOldEnough(t time.Time, threshold time.Duration) bool { return t.UTC().Add(threshold).Before(time.Now().UTC()) } func completionWarning(credentials string, recommendedCompletionInterval time.Duration) string { return fmt.Sprintf("the %s rotation initiation was finished more than %s ago and should be completed", credentials, recommendedCompletionInterval) } func getWarningsForPSPAdmissionPlugin(shoot *core.Shoot) string { if !helper.IsWorkerless(shoot) && shoot.Spec.Kubernetes.KubeAPIServer != nil { for _, plugin := range shoot.Spec.Kubernetes.KubeAPIServer.AdmissionPlugins { if plugin.Name == "PodSecurityPolicy" && pointer.BoolDeref(plugin.Disabled, false) { return "" } } } return "you should consider migrating to PodSecurity, see https://github.com/gardener/gardener/blob/master/docs/usage/pod-security.md#migrating-from-podsecuritypolicys-to-podsecurity-admission-controller for details" }
getWarningsForDueCredentialsRotations
identifier_name
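The Go helpers in the record above (initiationDue, completionDue, isOldEnough) encode the timing rules behind the rotation warnings. Below is a minimal illustrative sketch of that timing logic in Python; the function names and the 90-day interval are assumptions for the example, not part of the Gardener code itself.

from datetime import datetime, timedelta, timezone

def is_old_enough(t, threshold):
    # A timestamp is "old enough" once t + threshold lies in the past.
    return t + threshold < datetime.now(timezone.utc)

def initiation_due(last_initiation_time, threshold):
    # Initiation is due if it never happened or happened too long ago.
    return last_initiation_time is None or is_old_enough(last_initiation_time, threshold)

def completion_due(last_initiation_finished, last_completion_triggered, threshold):
    # Completion is only relevant after initiation finished and before it was completed,
    # and only once the recommended completion interval has elapsed.
    if last_initiation_finished is None:
        return False
    if last_completion_triggered is not None and last_completion_triggered > last_initiation_finished:
        return False
    return is_old_enough(last_initiation_finished, threshold)

# Example: with a (hypothetical) 90-day rotation interval, completion is
# recommended once a third of that interval has passed since initiation finished.
rotation_interval = timedelta(days=90)
recommended_completion_interval = rotation_interval / 3
print(initiation_due(None, rotation_interval))  # True: rotation never initiated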
train_gcn.py
#Copyright (C) 2021 Fanwei Kong, Shawn C. Shadden, University of California, Berkeley #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), "src")) import glob import functools import pickle import argparse import numpy as np from sklearn.model_selection import train_test_split import tensorflow as tf from tensorflow.python.keras.optimizers import Adam, SGD from tensorflow.python.keras import backend as K from tensorflow.python.keras import losses from tensorflow.python.keras import models from tensorflow.python.keras.utils import multi_gpu_model from utils import buildImageDataset, construct_feed_dict from custom_layers import * from augmentation import change_intensity_img, _augment_deformnet from dataset import get_baseline_dataset, get_baseline_dataset_deformnet from model import DeformNet from loss import mesh_loss_geometric_cf, point_loss_cf, binary_bce_dice_loss from call_backs import * """# Set up""" parser = argparse.ArgumentParser() parser.add_argument('--im_trains', nargs='+',help='Name of the folder containing the image data') parser.add_argument('--im_vals', nargs='+', help='Name of the folder containing the image data') parser.add_argument('--pre_train', default='', help="Filename of the pretrained graph model") parser.add_argument('--mesh', help='Name of the .dat file containing mesh info') parser.add_argument('--mesh_txt', nargs='+', help='Name of the mesh_info.txt file with tmplt scale and center into') parser.add_argument('--output', help='Name of the output folder') parser.add_argument('--attr_trains', nargs='+', help='Attribute name of the folders containing tf records') parser.add_argument('--attr_vals', nargs='+', help='Attribute name of the folders containing tf records') parser.add_argument('--train_data_weights', type=float, nargs='+', help='Weights to apply for the samples in different datasets') parser.add_argument('--val_data_weights', type=float, nargs='+', help='Weights to apply for the samples in different datasets') parser.add_argument('--file_pattern', default='*.tfrecords', help='Pattern of the .tfrecords files') parser.add_argument('--modality', nargs='+', help='Name of the modality, mr, ct, split by space') parser.add_argument('--num_epoch', type=int, help='Maximum number of epochs to run') parser.add_argument('--num_seg', type=int,default=1, help='Number of segmentation classes') parser.add_argument('--seg_weight', type=float, default=1., help='Weight of the segmentation loss') parser.add_argument('--mesh_ids', nargs='+', type=int, default=[2], help='Number of meshes to train') parser.add_argument('--batch_size', type=int, default=10, help='Batch size') parser.add_argument('--shuffle_buffer_size', type=int, default=10000, help='Shuffle buffer size') parser.add_argument('--lr', type=float, help='Learning rate') parser.add_argument('--cf_ratio', type=float, default=1., help='Loss ratio between gt chamfer loss and pred chamfer loss') parser.add_argument('--size', type = int, nargs='+', help='Image dimensions') 
parser.add_argument('--weights', type = float, nargs='+', help='Loss weights for geometric loss') parser.add_argument('--hidden_dim', type = int, default=128, help='Hidden dimension') parser.add_argument('--amplify_factor', type=float, default=1., help="amplify_factor of the predicted displacements") args = parser.parse_args() img_shape = (args.size[0], args.size[1], args.size[2], 1) save_loss_path = args.output save_model_path = os.path.join(args.output, "weights_gcn.hdf5") """ Create new directories """ try: os.makedirs(os.path.dirname(save_model_path)) os.makedirs(os.path.dirname(save_loss_path)) except Exception as e: print(e) """# Feed in mesh info""" pkl = pickle.load(open(args.mesh, 'rb')) mesh_info = construct_feed_dict(pkl) mesh_info['mesh_center'] = [np.zeros(3) for i in range(len(args.mesh_ids))] mesh_info['mesh_scale'] = [0 for i in range(len(args.mesh_ids))] mesh_info['mesh_area'] = [0 for i in range(len(args.mesh_ids))] mesh_info['edge_length_scaled'] = [np.zeros(3) for i in range(len(args.mesh_ids))] # 3 is number of blocks for txt_fn in args.mesh_txt: for i in range(len(args.mesh_ids)): ctr_scale = np.loadtxt(txt_fn) if len(ctr_scale.shape)==1: ctr_scale = np.expand_dims(ctr_scale, axis=0) mesh_info['mesh_center'][i] += ctr_scale[i, :-2]/len(args.modality) mesh_info['mesh_scale'][i] += ctr_scale[i, -2]/len(args.modality) mesh_info['mesh_area'][i] += ctr_scale[i, -1]/len(args.modality) for i in range(len(args.mesh_ids)): r = mesh_info['mesh_scale'][i]*2 scale = r * np.mean(args.size) area_ratio = mesh_info['mesh_area'][i]/(4*np.pi*r*r) mesh_info['edge_length_scaled'][i] = np.array(mesh_info['edge_length']) * scale * scale * area_ratio print("Mesh center, scale: ", mesh_info['mesh_center'], mesh_info['mesh_scale']) print("Mesh edge: ", mesh_info['edge_length_scaled']) """## Set up train and validation datasets Note that we apply image augmentation to our training dataset but not our validation dataset. 
""" tr_cfg = {'change_intensity': {"scale": [0.9, 1.1],"shift": [-0.1, 0.1]}} tr_preprocessing_fn = functools.partial(_augment_deformnet, **tr_cfg) if_seg = True if args.num_seg>0 else False val_preprocessing_fn = functools.partial(_augment_deformnet) train_ds_list, val_ds_list = [], [] train_ds_num, val_ds_num = [], [] for data_folder_out, attr in zip(args.im_trains, args.attr_trains): x_train_filenames_i = buildImageDataset(data_folder_out, args.modality, 41, mode='_train'+attr, ext=args.file_pattern) train_ds_num.append(len(x_train_filenames_i)) train_ds_i = get_baseline_dataset_deformnet(x_train_filenames_i, preproc_fn=tr_preprocessing_fn, mesh_ids=args.mesh_ids, \ shuffle_buffer=args.shuffle_buffer_size, if_seg=if_seg) train_ds_list.append(train_ds_i) for data_val_folder_out, attr in zip(args.im_vals, args.attr_vals): x_val_filenames_i = buildImageDataset(data_val_folder_out, args.modality, 41, mode='_val'+attr, ext=args.file_pattern) val_ds_num.append(len(x_val_filenames_i)) val_ds_i = get_baseline_dataset_deformnet(x_val_filenames_i, preproc_fn=val_preprocessing_fn, mesh_ids=args.mesh_ids, \ shuffle_buffer=args.shuffle_buffer_size, if_seg=if_seg) val_ds_list.append(val_ds_i) train_data_weights = [w/np.sum(args.train_data_weights) for w in args.train_data_weights] val_data_weights = [w/np.sum(args.val_data_weights) for w in args.val_data_weights] print("Sampling probability for train and val datasets: ", train_data_weights, val_data_weights) train_ds = tf.data.experimental.sample_from_datasets(train_ds_list, weights=train_data_weights) train_ds = train_ds.batch(args.batch_size) val_ds = tf.data.experimental.sample_from_datasets(val_ds_list, weights=val_data_weights) val_ds = val_ds.batch(args.batch_size) num_train_examples = train_ds_num[np.argmax(train_data_weights)]/np.max(train_data_weights) num_val_examples = val_ds_num[np.argmax(val_data_weights)]/np.max(val_data_weights) print("Number of train, val samples after reweighting: ", num_train_examples, num_val_examples) """# Build the model""" model = DeformNet(args.batch_size, img_shape, mesh_info, amplify_factor=args.amplify_factor,num_mesh=len(args.mesh_ids), num_seg=args.num_seg) unet_gcn = model.build_keras() unet_gcn.summary(line_length=150) adam = Adam(lr=args.lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=True) output_keys = [node.op.name.split('/')[0] for node in unet_gcn.outputs] print("Output Keys: ", output_keys) if args.num_seg >0: losses = [ mesh_loss_geometric_cf(mesh_info, 3, args.weights, args.cf_ratio, mesh_info['edge_length_scaled'][(i-1)%len(args.mesh_ids)]) for i in range(1, len(output_keys))] losses = [binary_bce_dice_loss] + losses else: losses = [ mesh_loss_geometric_cf(mesh_info, 3, args.weights, args.cf_ratio, mesh_info['edge_length_scaled'][i%len(args.mesh_ids)]) for i in range(len(output_keys))] losses = dict(zip(output_keys, losses)) metric_loss, metric_key = [], [] for i in range(1, len(args.mesh_ids)+1): metric_key.append(output_keys[-i]) metric_loss.append(point_loss_cf) metrics_losses = dict(zip(metric_key, metric_loss)) metric_loss_weights = list(np.ones(len(args.mesh_ids))) loss_weights = list(np.ones(len(output_keys))) if args.num_seg > 0:
unet_gcn.compile(optimizer=adam, loss=losses,loss_weights=loss_weights, metrics=metrics_losses) """ Setup model checkpoint """ save_model_path = os.path.join(args.output, "weights_gcn.hdf5") cp_cd = SaveModelOnCD(metric_key, save_model_path, patience=50) lr_schedule = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=10, min_lr=0.000005) call_backs = [cp_cd,lr_schedule] try: if args.pre_train != '': unet_gcn.load_weights(args.pre_train) else: unet_gcn.load_weights(save_model_path) except Exception as e: print("Model not loaded", e) """ Training """ history =unet_gcn.fit(train_ds, steps_per_epoch=int(np.ceil(num_train_examples/float(args.batch_size))), epochs=args.num_epoch, validation_data=val_ds, validation_steps= int(np.ceil(num_val_examples / float(args.batch_size))), callbacks=call_backs) with open(save_loss_path+"_history", 'wb') as handle: # saving the history pickle.dump(history.history, handle)
loss_weights[0] = args.seg_weight
conditional_block
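The masked line of this record (loss_weights[0] = args.seg_weight) only makes sense in the context of how the script pairs one loss per model output and then re-weights the segmentation head. The following is a small self-contained Python sketch of that wiring; the output names and the stand-in loss callables are made up for illustration and do not come from the training script.

import numpy as np

# Hypothetical output names: one segmentation head followed by three mesh blocks.
output_keys = ["segmentation", "mesh_block_1", "mesh_block_2", "mesh_block_3"]

def seg_loss(y_true, y_pred):   # stand-in for binary_bce_dice_loss
    return np.mean((y_true - y_pred) ** 2)

def mesh_loss(y_true, y_pred):  # stand-in for mesh_loss_geometric_cf(...)
    return np.mean(np.abs(y_true - y_pred))

# One loss per output, keyed by output name, exactly as passed to model.compile().
losses = dict(zip(output_keys, [seg_loss] + [mesh_loss] * (len(output_keys) - 1)))

loss_weights = list(np.ones(len(output_keys)))
num_seg = 1        # corresponds to --num_seg
seg_weight = 0.5   # corresponds to --seg_weight
if num_seg > 0:
    loss_weights[0] = seg_weight  # the role of the masked line: scale only the segmentation loss

print(list(losses.keys()), loss_weights)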
train_gcn.py
#Copyright (C) 2021 Fanwei Kong, Shawn C. Shadden, University of California, Berkeley #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), "src")) import glob import functools import pickle import argparse import numpy as np from sklearn.model_selection import train_test_split import tensorflow as tf from tensorflow.python.keras.optimizers import Adam, SGD from tensorflow.python.keras import backend as K from tensorflow.python.keras import losses from tensorflow.python.keras import models from tensorflow.python.keras.utils import multi_gpu_model from utils import buildImageDataset, construct_feed_dict from custom_layers import * from augmentation import change_intensity_img, _augment_deformnet from dataset import get_baseline_dataset, get_baseline_dataset_deformnet from model import DeformNet from loss import mesh_loss_geometric_cf, point_loss_cf, binary_bce_dice_loss from call_backs import * """# Set up""" parser = argparse.ArgumentParser() parser.add_argument('--im_trains', nargs='+',help='Name of the folder containing the image data') parser.add_argument('--im_vals', nargs='+', help='Name of the folder containing the image data') parser.add_argument('--pre_train', default='', help="Filename of the pretrained graph model") parser.add_argument('--mesh', help='Name of the .dat file containing mesh info') parser.add_argument('--mesh_txt', nargs='+', help='Name of the mesh_info.txt file with tmplt scale and center into') parser.add_argument('--output', help='Name of the output folder') parser.add_argument('--attr_trains', nargs='+', help='Attribute name of the folders containing tf records') parser.add_argument('--attr_vals', nargs='+', help='Attribute name of the folders containing tf records') parser.add_argument('--train_data_weights', type=float, nargs='+', help='Weights to apply for the samples in different datasets') parser.add_argument('--val_data_weights', type=float, nargs='+', help='Weights to apply for the samples in different datasets') parser.add_argument('--file_pattern', default='*.tfrecords', help='Pattern of the .tfrecords files') parser.add_argument('--modality', nargs='+', help='Name of the modality, mr, ct, split by space') parser.add_argument('--num_epoch', type=int, help='Maximum number of epochs to run') parser.add_argument('--num_seg', type=int,default=1, help='Number of segmentation classes') parser.add_argument('--seg_weight', type=float, default=1., help='Weight of the segmentation loss') parser.add_argument('--mesh_ids', nargs='+', type=int, default=[2], help='Number of meshes to train') parser.add_argument('--batch_size', type=int, default=10, help='Batch size') parser.add_argument('--shuffle_buffer_size', type=int, default=10000, help='Shuffle buffer size') parser.add_argument('--lr', type=float, help='Learning rate') parser.add_argument('--cf_ratio', type=float, default=1., help='Loss ratio between gt chamfer loss and pred chamfer loss') parser.add_argument('--size', type = int, nargs='+', help='Image dimensions') 
parser.add_argument('--weights', type = float, nargs='+', help='Loss weights for geometric loss') parser.add_argument('--hidden_dim', type = int, default=128, help='Hidden dimension') parser.add_argument('--amplify_factor', type=float, default=1., help="amplify_factor of the predicted displacements") args = parser.parse_args() img_shape = (args.size[0], args.size[1], args.size[2], 1) save_loss_path = args.output save_model_path = os.path.join(args.output, "weights_gcn.hdf5") """ Create new directories """ try: os.makedirs(os.path.dirname(save_model_path)) os.makedirs(os.path.dirname(save_loss_path)) except Exception as e: print(e) """# Feed in mesh info""" pkl = pickle.load(open(args.mesh, 'rb')) mesh_info = construct_feed_dict(pkl) mesh_info['mesh_center'] = [np.zeros(3) for i in range(len(args.mesh_ids))] mesh_info['mesh_scale'] = [0 for i in range(len(args.mesh_ids))] mesh_info['mesh_area'] = [0 for i in range(len(args.mesh_ids))] mesh_info['edge_length_scaled'] = [np.zeros(3) for i in range(len(args.mesh_ids))] # 3 is number of blocks for txt_fn in args.mesh_txt: for i in range(len(args.mesh_ids)): ctr_scale = np.loadtxt(txt_fn) if len(ctr_scale.shape)==1: ctr_scale = np.expand_dims(ctr_scale, axis=0) mesh_info['mesh_center'][i] += ctr_scale[i, :-2]/len(args.modality) mesh_info['mesh_scale'][i] += ctr_scale[i, -2]/len(args.modality) mesh_info['mesh_area'][i] += ctr_scale[i, -1]/len(args.modality) for i in range(len(args.mesh_ids)): r = mesh_info['mesh_scale'][i]*2 scale = r * np.mean(args.size) area_ratio = mesh_info['mesh_area'][i]/(4*np.pi*r*r) mesh_info['edge_length_scaled'][i] = np.array(mesh_info['edge_length']) * scale * scale * area_ratio print("Mesh center, scale: ", mesh_info['mesh_center'], mesh_info['mesh_scale']) print("Mesh edge: ", mesh_info['edge_length_scaled']) """## Set up train and validation datasets Note that we apply image augmentation to our training dataset but not our validation dataset. 
""" tr_cfg = {'change_intensity': {"scale": [0.9, 1.1],"shift": [-0.1, 0.1]}} tr_preprocessing_fn = functools.partial(_augment_deformnet, **tr_cfg) if_seg = True if args.num_seg>0 else False val_preprocessing_fn = functools.partial(_augment_deformnet) train_ds_list, val_ds_list = [], [] train_ds_num, val_ds_num = [], [] for data_folder_out, attr in zip(args.im_trains, args.attr_trains): x_train_filenames_i = buildImageDataset(data_folder_out, args.modality, 41, mode='_train'+attr, ext=args.file_pattern) train_ds_num.append(len(x_train_filenames_i)) train_ds_i = get_baseline_dataset_deformnet(x_train_filenames_i, preproc_fn=tr_preprocessing_fn, mesh_ids=args.mesh_ids, \ shuffle_buffer=args.shuffle_buffer_size, if_seg=if_seg) train_ds_list.append(train_ds_i) for data_val_folder_out, attr in zip(args.im_vals, args.attr_vals): x_val_filenames_i = buildImageDataset(data_val_folder_out, args.modality, 41, mode='_val'+attr, ext=args.file_pattern) val_ds_num.append(len(x_val_filenames_i)) val_ds_i = get_baseline_dataset_deformnet(x_val_filenames_i, preproc_fn=val_preprocessing_fn, mesh_ids=args.mesh_ids, \ shuffle_buffer=args.shuffle_buffer_size, if_seg=if_seg) val_ds_list.append(val_ds_i) train_data_weights = [w/np.sum(args.train_data_weights) for w in args.train_data_weights] val_data_weights = [w/np.sum(args.val_data_weights) for w in args.val_data_weights] print("Sampling probability for train and val datasets: ", train_data_weights, val_data_weights) train_ds = tf.data.experimental.sample_from_datasets(train_ds_list, weights=train_data_weights) train_ds = train_ds.batch(args.batch_size) val_ds = tf.data.experimental.sample_from_datasets(val_ds_list, weights=val_data_weights) val_ds = val_ds.batch(args.batch_size)
"""# Build the model""" model = DeformNet(args.batch_size, img_shape, mesh_info, amplify_factor=args.amplify_factor,num_mesh=len(args.mesh_ids), num_seg=args.num_seg) unet_gcn = model.build_keras() unet_gcn.summary(line_length=150) adam = Adam(lr=args.lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=True) output_keys = [node.op.name.split('/')[0] for node in unet_gcn.outputs] print("Output Keys: ", output_keys) if args.num_seg >0: losses = [ mesh_loss_geometric_cf(mesh_info, 3, args.weights, args.cf_ratio, mesh_info['edge_length_scaled'][(i-1)%len(args.mesh_ids)]) for i in range(1, len(output_keys))] losses = [binary_bce_dice_loss] + losses else: losses = [ mesh_loss_geometric_cf(mesh_info, 3, args.weights, args.cf_ratio, mesh_info['edge_length_scaled'][i%len(args.mesh_ids)]) for i in range(len(output_keys))] losses = dict(zip(output_keys, losses)) metric_loss, metric_key = [], [] for i in range(1, len(args.mesh_ids)+1): metric_key.append(output_keys[-i]) metric_loss.append(point_loss_cf) metrics_losses = dict(zip(metric_key, metric_loss)) metric_loss_weights = list(np.ones(len(args.mesh_ids))) loss_weights = list(np.ones(len(output_keys))) if args.num_seg > 0: loss_weights[0] = args.seg_weight unet_gcn.compile(optimizer=adam, loss=losses,loss_weights=loss_weights, metrics=metrics_losses) """ Setup model checkpoint """ save_model_path = os.path.join(args.output, "weights_gcn.hdf5") cp_cd = SaveModelOnCD(metric_key, save_model_path, patience=50) lr_schedule = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=10, min_lr=0.000005) call_backs = [cp_cd,lr_schedule] try: if args.pre_train != '': unet_gcn.load_weights(args.pre_train) else: unet_gcn.load_weights(save_model_path) except Exception as e: print("Model not loaded", e) """ Training """ history =unet_gcn.fit(train_ds, steps_per_epoch=int(np.ceil(num_train_examples/float(args.batch_size))), epochs=args.num_epoch, validation_data=val_ds, validation_steps= int(np.ceil(num_val_examples / float(args.batch_size))), callbacks=call_backs) with open(save_loss_path+"_history", 'wb') as handle: # saving the history pickle.dump(history.history, handle)
num_train_examples = train_ds_num[np.argmax(train_data_weights)]/np.max(train_data_weights) num_val_examples = val_ds_num[np.argmax(val_data_weights)]/np.max(val_data_weights) print("Number of train, val samples after reweighting: ", num_train_examples, num_val_examples)
random_line_split
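The lines split out as the middle of this record compute the effective epoch size after dataset re-weighting: tf.data.experimental.sample_from_datasets draws from each dataset with the normalized probabilities, so the script takes the most heavily weighted dataset as the reference and divides its size by its sampling probability. A small numeric sketch of that arithmetic (the sample counts and weights are made up):

import numpy as np

train_ds_num = [800, 200]   # samples available per dataset (hypothetical)
raw_weights = [3.0, 1.0]    # values as passed via --train_data_weights

# Normalize to sampling probabilities, as done before sample_from_datasets.
train_data_weights = [w / np.sum(raw_weights) for w in raw_weights]   # [0.75, 0.25]

# One "epoch" is sized so the dominant dataset is seen roughly once.
num_train_examples = train_ds_num[np.argmax(train_data_weights)] / np.max(train_data_weights)
print(train_data_weights, num_train_examples)   # [0.75, 0.25] 1066.67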
mod.rs
// Copyright © 2018 Cormac O'Brien // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. mod music; pub use music::MusicPlayer; use std::{ cell::{Cell, RefCell}, io::{self, BufReader, Cursor, Read}, }; use crate::common::vfs::{Vfs, VfsError}; use cgmath::{InnerSpace, Vector3}; use rodio::{ source::{Buffered, SamplesConverter}, Decoder, OutputStreamHandle, Sink, Source, }; use thiserror::Error; use chrono::Duration; pub const DISTANCE_ATTENUATION_FACTOR: f32 = 0.001; const MAX_ENTITY_CHANNELS: usize = 128; #[derive(Error, Debug)] pub enum SoundError { #[error("No such music track: {0}")] NoSuchTrack(String), #[error("I/O error: {0}")] Io(#[from] io::Error), #[error("Virtual filesystem error: {0}")] Vfs(#[from] VfsError), #[error("WAV decoder error: {0}")] Decoder(#[from] rodio::decoder::DecoderError), } /// Data needed for sound spatialization. /// /// This struct is updated every frame. #[derive(Debug)] pub struct Listener { origin: Cell<Vector3<f32>>, left_ear: Cell<Vector3<f32>>, right_ear: Cell<Vector3<f32>>, } impl Listener { pub fn new() -> Listener { Listener { origin: Cell::new(Vector3::new(0.0, 0.0, 0.0)), left_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)), right_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)), } } pub fn origin(&self) -> Vector3<f32> { self.origin.get() } pub fn left_ear(&self) -> Vector3<f32> { self.left_ear.get() } pub fn right_ear(&self) -> Vector3<f32> { self.right_ear.get() } pub fn s
&self, new_origin: Vector3<f32>) { self.origin.set(new_origin); } pub fn set_left_ear(&self, new_origin: Vector3<f32>) { self.left_ear.set(new_origin); } pub fn set_right_ear(&self, new_origin: Vector3<f32>) { self.right_ear.set(new_origin); } pub fn attenuate( &self, emitter_origin: Vector3<f32>, base_volume: f32, attenuation: f32, ) -> f32 { let decay = (emitter_origin - self.origin.get()).magnitude() * attenuation * DISTANCE_ATTENUATION_FACTOR; let volume = ((1.0 - decay) * base_volume).max(0.0); volume } } #[derive(Clone)] pub struct AudioSource(Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>); impl AudioSource { pub fn load<S>(vfs: &Vfs, name: S) -> Result<AudioSource, SoundError> where S: AsRef<str>, { let name = name.as_ref(); let full_path = "sound/".to_owned() + name; let mut file = vfs.open(&full_path)?; let mut data = Vec::new(); file.read_to_end(&mut data)?; let src = Decoder::new(Cursor::new(data))? .convert_samples() .buffered(); Ok(AudioSource(src)) } } pub struct StaticSound { origin: Vector3<f32>, sink: RefCell<Sink>, volume: f32, attenuation: f32, } impl StaticSound { pub fn new( stream: &OutputStreamHandle, origin: Vector3<f32>, src: AudioSource, volume: f32, attenuation: f32, listener: &Listener, ) -> StaticSound { // TODO: handle PlayError once PR accepted let sink = Sink::try_new(&stream).unwrap(); let infinite = src.0.clone().repeat_infinite(); sink.append(infinite); sink.set_volume(listener.attenuate(origin, volume, attenuation)); StaticSound { origin, sink: RefCell::new(sink), volume, attenuation, } } pub fn update(&self, listener: &Listener) { let sink = self.sink.borrow_mut(); sink.set_volume(listener.attenuate(self.origin, self.volume, self.attenuation)); } } /// Represents a single audio channel, capable of playing one sound at a time. pub struct Channel { stream: OutputStreamHandle, sink: RefCell<Option<Sink>>, master_vol: Cell<f32>, attenuation: Cell<f32>, } impl Channel { /// Create a new `Channel` backed by the given `Device`. pub fn new(stream: OutputStreamHandle) -> Channel { Channel { stream, sink: RefCell::new(None), master_vol: Cell::new(0.0), attenuation: Cell::new(0.0), } } /// Play a new sound on this channel, cutting off any sound that was previously playing. pub fn play( &self, src: AudioSource, ent_pos: Vector3<f32>, listener: &Listener, volume: f32, attenuation: f32, ) { self.master_vol.set(volume); self.attenuation.set(attenuation); // stop the old sound self.sink.replace(None); // start the new sound let new_sink = Sink::try_new(&self.stream).unwrap(); new_sink.append(src.0); new_sink.set_volume(listener.attenuate( ent_pos, self.master_vol.get(), self.attenuation.get(), )); self.sink.replace(Some(new_sink)); } pub fn update(&self, ent_pos: Vector3<f32>, listener: &Listener) { if let Some(ref sink) = *self.sink.borrow_mut() { // attenuate using quake coordinates since distance is the same either way sink.set_volume(listener.attenuate( ent_pos, self.master_vol.get(), self.attenuation.get(), )); }; } /// Stop the sound currently playing on this channel, if there is one. pub fn stop(&self) { self.sink.replace(None); } /// Returns whether or not this `Channel` is currently in use. 
pub fn in_use(&self) -> bool { let replace_sink; match *self.sink.borrow() { Some(ref sink) => replace_sink = sink.empty(), None => return false, } // if the sink isn't in use, free it if replace_sink { self.sink.replace(None); false } else { true } } } pub struct EntityChannel { start_time: Duration, // if None, sound is associated with a temp entity ent_id: Option<usize>, ent_channel: i8, channel: Channel, } impl EntityChannel { pub fn channel(&self) -> &Channel { &self.channel } pub fn entity_id(&self) -> Option<usize> { self.ent_id } } pub struct EntityMixer { stream: OutputStreamHandle, // TODO: replace with an array once const type parameters are implemented channels: Box<[Option<EntityChannel>]>, } impl EntityMixer { pub fn new(stream: OutputStreamHandle) -> EntityMixer { let mut channel_vec = Vec::new(); for _ in 0..MAX_ENTITY_CHANNELS { channel_vec.push(None); } EntityMixer { stream, channels: channel_vec.into_boxed_slice(), } } fn find_free_channel(&self, ent_id: Option<usize>, ent_channel: i8) -> usize { let mut oldest = 0; for (i, channel) in self.channels.iter().enumerate() { match *channel { Some(ref chan) => { // if this channel is free, return it if !chan.channel.in_use() { return i; } // replace sounds on the same entity channel if ent_channel != 0 && chan.ent_id == ent_id && (chan.ent_channel == ent_channel || ent_channel == -1) { return i; } // TODO: don't clobber player sounds with monster sounds // keep track of which sound started the earliest match self.channels[oldest] { Some(ref o) => { if chan.start_time < o.start_time { oldest = i; } } None => oldest = i, } } None => return i, } } // if there are no good channels, just replace the one that's been running the longest oldest } pub fn start_sound( &mut self, src: AudioSource, time: Duration, ent_id: Option<usize>, ent_channel: i8, volume: f32, attenuation: f32, origin: Vector3<f32>, listener: &Listener, ) { let chan_id = self.find_free_channel(ent_id, ent_channel); let new_channel = Channel::new(self.stream.clone()); new_channel.play( src.clone(), origin, listener, volume, attenuation, ); self.channels[chan_id] = Some(EntityChannel { start_time: time, ent_id, ent_channel, channel: new_channel, }) } pub fn iter_entity_channels(&self) -> impl Iterator<Item = &EntityChannel> { self.channels.iter().filter_map(|e| e.as_ref()) } pub fn stream(&self) -> OutputStreamHandle { self.stream.clone() } }
et_origin(
identifier_name
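Listener::attenuate in the record above scales a sound's volume down linearly with its distance from the listener: volume = max((1 - distance * attenuation * DISTANCE_ATTENUATION_FACTOR) * base_volume, 0). A short worked example of that formula, written in Python purely for illustration (the positions and volumes are arbitrary):

DISTANCE_ATTENUATION_FACTOR = 0.001

def attenuate(listener_origin, emitter_origin, base_volume, attenuation):
    # Euclidean distance between emitter and listener.
    distance = sum((e - l) ** 2 for e, l in zip(emitter_origin, listener_origin)) ** 0.5
    decay = distance * attenuation * DISTANCE_ATTENUATION_FACTOR
    # Volume falls off linearly with distance and is clamped at zero.
    return max((1.0 - decay) * base_volume, 0.0)

# A sound 500 units away with attenuation 1.0 plays at half volume;
# beyond 1000 units it is silent.
print(attenuate((0, 0, 0), (500, 0, 0), 1.0, 1.0))    # 0.5
print(attenuate((0, 0, 0), (1200, 0, 0), 1.0, 1.0))   # 0.0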
mod.rs
// Copyright © 2018 Cormac O'Brien // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. mod music; pub use music::MusicPlayer; use std::{ cell::{Cell, RefCell}, io::{self, BufReader, Cursor, Read}, }; use crate::common::vfs::{Vfs, VfsError};
}; use thiserror::Error; use chrono::Duration; pub const DISTANCE_ATTENUATION_FACTOR: f32 = 0.001; const MAX_ENTITY_CHANNELS: usize = 128; #[derive(Error, Debug)] pub enum SoundError { #[error("No such music track: {0}")] NoSuchTrack(String), #[error("I/O error: {0}")] Io(#[from] io::Error), #[error("Virtual filesystem error: {0}")] Vfs(#[from] VfsError), #[error("WAV decoder error: {0}")] Decoder(#[from] rodio::decoder::DecoderError), } /// Data needed for sound spatialization. /// /// This struct is updated every frame. #[derive(Debug)] pub struct Listener { origin: Cell<Vector3<f32>>, left_ear: Cell<Vector3<f32>>, right_ear: Cell<Vector3<f32>>, } impl Listener { pub fn new() -> Listener { Listener { origin: Cell::new(Vector3::new(0.0, 0.0, 0.0)), left_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)), right_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)), } } pub fn origin(&self) -> Vector3<f32> { self.origin.get() } pub fn left_ear(&self) -> Vector3<f32> { self.left_ear.get() } pub fn right_ear(&self) -> Vector3<f32> { self.right_ear.get() } pub fn set_origin(&self, new_origin: Vector3<f32>) { self.origin.set(new_origin); } pub fn set_left_ear(&self, new_origin: Vector3<f32>) { self.left_ear.set(new_origin); } pub fn set_right_ear(&self, new_origin: Vector3<f32>) { self.right_ear.set(new_origin); } pub fn attenuate( &self, emitter_origin: Vector3<f32>, base_volume: f32, attenuation: f32, ) -> f32 { let decay = (emitter_origin - self.origin.get()).magnitude() * attenuation * DISTANCE_ATTENUATION_FACTOR; let volume = ((1.0 - decay) * base_volume).max(0.0); volume } } #[derive(Clone)] pub struct AudioSource(Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>); impl AudioSource { pub fn load<S>(vfs: &Vfs, name: S) -> Result<AudioSource, SoundError> where S: AsRef<str>, { let name = name.as_ref(); let full_path = "sound/".to_owned() + name; let mut file = vfs.open(&full_path)?; let mut data = Vec::new(); file.read_to_end(&mut data)?; let src = Decoder::new(Cursor::new(data))? .convert_samples() .buffered(); Ok(AudioSource(src)) } } pub struct StaticSound { origin: Vector3<f32>, sink: RefCell<Sink>, volume: f32, attenuation: f32, } impl StaticSound { pub fn new( stream: &OutputStreamHandle, origin: Vector3<f32>, src: AudioSource, volume: f32, attenuation: f32, listener: &Listener, ) -> StaticSound { // TODO: handle PlayError once PR accepted let sink = Sink::try_new(&stream).unwrap(); let infinite = src.0.clone().repeat_infinite(); sink.append(infinite); sink.set_volume(listener.attenuate(origin, volume, attenuation)); StaticSound { origin, sink: RefCell::new(sink), volume, attenuation, } } pub fn update(&self, listener: &Listener) { let sink = self.sink.borrow_mut(); sink.set_volume(listener.attenuate(self.origin, self.volume, self.attenuation)); } } /// Represents a single audio channel, capable of playing one sound at a time. pub struct Channel { stream: OutputStreamHandle, sink: RefCell<Option<Sink>>, master_vol: Cell<f32>, attenuation: Cell<f32>, } impl Channel { /// Create a new `Channel` backed by the given `Device`. pub fn new(stream: OutputStreamHandle) -> Channel { Channel { stream, sink: RefCell::new(None), master_vol: Cell::new(0.0), attenuation: Cell::new(0.0), } } /// Play a new sound on this channel, cutting off any sound that was previously playing. 
pub fn play( &self, src: AudioSource, ent_pos: Vector3<f32>, listener: &Listener, volume: f32, attenuation: f32, ) { self.master_vol.set(volume); self.attenuation.set(attenuation); // stop the old sound self.sink.replace(None); // start the new sound let new_sink = Sink::try_new(&self.stream).unwrap(); new_sink.append(src.0); new_sink.set_volume(listener.attenuate( ent_pos, self.master_vol.get(), self.attenuation.get(), )); self.sink.replace(Some(new_sink)); } pub fn update(&self, ent_pos: Vector3<f32>, listener: &Listener) { if let Some(ref sink) = *self.sink.borrow_mut() { // attenuate using quake coordinates since distance is the same either way sink.set_volume(listener.attenuate( ent_pos, self.master_vol.get(), self.attenuation.get(), )); }; } /// Stop the sound currently playing on this channel, if there is one. pub fn stop(&self) { self.sink.replace(None); } /// Returns whether or not this `Channel` is currently in use. pub fn in_use(&self) -> bool { let replace_sink; match *self.sink.borrow() { Some(ref sink) => replace_sink = sink.empty(), None => return false, } // if the sink isn't in use, free it if replace_sink { self.sink.replace(None); false } else { true } } } pub struct EntityChannel { start_time: Duration, // if None, sound is associated with a temp entity ent_id: Option<usize>, ent_channel: i8, channel: Channel, } impl EntityChannel { pub fn channel(&self) -> &Channel { &self.channel } pub fn entity_id(&self) -> Option<usize> { self.ent_id } } pub struct EntityMixer { stream: OutputStreamHandle, // TODO: replace with an array once const type parameters are implemented channels: Box<[Option<EntityChannel>]>, } impl EntityMixer { pub fn new(stream: OutputStreamHandle) -> EntityMixer { let mut channel_vec = Vec::new(); for _ in 0..MAX_ENTITY_CHANNELS { channel_vec.push(None); } EntityMixer { stream, channels: channel_vec.into_boxed_slice(), } } fn find_free_channel(&self, ent_id: Option<usize>, ent_channel: i8) -> usize { let mut oldest = 0; for (i, channel) in self.channels.iter().enumerate() { match *channel { Some(ref chan) => { // if this channel is free, return it if !chan.channel.in_use() { return i; } // replace sounds on the same entity channel if ent_channel != 0 && chan.ent_id == ent_id && (chan.ent_channel == ent_channel || ent_channel == -1) { return i; } // TODO: don't clobber player sounds with monster sounds // keep track of which sound started the earliest match self.channels[oldest] { Some(ref o) => { if chan.start_time < o.start_time { oldest = i; } } None => oldest = i, } } None => return i, } } // if there are no good channels, just replace the one that's been running the longest oldest } pub fn start_sound( &mut self, src: AudioSource, time: Duration, ent_id: Option<usize>, ent_channel: i8, volume: f32, attenuation: f32, origin: Vector3<f32>, listener: &Listener, ) { let chan_id = self.find_free_channel(ent_id, ent_channel); let new_channel = Channel::new(self.stream.clone()); new_channel.play( src.clone(), origin, listener, volume, attenuation, ); self.channels[chan_id] = Some(EntityChannel { start_time: time, ent_id, ent_channel, channel: new_channel, }) } pub fn iter_entity_channels(&self) -> impl Iterator<Item = &EntityChannel> { self.channels.iter().filter_map(|e| e.as_ref()) } pub fn stream(&self) -> OutputStreamHandle { self.stream.clone() } }
use cgmath::{InnerSpace, Vector3}; use rodio::{ source::{Buffered, SamplesConverter}, Decoder, OutputStreamHandle, Sink, Source,
random_line_split
mod.rs
// Copyright © 2018 Cormac O'Brien // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. mod music; pub use music::MusicPlayer; use std::{ cell::{Cell, RefCell}, io::{self, BufReader, Cursor, Read}, }; use crate::common::vfs::{Vfs, VfsError}; use cgmath::{InnerSpace, Vector3}; use rodio::{ source::{Buffered, SamplesConverter}, Decoder, OutputStreamHandle, Sink, Source, }; use thiserror::Error; use chrono::Duration; pub const DISTANCE_ATTENUATION_FACTOR: f32 = 0.001; const MAX_ENTITY_CHANNELS: usize = 128; #[derive(Error, Debug)] pub enum SoundError { #[error("No such music track: {0}")] NoSuchTrack(String), #[error("I/O error: {0}")] Io(#[from] io::Error), #[error("Virtual filesystem error: {0}")] Vfs(#[from] VfsError), #[error("WAV decoder error: {0}")] Decoder(#[from] rodio::decoder::DecoderError), } /// Data needed for sound spatialization. /// /// This struct is updated every frame. #[derive(Debug)] pub struct Listener { origin: Cell<Vector3<f32>>, left_ear: Cell<Vector3<f32>>, right_ear: Cell<Vector3<f32>>, } impl Listener { pub fn new() -> Listener { Listener { origin: Cell::new(Vector3::new(0.0, 0.0, 0.0)), left_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)), right_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)), } } pub fn origin(&self) -> Vector3<f32> { self.origin.get() } pub fn left_ear(&self) -> Vector3<f32> { self.left_ear.get() } pub fn right_ear(&self) -> Vector3<f32> { self.right_ear.get() } pub fn set_origin(&self, new_origin: Vector3<f32>) { self.origin.set(new_origin); } pub fn set_left_ear(&self, new_origin: Vector3<f32>) { self.left_ear.set(new_origin); } pub fn set_right_ear(&self, new_origin: Vector3<f32>) { self.right_ear.set(new_origin); } pub fn attenuate( &self, emitter_origin: Vector3<f32>, base_volume: f32, attenuation: f32, ) -> f32 { let decay = (emitter_origin - self.origin.get()).magnitude() * attenuation * DISTANCE_ATTENUATION_FACTOR; let volume = ((1.0 - decay) * base_volume).max(0.0); volume } } #[derive(Clone)] pub struct AudioSource(Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>); impl AudioSource { pub fn load<S>(vfs: &Vfs, name: S) -> Result<AudioSource, SoundError> where S: AsRef<str>, { let name = name.as_ref(); let full_path = "sound/".to_owned() + name; let mut file = vfs.open(&full_path)?; let mut data = Vec::new(); file.read_to_end(&mut data)?; let src = Decoder::new(Cursor::new(data))? 
.convert_samples() .buffered(); Ok(AudioSource(src)) } } pub struct StaticSound { origin: Vector3<f32>, sink: RefCell<Sink>, volume: f32, attenuation: f32, } impl StaticSound { pub fn new( stream: &OutputStreamHandle, origin: Vector3<f32>, src: AudioSource, volume: f32, attenuation: f32, listener: &Listener, ) -> StaticSound { // TODO: handle PlayError once PR accepted let sink = Sink::try_new(&stream).unwrap(); let infinite = src.0.clone().repeat_infinite(); sink.append(infinite); sink.set_volume(listener.attenuate(origin, volume, attenuation)); StaticSound { origin, sink: RefCell::new(sink), volume, attenuation, } } pub fn update(&self, listener: &Listener) { let sink = self.sink.borrow_mut(); sink.set_volume(listener.attenuate(self.origin, self.volume, self.attenuation)); } } /// Represents a single audio channel, capable of playing one sound at a time. pub struct Channel { stream: OutputStreamHandle, sink: RefCell<Option<Sink>>, master_vol: Cell<f32>, attenuation: Cell<f32>, } impl Channel { /// Create a new `Channel` backed by the given `Device`. pub fn new(stream: OutputStreamHandle) -> Channel { Channel { stream, sink: RefCell::new(None), master_vol: Cell::new(0.0), attenuation: Cell::new(0.0), } } /// Play a new sound on this channel, cutting off any sound that was previously playing. pub fn play( &self, src: AudioSource, ent_pos: Vector3<f32>, listener: &Listener, volume: f32, attenuation: f32, ) { self.master_vol.set(volume); self.attenuation.set(attenuation); // stop the old sound self.sink.replace(None); // start the new sound let new_sink = Sink::try_new(&self.stream).unwrap(); new_sink.append(src.0); new_sink.set_volume(listener.attenuate( ent_pos, self.master_vol.get(), self.attenuation.get(), )); self.sink.replace(Some(new_sink)); } pub fn update(&self, ent_pos: Vector3<f32>, listener: &Listener) { if let Some(ref sink) = *self.sink.borrow_mut() { // attenuate using quake coordinates since distance is the same either way sink.set_volume(listener.attenuate( ent_pos, self.master_vol.get(), self.attenuation.get(), )); }; } /// Stop the sound currently playing on this channel, if there is one. pub fn stop(&self) { self.sink.replace(None); } /// Returns whether or not this `Channel` is currently in use. pub fn in_use(&self) -> bool { let replace_sink; match *self.sink.borrow() { Some(ref sink) => replace_sink = sink.empty(), None => return false, } // if the sink isn't in use, free it if replace_sink { self.sink.replace(None); false } else { true } } } pub struct EntityChannel { start_time: Duration, // if None, sound is associated with a temp entity ent_id: Option<usize>, ent_channel: i8, channel: Channel, } impl EntityChannel { pub fn channel(&self) -> &Channel { &self.channel } pub fn entity_id(&self) -> Option<usize> { self.ent_id } } pub struct EntityMixer { stream: OutputStreamHandle, // TODO: replace with an array once const type parameters are implemented channels: Box<[Option<EntityChannel>]>, } impl EntityMixer { pub fn new(stream: OutputStreamHandle) -> EntityMixer { let mut channel_vec = Vec::new(); for _ in 0..MAX_ENTITY_CHANNELS { channel_vec.push(None); } EntityMixer { stream, channels: channel_vec.into_boxed_slice(), } } fn find_free_channel(&self, ent_id: Option<usize>, ent_channel: i8) -> usize {
pub fn start_sound( &mut self, src: AudioSource, time: Duration, ent_id: Option<usize>, ent_channel: i8, volume: f32, attenuation: f32, origin: Vector3<f32>, listener: &Listener, ) { let chan_id = self.find_free_channel(ent_id, ent_channel); let new_channel = Channel::new(self.stream.clone()); new_channel.play( src.clone(), origin, listener, volume, attenuation, ); self.channels[chan_id] = Some(EntityChannel { start_time: time, ent_id, ent_channel, channel: new_channel, }) } pub fn iter_entity_channels(&self) -> impl Iterator<Item = &EntityChannel> { self.channels.iter().filter_map(|e| e.as_ref()) } pub fn stream(&self) -> OutputStreamHandle { self.stream.clone() } }
let mut oldest = 0; for (i, channel) in self.channels.iter().enumerate() { match *channel { Some(ref chan) => { // if this channel is free, return it if !chan.channel.in_use() { return i; } // replace sounds on the same entity channel if ent_channel != 0 && chan.ent_id == ent_id && (chan.ent_channel == ent_channel || ent_channel == -1) { return i; } // TODO: don't clobber player sounds with monster sounds // keep track of which sound started the earliest match self.channels[oldest] { Some(ref o) => { if chan.start_time < o.start_time { oldest = i; } } None => oldest = i, } } None => return i, } } // if there are no good channels, just replace the one that's been running the longest oldest }
identifier_body
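The identifier_body record above masks out find_free_channel, whose selection policy is: take an empty or finished channel if one exists, otherwise replace a sound playing on the same entity channel, otherwise evict the channel whose sound started earliest. A compact Python sketch of that same policy, with simplified field names standing in for the Rust struct fields:

def find_free_channel(channels, ent_id, ent_channel):
    """channels: list of None or dicts with keys in_use, ent_id, ent_channel, start_time."""
    oldest = 0
    for i, chan in enumerate(channels):
        if chan is None:
            return i            # empty slot: take it immediately
        if not chan["in_use"]:
            return i            # previous sound finished: reuse the slot
        # Replace a sound on the same entity channel
        # (an incoming ent_channel of -1 replaces any channel on that entity).
        if ent_channel != 0 and chan["ent_id"] == ent_id and \
                (chan["ent_channel"] == ent_channel or ent_channel == -1):
            return i
        # Otherwise remember the channel whose sound has been playing longest.
        if channels[oldest] is None or chan["start_time"] < channels[oldest]["start_time"]:
            oldest = i
    return oldest               # no good candidate: evict the oldest sound

channels = [
    {"in_use": True, "ent_id": 1, "ent_channel": 2, "start_time": 10},
    {"in_use": True, "ent_id": 7, "ent_channel": 1, "start_time": 5},
]
print(find_free_channel(channels, ent_id=1, ent_channel=2))  # 0: same entity channel
print(find_free_channel(channels, ent_id=9, ent_channel=3))  # 1: oldest start_time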
mod.rs
// Copyright © 2018 Cormac O'Brien // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. mod music; pub use music::MusicPlayer; use std::{ cell::{Cell, RefCell}, io::{self, BufReader, Cursor, Read}, }; use crate::common::vfs::{Vfs, VfsError}; use cgmath::{InnerSpace, Vector3}; use rodio::{ source::{Buffered, SamplesConverter}, Decoder, OutputStreamHandle, Sink, Source, }; use thiserror::Error; use chrono::Duration; pub const DISTANCE_ATTENUATION_FACTOR: f32 = 0.001; const MAX_ENTITY_CHANNELS: usize = 128; #[derive(Error, Debug)] pub enum SoundError { #[error("No such music track: {0}")] NoSuchTrack(String), #[error("I/O error: {0}")] Io(#[from] io::Error), #[error("Virtual filesystem error: {0}")] Vfs(#[from] VfsError), #[error("WAV decoder error: {0}")] Decoder(#[from] rodio::decoder::DecoderError), } /// Data needed for sound spatialization. /// /// This struct is updated every frame. #[derive(Debug)] pub struct Listener { origin: Cell<Vector3<f32>>, left_ear: Cell<Vector3<f32>>, right_ear: Cell<Vector3<f32>>, } impl Listener { pub fn new() -> Listener { Listener { origin: Cell::new(Vector3::new(0.0, 0.0, 0.0)), left_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)), right_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)), } } pub fn origin(&self) -> Vector3<f32> { self.origin.get() } pub fn left_ear(&self) -> Vector3<f32> { self.left_ear.get() } pub fn right_ear(&self) -> Vector3<f32> { self.right_ear.get() } pub fn set_origin(&self, new_origin: Vector3<f32>) { self.origin.set(new_origin); } pub fn set_left_ear(&self, new_origin: Vector3<f32>) { self.left_ear.set(new_origin); } pub fn set_right_ear(&self, new_origin: Vector3<f32>) { self.right_ear.set(new_origin); } pub fn attenuate( &self, emitter_origin: Vector3<f32>, base_volume: f32, attenuation: f32, ) -> f32 { let decay = (emitter_origin - self.origin.get()).magnitude() * attenuation * DISTANCE_ATTENUATION_FACTOR; let volume = ((1.0 - decay) * base_volume).max(0.0); volume } } #[derive(Clone)] pub struct AudioSource(Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>); impl AudioSource { pub fn load<S>(vfs: &Vfs, name: S) -> Result<AudioSource, SoundError> where S: AsRef<str>, { let name = name.as_ref(); let full_path = "sound/".to_owned() + name; let mut file = vfs.open(&full_path)?; let mut data = Vec::new(); file.read_to_end(&mut data)?; let src = Decoder::new(Cursor::new(data))? 
.convert_samples() .buffered(); Ok(AudioSource(src)) } } pub struct StaticSound { origin: Vector3<f32>, sink: RefCell<Sink>, volume: f32, attenuation: f32, } impl StaticSound { pub fn new( stream: &OutputStreamHandle, origin: Vector3<f32>, src: AudioSource, volume: f32, attenuation: f32, listener: &Listener, ) -> StaticSound { // TODO: handle PlayError once PR accepted let sink = Sink::try_new(&stream).unwrap(); let infinite = src.0.clone().repeat_infinite(); sink.append(infinite); sink.set_volume(listener.attenuate(origin, volume, attenuation)); StaticSound { origin, sink: RefCell::new(sink), volume, attenuation, } } pub fn update(&self, listener: &Listener) { let sink = self.sink.borrow_mut(); sink.set_volume(listener.attenuate(self.origin, self.volume, self.attenuation)); } } /// Represents a single audio channel, capable of playing one sound at a time. pub struct Channel { stream: OutputStreamHandle, sink: RefCell<Option<Sink>>, master_vol: Cell<f32>, attenuation: Cell<f32>, } impl Channel { /// Create a new `Channel` backed by the given `Device`. pub fn new(stream: OutputStreamHandle) -> Channel { Channel { stream, sink: RefCell::new(None), master_vol: Cell::new(0.0), attenuation: Cell::new(0.0), } } /// Play a new sound on this channel, cutting off any sound that was previously playing. pub fn play( &self, src: AudioSource, ent_pos: Vector3<f32>, listener: &Listener, volume: f32, attenuation: f32, ) { self.master_vol.set(volume); self.attenuation.set(attenuation); // stop the old sound self.sink.replace(None); // start the new sound let new_sink = Sink::try_new(&self.stream).unwrap(); new_sink.append(src.0); new_sink.set_volume(listener.attenuate( ent_pos, self.master_vol.get(), self.attenuation.get(), )); self.sink.replace(Some(new_sink)); } pub fn update(&self, ent_pos: Vector3<f32>, listener: &Listener) { if let Some(ref sink) = *self.sink.borrow_mut() { // attenuate using quake coordinates since distance is the same either way sink.set_volume(listener.attenuate( ent_pos, self.master_vol.get(), self.attenuation.get(), )); }; } /// Stop the sound currently playing on this channel, if there is one. pub fn stop(&self) { self.sink.replace(None); } /// Returns whether or not this `Channel` is currently in use. pub fn in_use(&self) -> bool { let replace_sink; match *self.sink.borrow() { Some(ref sink) => replace_sink = sink.empty(), None => return false, } // if the sink isn't in use, free it if replace_sink { self.sink.replace(None); false } else { true } } } pub struct EntityChannel { start_time: Duration, // if None, sound is associated with a temp entity ent_id: Option<usize>, ent_channel: i8, channel: Channel, } impl EntityChannel { pub fn channel(&self) -> &Channel { &self.channel } pub fn entity_id(&self) -> Option<usize> { self.ent_id } } pub struct EntityMixer { stream: OutputStreamHandle, // TODO: replace with an array once const type parameters are implemented channels: Box<[Option<EntityChannel>]>, } impl EntityMixer { pub fn new(stream: OutputStreamHandle) -> EntityMixer { let mut channel_vec = Vec::new(); for _ in 0..MAX_ENTITY_CHANNELS { channel_vec.push(None); } EntityMixer { stream, channels: channel_vec.into_boxed_slice(), } } fn find_free_channel(&self, ent_id: Option<usize>, ent_channel: i8) -> usize { let mut oldest = 0; for (i, channel) in self.channels.iter().enumerate() { match *channel { Some(ref chan) => { // if this channel is free, return it if !chan.channel.in_use() {
// replace sounds on the same entity channel if ent_channel != 0 && chan.ent_id == ent_id && (chan.ent_channel == ent_channel || ent_channel == -1) { return i; } // TODO: don't clobber player sounds with monster sounds // keep track of which sound started the earliest match self.channels[oldest] { Some(ref o) => { if chan.start_time < o.start_time { oldest = i; } } None => oldest = i, } } None => return i, } } // if there are no good channels, just replace the one that's been running the longest oldest } pub fn start_sound( &mut self, src: AudioSource, time: Duration, ent_id: Option<usize>, ent_channel: i8, volume: f32, attenuation: f32, origin: Vector3<f32>, listener: &Listener, ) { let chan_id = self.find_free_channel(ent_id, ent_channel); let new_channel = Channel::new(self.stream.clone()); new_channel.play( src.clone(), origin, listener, volume, attenuation, ); self.channels[chan_id] = Some(EntityChannel { start_time: time, ent_id, ent_channel, channel: new_channel, }) } pub fn iter_entity_channels(&self) -> impl Iterator<Item = &EntityChannel> { self.channels.iter().filter_map(|e| e.as_ref()) } pub fn stream(&self) -> OutputStreamHandle { self.stream.clone() } }
return i; }
conditional_block
dropck.rs
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use CrateCtxt; use check::regionck::RegionCtxt; use hir::def_id::DefId; use middle::free_region::FreeRegionMap; use rustc::infer; use middle::region; use rustc::ty::subst::{Subst, Substs}; use rustc::ty::{self, AdtKind, Ty, TyCtxt}; use rustc::traits::{self, Reveal}; use util::nodemap::FnvHashSet; use syntax::ast; use syntax_pos::{self, Span}; /// check_drop_impl confirms that the Drop implementation identfied by /// `drop_impl_did` is not any more specialized than the type it is /// attached to (Issue #8142). /// /// This means: /// /// 1. The self type must be nominal (this is already checked during /// coherence), /// /// 2. The generic region/type parameters of the impl's self-type must /// all be parameters of the Drop impl itself (i.e. no /// specialization like `impl Drop for Foo<i32>`), and, /// /// 3. Any bounds on the generic parameters must be reflected in the /// struct/enum definition for the nominal type itself (i.e. /// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`). /// pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> { let dtor_self_type = ccx.tcx.lookup_item_type(drop_impl_did).ty; let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did); match dtor_self_type.sty { ty::TyAdt(adt_def, self_to_impl_substs) => { ensure_drop_params_and_item_params_correspond(ccx, drop_impl_did, dtor_self_type, adt_def.did)?; ensure_drop_predicates_are_implied_by_item_defn(ccx, drop_impl_did, &dtor_predicates, adt_def.did, self_to_impl_substs) } _ => { // Destructors only work on nominal types. This was // already checked by coherence, so we can panic here. let span = ccx.tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); span_bug!(span, "should have been rejected by coherence check: {}", dtor_self_type); } } } fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, drop_impl_ty: Ty<'tcx>, self_type_did: DefId) -> Result<(), ()> { let tcx = ccx.tcx; let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap(); let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); // check that the impl type can be made to match the trait type. 
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id); tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| { let tcx = infcx.tcx; let mut fulfillment_cx = traits::FulfillmentContext::new(); let named_type = tcx.lookup_item_type(self_type_did).ty; let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs); let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); let fresh_impl_substs = infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did); let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs); if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span), named_type, fresh_impl_self_ty) { let item_span = tcx.map.span(self_type_node_id); struct_span_err!(tcx.sess, drop_impl_span, E0366, "Implementations of Drop cannot be specialized") .span_note(item_span, "Use same sequence of generic type and region \ parameters that is on the struct/enum definition") .emit(); return Err(()); } if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) { // this could be reached when we get lazy normalization infcx.report_fulfillment_errors(errors); return Err(()); } let free_regions = FreeRegionMap::new(); infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id); Ok(()) }) } /// Confirms that every predicate imposed by dtor_predicates is /// implied by assuming the predicates attached to self_type_did. fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, dtor_predicates: &ty::GenericPredicates<'tcx>, self_type_did: DefId, self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> { // Here is an example, analogous to that from // `compare_impl_method`. // // Consider a struct type: // // struct Type<'c, 'b:'c, 'a> { // x: &'a Contents // (contents are irrelevant; // y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.) // } // // and a Drop impl: // // impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> { // fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y) // } // // We start out with self_to_impl_substs, that maps the generic // parameters of Type to that of the Drop impl. //
// definition yields the instantiated assumptions: // // ['y : 'z] // // We then check all of the predicates of the Drop impl: // // ['y:'z, 'x:'y] // // and ensure each is in the list of instantiated // assumptions. Here, `'y:'z` is present, but `'x:'y` is // absent. So we report an error that the Drop impl injected a // predicate that is not present on the struct definition. let tcx = ccx.tcx; let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); // We can assume the predicates attached to struct/enum definition // hold. let generic_assumptions = tcx.lookup_predicates(self_type_did); let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs); let assumptions_in_impl_context = assumptions_in_impl_context.predicates; // An earlier version of this code attempted to do this checking // via the traits::fulfill machinery. However, it ran into trouble // since the fulfill machinery merely turns outlives-predicates // 'a:'b and T:'b into region inference constraints. It is simpler // just to look for all the predicates directly. assert_eq!(dtor_predicates.parent, None); for predicate in &dtor_predicates.predicates { // (We do not need to worry about deep analysis of type // expressions etc because the Drop impls are already forced // to take on a structure that is roughly an alpha-renaming of // the generic parameters of the item definition.) // This path now just checks *all* predicates via the direct // lookup, rather than using fulfill machinery. // // However, it may be more efficient in the future to batch // the analysis together via the fulfill , rather than the // repeated `contains` calls. if !assumptions_in_impl_context.contains(&predicate) { let item_span = tcx.map.span(self_type_node_id); struct_span_err!(tcx.sess, drop_impl_span, E0367, "The requirement `{}` is added only by the Drop impl.", predicate) .span_note(item_span, "The same requirement must be part of \ the struct/enum definition") .emit(); } } if tcx.sess.has_errors() { return Err(()); } Ok(()) } /// check_safety_of_destructor_if_necessary confirms that the type /// expression `typ` conforms to the "Drop Check Rule" from the Sound /// Generic Drop (RFC 769). /// /// ---- /// /// The simplified (*) Drop Check Rule is the following: /// /// Let `v` be some value (either temporary or named) and 'a be some /// lifetime (scope). If the type of `v` owns data of type `D`, where /// /// * (1.) `D` has a lifetime- or type-parametric Drop implementation, /// (where that `Drop` implementation does not opt-out of /// this check via the `unsafe_destructor_blind_to_params` /// attribute), and /// * (2.) the structure of `D` can reach a reference of type `&'a _`, /// /// then 'a must strictly outlive the scope of v. /// /// ---- /// /// This function is meant to by applied to the type for every /// expression in the program. /// /// ---- /// /// (*) The qualifier "simplified" is attached to the above /// definition of the Drop Check Rule, because it is a simplification /// of the original Drop Check rule, which attempted to prove that /// some `Drop` implementations could not possibly access data even if /// it was technically reachable, due to parametricity. /// /// However, (1.) parametricity on its own turned out to be a /// necessary but insufficient condition, and (2.) 
future changes to /// the language are expected to make it impossible to ensure that a /// `Drop` implementation is actually parametric with respect to any /// particular type parameter. (In particular, impl specialization is /// expected to break the needed parametricity property beyond /// repair.) /// /// Therefore we have scaled back Drop-Check to a more conservative /// rule that does not attempt to deduce whether a `Drop` /// implementation could not possible access data of a given lifetime; /// instead Drop-Check now simply assumes that if a destructor has /// access (direct or indirect) to a lifetime parameter, then that /// lifetime must be forced to outlive that destructor's dynamic /// extent. We then provide the `unsafe_destructor_blind_to_params` /// attribute as a way for destructor implementations to opt-out of /// this conservative assumption (and thus assume the obligation of /// ensuring that they do not access data nor invoke methods of /// values that have been previously dropped). /// pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>( rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>, typ: ty::Ty<'tcx>, span: Span, scope: region::CodeExtent) { debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}", typ, scope); let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| { span_bug!(span, "no enclosing scope found for scope: {:?}", scope) }); let result = iterate_over_potentially_unsafe_regions_in_type( &mut DropckContext { rcx: rcx, span: span, parent_scope: parent_scope, breadcrumbs: FnvHashSet() }, TypeContext::Root, typ, 0); match result { Ok(()) => {} Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => { let tcx = rcx.tcx; let mut err = struct_span_err!(tcx.sess, span, E0320, "overflow while adding drop-check rules for {}", typ); match *ctxt { TypeContext::Root => { // no need for an additional note if the overflow // was somehow on the root. } TypeContext::ADT { def_id, variant, field } => { let adt = tcx.lookup_adt_def(def_id); let variant_name = match adt.adt_kind() { AdtKind::Enum => format!("enum {} variant {}", tcx.item_path_str(def_id), variant), AdtKind::Struct => format!("struct {}", tcx.item_path_str(def_id)), AdtKind::Union => format!("union {}", tcx.item_path_str(def_id)), }; span_note!( &mut err, span, "overflowed on {} field {} type: {}", variant_name, field, detected_on_typ); } } err.emit(); } } } enum Error<'tcx> { Overflow(TypeContext, ty::Ty<'tcx>), } #[derive(Copy, Clone)] enum TypeContext { Root, ADT { def_id: DefId, variant: ast::Name, field: ast::Name, } } struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> { rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>, /// types that have already been traversed breadcrumbs: FnvHashSet<Ty<'tcx>>, /// span for error reporting span: Span, /// the scope reachable dtorck types must outlive parent_scope: region::CodeExtent } // `context` is used for reporting overflow errors fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>( cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>, context: TypeContext, ty: Ty<'tcx>, depth: usize) -> Result<(), Error<'tcx>> { let tcx = cx.rcx.tcx; // Issue #22443: Watch out for overflow. While we are careful to // handle regular types properly, non-regular ones cause problems. let recursion_limit = tcx.sess.recursion_limit.get(); if depth / 4 >= recursion_limit { // This can get into rather deep recursion, especially in the // presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T. 
// use a higher recursion limit to avoid errors. return Err(Error::Overflow(context, ty)) } // canoncialize the regions in `ty` before inserting - infinitely many // region variables can refer to the same region. let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty); if !cx.breadcrumbs.insert(ty) { debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?} - cached", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); return Ok(()); // we already visited this type } debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?}", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); // If `typ` has a destructor, then we must ensure that all // borrowed data reachable via `typ` must outlive the parent // of `scope`. This is handled below. // // However, there is an important special case: for any Drop // impl that is tagged as "blind" to their parameters, // we assume that data borrowed via such type parameters // remains unreachable via that Drop impl. // // For example, consider: // // ```rust // #[unsafe_destructor_blind_to_params] // impl<T> Drop for Vec<T> { ... } // ``` // // which does have to be able to drop instances of `T`, but // otherwise cannot read data from `T`. // // Of course, for the type expression passed in for any such // unbounded type parameter `T`, we must resume the recursive // analysis on `T` (since it would be ignored by // type_must_outlive). if has_dtor_of_interest(tcx, ty) { debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} - is a dtorck type!", (0..depth).map(|_| ' ').collect::<String>(), ty); cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span), ty, tcx.mk_region(ty::ReScope(cx.parent_scope))); return Ok(()); } debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?} - checking interior", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); // We still need to ensure all referenced data is safe. match ty.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => { // primitive - definitely safe Ok(()) } ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => { // single-element containers, behave like their element iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } ty::TyAdt(def, substs) if def.is_phantom_data() => { // PhantomData<T> - behaves identically to T let ity = substs.type_at(0); iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } ty::TyAdt(def, substs) => { let did = def.did; for variant in &def.variants { for field in variant.fields.iter() { let fty = field.ty(tcx, substs); let fty = cx.rcx.fcx.resolve_type_vars_with_obligations( cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty)); iterate_over_potentially_unsafe_regions_in_type( cx, TypeContext::ADT { def_id: did, field: field.name, variant: variant.name, }, fty, depth+1)? } } Ok(()) } ty::TyTuple(tys) | ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => { for ty in tys { iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)? } Ok(()) } ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => { // these always come with a witness of liveness (references // explicitly, pointers implicitly, parameters by the // caller). Ok(()) } ty::TyFnDef(..) | ty::TyFnPtr(_) => { // FIXME(#26656): this type is always destruction-safe, but // it implicitly witnesses Self: Fn, which can be false. Ok(()) } ty::TyInfer(..) 
| ty::TyError => { tcx.sess.delay_span_bug(cx.span, "unresolved type in regionck"); Ok(()) } // these are always dtorck ty::TyTrait(..) | ty::TyProjection(_) | ty::TyAnon(..) => bug!(), } } fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { ty::TyAdt(def, _) => { def.is_dtorck(tcx) } ty::TyTrait(..) | ty::TyProjection(..) | ty::TyAnon(..) => { debug!("ty: {:?} isn't known, and therefore is a dropck type", ty); true }, _ => false } }
// self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x} // // Applying this to the predicates (i.e. assumptions) provided by the item
random_line_split
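The checks in the dropck.rs rows surrounding this point enforce two rules on `Drop` impls: the self type may not be more specialized than the type definition (E0366), and the impl may not introduce bounds the type definition does not already carry (E0367). Below is a small sketch of an impl that satisfies both rules, with the rejected variants noted in comments; the type names are invented for illustration.

```rust
// Accepted: the Drop impl repeats exactly the generic parameters and bounds
// of the type definition, so it is neither specialized nor adds requirements.
struct S<T: Clone> {
    value: T,
}

impl<T: Clone> Drop for S<T> {
    fn drop(&mut self) {
        // This bound is already implied by the struct definition, so using it
        // inside the destructor is fine.
        let _copy = self.value.clone();
    }
}

// Rejected with E0366 ("Implementations of Drop cannot be specialized"):
//     impl Drop for S<i32> { fn drop(&mut self) {} }
//
// Rejected with E0367 ("The requirement `T: Clone` is added only by the Drop impl."):
//     struct S2<T> { value: T }
//     impl<T: Clone> Drop for S2<T> { fn drop(&mut self) {} }

fn main() {
    let _s = S { value: 1u8 };
}
```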
dropck.rs
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use CrateCtxt; use check::regionck::RegionCtxt; use hir::def_id::DefId; use middle::free_region::FreeRegionMap; use rustc::infer; use middle::region; use rustc::ty::subst::{Subst, Substs}; use rustc::ty::{self, AdtKind, Ty, TyCtxt}; use rustc::traits::{self, Reveal}; use util::nodemap::FnvHashSet; use syntax::ast; use syntax_pos::{self, Span}; /// check_drop_impl confirms that the Drop implementation identfied by /// `drop_impl_did` is not any more specialized than the type it is /// attached to (Issue #8142). /// /// This means: /// /// 1. The self type must be nominal (this is already checked during /// coherence), /// /// 2. The generic region/type parameters of the impl's self-type must /// all be parameters of the Drop impl itself (i.e. no /// specialization like `impl Drop for Foo<i32>`), and, /// /// 3. Any bounds on the generic parameters must be reflected in the /// struct/enum definition for the nominal type itself (i.e. /// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`). /// pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> { let dtor_self_type = ccx.tcx.lookup_item_type(drop_impl_did).ty; let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did); match dtor_self_type.sty { ty::TyAdt(adt_def, self_to_impl_substs) => { ensure_drop_params_and_item_params_correspond(ccx, drop_impl_did, dtor_self_type, adt_def.did)?; ensure_drop_predicates_are_implied_by_item_defn(ccx, drop_impl_did, &dtor_predicates, adt_def.did, self_to_impl_substs) } _ => { // Destructors only work on nominal types. This was // already checked by coherence, so we can panic here. let span = ccx.tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); span_bug!(span, "should have been rejected by coherence check: {}", dtor_self_type); } } } fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, drop_impl_ty: Ty<'tcx>, self_type_did: DefId) -> Result<(), ()>
/// Confirms that every predicate imposed by dtor_predicates is /// implied by assuming the predicates attached to self_type_did. fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, dtor_predicates: &ty::GenericPredicates<'tcx>, self_type_did: DefId, self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> { // Here is an example, analogous to that from // `compare_impl_method`. // // Consider a struct type: // // struct Type<'c, 'b:'c, 'a> { // x: &'a Contents // (contents are irrelevant; // y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.) // } // // and a Drop impl: // // impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> { // fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y) // } // // We start out with self_to_impl_substs, that maps the generic // parameters of Type to that of the Drop impl. // // self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x} // // Applying this to the predicates (i.e. assumptions) provided by the item // definition yields the instantiated assumptions: // // ['y : 'z] // // We then check all of the predicates of the Drop impl: // // ['y:'z, 'x:'y] // // and ensure each is in the list of instantiated // assumptions. Here, `'y:'z` is present, but `'x:'y` is // absent. So we report an error that the Drop impl injected a // predicate that is not present on the struct definition. let tcx = ccx.tcx; let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); // We can assume the predicates attached to struct/enum definition // hold. let generic_assumptions = tcx.lookup_predicates(self_type_did); let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs); let assumptions_in_impl_context = assumptions_in_impl_context.predicates; // An earlier version of this code attempted to do this checking // via the traits::fulfill machinery. However, it ran into trouble // since the fulfill machinery merely turns outlives-predicates // 'a:'b and T:'b into region inference constraints. It is simpler // just to look for all the predicates directly. assert_eq!(dtor_predicates.parent, None); for predicate in &dtor_predicates.predicates { // (We do not need to worry about deep analysis of type // expressions etc because the Drop impls are already forced // to take on a structure that is roughly an alpha-renaming of // the generic parameters of the item definition.) // This path now just checks *all* predicates via the direct // lookup, rather than using fulfill machinery. // // However, it may be more efficient in the future to batch // the analysis together via the fulfill , rather than the // repeated `contains` calls. if !assumptions_in_impl_context.contains(&predicate) { let item_span = tcx.map.span(self_type_node_id); struct_span_err!(tcx.sess, drop_impl_span, E0367, "The requirement `{}` is added only by the Drop impl.", predicate) .span_note(item_span, "The same requirement must be part of \ the struct/enum definition") .emit(); } } if tcx.sess.has_errors() { return Err(()); } Ok(()) } /// check_safety_of_destructor_if_necessary confirms that the type /// expression `typ` conforms to the "Drop Check Rule" from the Sound /// Generic Drop (RFC 769). /// /// ---- /// /// The simplified (*) Drop Check Rule is the following: /// /// Let `v` be some value (either temporary or named) and 'a be some /// lifetime (scope). 
If the type of `v` owns data of type `D`, where /// /// * (1.) `D` has a lifetime- or type-parametric Drop implementation, /// (where that `Drop` implementation does not opt-out of /// this check via the `unsafe_destructor_blind_to_params` /// attribute), and /// * (2.) the structure of `D` can reach a reference of type `&'a _`, /// /// then 'a must strictly outlive the scope of v. /// /// ---- /// /// This function is meant to by applied to the type for every /// expression in the program. /// /// ---- /// /// (*) The qualifier "simplified" is attached to the above /// definition of the Drop Check Rule, because it is a simplification /// of the original Drop Check rule, which attempted to prove that /// some `Drop` implementations could not possibly access data even if /// it was technically reachable, due to parametricity. /// /// However, (1.) parametricity on its own turned out to be a /// necessary but insufficient condition, and (2.) future changes to /// the language are expected to make it impossible to ensure that a /// `Drop` implementation is actually parametric with respect to any /// particular type parameter. (In particular, impl specialization is /// expected to break the needed parametricity property beyond /// repair.) /// /// Therefore we have scaled back Drop-Check to a more conservative /// rule that does not attempt to deduce whether a `Drop` /// implementation could not possible access data of a given lifetime; /// instead Drop-Check now simply assumes that if a destructor has /// access (direct or indirect) to a lifetime parameter, then that /// lifetime must be forced to outlive that destructor's dynamic /// extent. We then provide the `unsafe_destructor_blind_to_params` /// attribute as a way for destructor implementations to opt-out of /// this conservative assumption (and thus assume the obligation of /// ensuring that they do not access data nor invoke methods of /// values that have been previously dropped). /// pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>( rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>, typ: ty::Ty<'tcx>, span: Span, scope: region::CodeExtent) { debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}", typ, scope); let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| { span_bug!(span, "no enclosing scope found for scope: {:?}", scope) }); let result = iterate_over_potentially_unsafe_regions_in_type( &mut DropckContext { rcx: rcx, span: span, parent_scope: parent_scope, breadcrumbs: FnvHashSet() }, TypeContext::Root, typ, 0); match result { Ok(()) => {} Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => { let tcx = rcx.tcx; let mut err = struct_span_err!(tcx.sess, span, E0320, "overflow while adding drop-check rules for {}", typ); match *ctxt { TypeContext::Root => { // no need for an additional note if the overflow // was somehow on the root. 
} TypeContext::ADT { def_id, variant, field } => { let adt = tcx.lookup_adt_def(def_id); let variant_name = match adt.adt_kind() { AdtKind::Enum => format!("enum {} variant {}", tcx.item_path_str(def_id), variant), AdtKind::Struct => format!("struct {}", tcx.item_path_str(def_id)), AdtKind::Union => format!("union {}", tcx.item_path_str(def_id)), }; span_note!( &mut err, span, "overflowed on {} field {} type: {}", variant_name, field, detected_on_typ); } } err.emit(); } } } enum Error<'tcx> { Overflow(TypeContext, ty::Ty<'tcx>), } #[derive(Copy, Clone)] enum TypeContext { Root, ADT { def_id: DefId, variant: ast::Name, field: ast::Name, } } struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> { rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>, /// types that have already been traversed breadcrumbs: FnvHashSet<Ty<'tcx>>, /// span for error reporting span: Span, /// the scope reachable dtorck types must outlive parent_scope: region::CodeExtent } // `context` is used for reporting overflow errors fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>( cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>, context: TypeContext, ty: Ty<'tcx>, depth: usize) -> Result<(), Error<'tcx>> { let tcx = cx.rcx.tcx; // Issue #22443: Watch out for overflow. While we are careful to // handle regular types properly, non-regular ones cause problems. let recursion_limit = tcx.sess.recursion_limit.get(); if depth / 4 >= recursion_limit { // This can get into rather deep recursion, especially in the // presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T. // use a higher recursion limit to avoid errors. return Err(Error::Overflow(context, ty)) } // canoncialize the regions in `ty` before inserting - infinitely many // region variables can refer to the same region. let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty); if !cx.breadcrumbs.insert(ty) { debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?} - cached", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); return Ok(()); // we already visited this type } debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?}", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); // If `typ` has a destructor, then we must ensure that all // borrowed data reachable via `typ` must outlive the parent // of `scope`. This is handled below. // // However, there is an important special case: for any Drop // impl that is tagged as "blind" to their parameters, // we assume that data borrowed via such type parameters // remains unreachable via that Drop impl. // // For example, consider: // // ```rust // #[unsafe_destructor_blind_to_params] // impl<T> Drop for Vec<T> { ... } // ``` // // which does have to be able to drop instances of `T`, but // otherwise cannot read data from `T`. // // Of course, for the type expression passed in for any such // unbounded type parameter `T`, we must resume the recursive // analysis on `T` (since it would be ignored by // type_must_outlive). 
if has_dtor_of_interest(tcx, ty) { debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} - is a dtorck type!", (0..depth).map(|_| ' ').collect::<String>(), ty); cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span), ty, tcx.mk_region(ty::ReScope(cx.parent_scope))); return Ok(()); } debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?} - checking interior", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); // We still need to ensure all referenced data is safe. match ty.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => { // primitive - definitely safe Ok(()) } ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => { // single-element containers, behave like their element iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } ty::TyAdt(def, substs) if def.is_phantom_data() => { // PhantomData<T> - behaves identically to T let ity = substs.type_at(0); iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } ty::TyAdt(def, substs) => { let did = def.did; for variant in &def.variants { for field in variant.fields.iter() { let fty = field.ty(tcx, substs); let fty = cx.rcx.fcx.resolve_type_vars_with_obligations( cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty)); iterate_over_potentially_unsafe_regions_in_type( cx, TypeContext::ADT { def_id: did, field: field.name, variant: variant.name, }, fty, depth+1)? } } Ok(()) } ty::TyTuple(tys) | ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => { for ty in tys { iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)? } Ok(()) } ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => { // these always come with a witness of liveness (references // explicitly, pointers implicitly, parameters by the // caller). Ok(()) } ty::TyFnDef(..) | ty::TyFnPtr(_) => { // FIXME(#26656): this type is always destruction-safe, but // it implicitly witnesses Self: Fn, which can be false. Ok(()) } ty::TyInfer(..) | ty::TyError => { tcx.sess.delay_span_bug(cx.span, "unresolved type in regionck"); Ok(()) } // these are always dtorck ty::TyTrait(..) | ty::TyProjection(_) | ty::TyAnon(..) => bug!(), } } fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { ty::TyAdt(def, _) => { def.is_dtorck(tcx) } ty::TyTrait(..) | ty::TyProjection(..) | ty::TyAnon(..) => { debug!("ty: {:?} isn't known, and therefore is a dropck type", ty); true }, _ => false } }
{ let tcx = ccx.tcx; let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap(); let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); // check that the impl type can be made to match the trait type. let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id); tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| { let tcx = infcx.tcx; let mut fulfillment_cx = traits::FulfillmentContext::new(); let named_type = tcx.lookup_item_type(self_type_did).ty; let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs); let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); let fresh_impl_substs = infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did); let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs); if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span), named_type, fresh_impl_self_ty) { let item_span = tcx.map.span(self_type_node_id); struct_span_err!(tcx.sess, drop_impl_span, E0366, "Implementations of Drop cannot be specialized") .span_note(item_span, "Use same sequence of generic type and region \ parameters that is on the struct/enum definition") .emit(); return Err(()); } if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) { // this could be reached when we get lazy normalization infcx.report_fulfillment_errors(errors); return Err(()); } let free_regions = FreeRegionMap::new(); infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id); Ok(()) }) }
identifier_body
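The worked example in the comments of `ensure_drop_predicates_are_implied_by_item_defn` above uses a `Type<'c, 'b:'c, 'a>` whose Drop impl is only allowed to require `'y: 'z`, the image of the struct's own `'b: 'c` bound under the substitution. A compilable sketch of that situation follows; the field types are placeholders chosen only to force the described lifetime relationships.

```rust
use std::cell::Cell;

struct Type<'c, 'b: 'c, 'a> {
    x: &'a u8,
    y: &'c Cell<&'b u8>,
}

// Accepted: the only predicate the impl relies on, `'y: 'z`, is the
// instantiated form of the struct's own `'b: 'c` bound. Also requiring
// `'x: 'y` (a bound the struct does not carry, which `self.y.set(self.x)`
// would need) is rejected with E0367.
impl<'z, 'y: 'z, 'x> Drop for Type<'z, 'y, 'x> {
    fn drop(&mut self) {
        let _ = *self.x;           // reading the borrow needs no extra bound
        self.y.set(self.y.get());  // needs nothing beyond `'y: 'z`
    }
}

fn main() {
    let v = 1u8;
    let cell = Cell::new(&v);
    let t = Type { x: &v, y: &cell };
    drop(t);
}
```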
dropck.rs
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use CrateCtxt; use check::regionck::RegionCtxt; use hir::def_id::DefId; use middle::free_region::FreeRegionMap; use rustc::infer; use middle::region; use rustc::ty::subst::{Subst, Substs}; use rustc::ty::{self, AdtKind, Ty, TyCtxt}; use rustc::traits::{self, Reveal}; use util::nodemap::FnvHashSet; use syntax::ast; use syntax_pos::{self, Span}; /// check_drop_impl confirms that the Drop implementation identfied by /// `drop_impl_did` is not any more specialized than the type it is /// attached to (Issue #8142). /// /// This means: /// /// 1. The self type must be nominal (this is already checked during /// coherence), /// /// 2. The generic region/type parameters of the impl's self-type must /// all be parameters of the Drop impl itself (i.e. no /// specialization like `impl Drop for Foo<i32>`), and, /// /// 3. Any bounds on the generic parameters must be reflected in the /// struct/enum definition for the nominal type itself (i.e. /// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`). /// pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> { let dtor_self_type = ccx.tcx.lookup_item_type(drop_impl_did).ty; let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did); match dtor_self_type.sty { ty::TyAdt(adt_def, self_to_impl_substs) => { ensure_drop_params_and_item_params_correspond(ccx, drop_impl_did, dtor_self_type, adt_def.did)?; ensure_drop_predicates_are_implied_by_item_defn(ccx, drop_impl_did, &dtor_predicates, adt_def.did, self_to_impl_substs) } _ => { // Destructors only work on nominal types. This was // already checked by coherence, so we can panic here. let span = ccx.tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); span_bug!(span, "should have been rejected by coherence check: {}", dtor_self_type); } } } fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, drop_impl_ty: Ty<'tcx>, self_type_did: DefId) -> Result<(), ()> { let tcx = ccx.tcx; let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap(); let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); // check that the impl type can be made to match the trait type. 
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id); tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| { let tcx = infcx.tcx; let mut fulfillment_cx = traits::FulfillmentContext::new(); let named_type = tcx.lookup_item_type(self_type_did).ty; let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs); let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); let fresh_impl_substs = infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did); let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs); if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span), named_type, fresh_impl_self_ty) { let item_span = tcx.map.span(self_type_node_id); struct_span_err!(tcx.sess, drop_impl_span, E0366, "Implementations of Drop cannot be specialized") .span_note(item_span, "Use same sequence of generic type and region \ parameters that is on the struct/enum definition") .emit(); return Err(()); } if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) { // this could be reached when we get lazy normalization infcx.report_fulfillment_errors(errors); return Err(()); } let free_regions = FreeRegionMap::new(); infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id); Ok(()) }) } /// Confirms that every predicate imposed by dtor_predicates is /// implied by assuming the predicates attached to self_type_did. fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, dtor_predicates: &ty::GenericPredicates<'tcx>, self_type_did: DefId, self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> { // Here is an example, analogous to that from // `compare_impl_method`. // // Consider a struct type: // // struct Type<'c, 'b:'c, 'a> { // x: &'a Contents // (contents are irrelevant; // y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.) // } // // and a Drop impl: // // impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> { // fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y) // } // // We start out with self_to_impl_substs, that maps the generic // parameters of Type to that of the Drop impl. // // self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x} // // Applying this to the predicates (i.e. assumptions) provided by the item // definition yields the instantiated assumptions: // // ['y : 'z] // // We then check all of the predicates of the Drop impl: // // ['y:'z, 'x:'y] // // and ensure each is in the list of instantiated // assumptions. Here, `'y:'z` is present, but `'x:'y` is // absent. So we report an error that the Drop impl injected a // predicate that is not present on the struct definition. let tcx = ccx.tcx; let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); // We can assume the predicates attached to struct/enum definition // hold. let generic_assumptions = tcx.lookup_predicates(self_type_did); let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs); let assumptions_in_impl_context = assumptions_in_impl_context.predicates; // An earlier version of this code attempted to do this checking // via the traits::fulfill machinery. However, it ran into trouble // since the fulfill machinery merely turns outlives-predicates // 'a:'b and T:'b into region inference constraints. It is simpler // just to look for all the predicates directly. 
assert_eq!(dtor_predicates.parent, None); for predicate in &dtor_predicates.predicates { // (We do not need to worry about deep analysis of type // expressions etc because the Drop impls are already forced // to take on a structure that is roughly an alpha-renaming of // the generic parameters of the item definition.) // This path now just checks *all* predicates via the direct // lookup, rather than using fulfill machinery. // // However, it may be more efficient in the future to batch // the analysis together via the fulfill , rather than the // repeated `contains` calls. if !assumptions_in_impl_context.contains(&predicate) { let item_span = tcx.map.span(self_type_node_id); struct_span_err!(tcx.sess, drop_impl_span, E0367, "The requirement `{}` is added only by the Drop impl.", predicate) .span_note(item_span, "The same requirement must be part of \ the struct/enum definition") .emit(); } } if tcx.sess.has_errors() { return Err(()); } Ok(()) } /// check_safety_of_destructor_if_necessary confirms that the type /// expression `typ` conforms to the "Drop Check Rule" from the Sound /// Generic Drop (RFC 769). /// /// ---- /// /// The simplified (*) Drop Check Rule is the following: /// /// Let `v` be some value (either temporary or named) and 'a be some /// lifetime (scope). If the type of `v` owns data of type `D`, where /// /// * (1.) `D` has a lifetime- or type-parametric Drop implementation, /// (where that `Drop` implementation does not opt-out of /// this check via the `unsafe_destructor_blind_to_params` /// attribute), and /// * (2.) the structure of `D` can reach a reference of type `&'a _`, /// /// then 'a must strictly outlive the scope of v. /// /// ---- /// /// This function is meant to by applied to the type for every /// expression in the program. /// /// ---- /// /// (*) The qualifier "simplified" is attached to the above /// definition of the Drop Check Rule, because it is a simplification /// of the original Drop Check rule, which attempted to prove that /// some `Drop` implementations could not possibly access data even if /// it was technically reachable, due to parametricity. /// /// However, (1.) parametricity on its own turned out to be a /// necessary but insufficient condition, and (2.) future changes to /// the language are expected to make it impossible to ensure that a /// `Drop` implementation is actually parametric with respect to any /// particular type parameter. (In particular, impl specialization is /// expected to break the needed parametricity property beyond /// repair.) /// /// Therefore we have scaled back Drop-Check to a more conservative /// rule that does not attempt to deduce whether a `Drop` /// implementation could not possible access data of a given lifetime; /// instead Drop-Check now simply assumes that if a destructor has /// access (direct or indirect) to a lifetime parameter, then that /// lifetime must be forced to outlive that destructor's dynamic /// extent. We then provide the `unsafe_destructor_blind_to_params` /// attribute as a way for destructor implementations to opt-out of /// this conservative assumption (and thus assume the obligation of /// ensuring that they do not access data nor invoke methods of /// values that have been previously dropped). 
/// pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>( rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>, typ: ty::Ty<'tcx>, span: Span, scope: region::CodeExtent) { debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}", typ, scope); let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| { span_bug!(span, "no enclosing scope found for scope: {:?}", scope) }); let result = iterate_over_potentially_unsafe_regions_in_type( &mut DropckContext { rcx: rcx, span: span, parent_scope: parent_scope, breadcrumbs: FnvHashSet() }, TypeContext::Root, typ, 0); match result { Ok(()) => {} Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => { let tcx = rcx.tcx; let mut err = struct_span_err!(tcx.sess, span, E0320, "overflow while adding drop-check rules for {}", typ); match *ctxt { TypeContext::Root => { // no need for an additional note if the overflow // was somehow on the root. } TypeContext::ADT { def_id, variant, field } => { let adt = tcx.lookup_adt_def(def_id); let variant_name = match adt.adt_kind() { AdtKind::Enum => format!("enum {} variant {}", tcx.item_path_str(def_id), variant), AdtKind::Struct => format!("struct {}", tcx.item_path_str(def_id)), AdtKind::Union => format!("union {}", tcx.item_path_str(def_id)), }; span_note!( &mut err, span, "overflowed on {} field {} type: {}", variant_name, field, detected_on_typ); } } err.emit(); } } } enum
<'tcx> { Overflow(TypeContext, ty::Ty<'tcx>), } #[derive(Copy, Clone)] enum TypeContext { Root, ADT { def_id: DefId, variant: ast::Name, field: ast::Name, } } struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> { rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>, /// types that have already been traversed breadcrumbs: FnvHashSet<Ty<'tcx>>, /// span for error reporting span: Span, /// the scope reachable dtorck types must outlive parent_scope: region::CodeExtent } // `context` is used for reporting overflow errors fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>( cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>, context: TypeContext, ty: Ty<'tcx>, depth: usize) -> Result<(), Error<'tcx>> { let tcx = cx.rcx.tcx; // Issue #22443: Watch out for overflow. While we are careful to // handle regular types properly, non-regular ones cause problems. let recursion_limit = tcx.sess.recursion_limit.get(); if depth / 4 >= recursion_limit { // This can get into rather deep recursion, especially in the // presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T. // use a higher recursion limit to avoid errors. return Err(Error::Overflow(context, ty)) } // canoncialize the regions in `ty` before inserting - infinitely many // region variables can refer to the same region. let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty); if !cx.breadcrumbs.insert(ty) { debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?} - cached", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); return Ok(()); // we already visited this type } debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?}", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); // If `typ` has a destructor, then we must ensure that all // borrowed data reachable via `typ` must outlive the parent // of `scope`. This is handled below. // // However, there is an important special case: for any Drop // impl that is tagged as "blind" to their parameters, // we assume that data borrowed via such type parameters // remains unreachable via that Drop impl. // // For example, consider: // // ```rust // #[unsafe_destructor_blind_to_params] // impl<T> Drop for Vec<T> { ... } // ``` // // which does have to be able to drop instances of `T`, but // otherwise cannot read data from `T`. // // Of course, for the type expression passed in for any such // unbounded type parameter `T`, we must resume the recursive // analysis on `T` (since it would be ignored by // type_must_outlive). if has_dtor_of_interest(tcx, ty) { debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} - is a dtorck type!", (0..depth).map(|_| ' ').collect::<String>(), ty); cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span), ty, tcx.mk_region(ty::ReScope(cx.parent_scope))); return Ok(()); } debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?} - checking interior", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); // We still need to ensure all referenced data is safe. 
match ty.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => { // primitive - definitely safe Ok(()) } ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => { // single-element containers, behave like their element iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } ty::TyAdt(def, substs) if def.is_phantom_data() => { // PhantomData<T> - behaves identically to T let ity = substs.type_at(0); iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } ty::TyAdt(def, substs) => { let did = def.did; for variant in &def.variants { for field in variant.fields.iter() { let fty = field.ty(tcx, substs); let fty = cx.rcx.fcx.resolve_type_vars_with_obligations( cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty)); iterate_over_potentially_unsafe_regions_in_type( cx, TypeContext::ADT { def_id: did, field: field.name, variant: variant.name, }, fty, depth+1)? } } Ok(()) } ty::TyTuple(tys) | ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => { for ty in tys { iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)? } Ok(()) } ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => { // these always come with a witness of liveness (references // explicitly, pointers implicitly, parameters by the // caller). Ok(()) } ty::TyFnDef(..) | ty::TyFnPtr(_) => { // FIXME(#26656): this type is always destruction-safe, but // it implicitly witnesses Self: Fn, which can be false. Ok(()) } ty::TyInfer(..) | ty::TyError => { tcx.sess.delay_span_bug(cx.span, "unresolved type in regionck"); Ok(()) } // these are always dtorck ty::TyTrait(..) | ty::TyProjection(_) | ty::TyAnon(..) => bug!(), } } fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { ty::TyAdt(def, _) => { def.is_dtorck(tcx) } ty::TyTrait(..) | ty::TyProjection(..) | ty::TyAnon(..) => { debug!("ty: {:?} isn't known, and therefore is a dropck type", ty); true }, _ => false } }
Error
identifier_name
dropck.rs
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use CrateCtxt; use check::regionck::RegionCtxt; use hir::def_id::DefId; use middle::free_region::FreeRegionMap; use rustc::infer; use middle::region; use rustc::ty::subst::{Subst, Substs}; use rustc::ty::{self, AdtKind, Ty, TyCtxt}; use rustc::traits::{self, Reveal}; use util::nodemap::FnvHashSet; use syntax::ast; use syntax_pos::{self, Span}; /// check_drop_impl confirms that the Drop implementation identfied by /// `drop_impl_did` is not any more specialized than the type it is /// attached to (Issue #8142). /// /// This means: /// /// 1. The self type must be nominal (this is already checked during /// coherence), /// /// 2. The generic region/type parameters of the impl's self-type must /// all be parameters of the Drop impl itself (i.e. no /// specialization like `impl Drop for Foo<i32>`), and, /// /// 3. Any bounds on the generic parameters must be reflected in the /// struct/enum definition for the nominal type itself (i.e. /// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`). /// pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> { let dtor_self_type = ccx.tcx.lookup_item_type(drop_impl_did).ty; let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did); match dtor_self_type.sty { ty::TyAdt(adt_def, self_to_impl_substs) => { ensure_drop_params_and_item_params_correspond(ccx, drop_impl_did, dtor_self_type, adt_def.did)?; ensure_drop_predicates_are_implied_by_item_defn(ccx, drop_impl_did, &dtor_predicates, adt_def.did, self_to_impl_substs) } _ => { // Destructors only work on nominal types. This was // already checked by coherence, so we can panic here. let span = ccx.tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); span_bug!(span, "should have been rejected by coherence check: {}", dtor_self_type); } } } fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, drop_impl_ty: Ty<'tcx>, self_type_did: DefId) -> Result<(), ()> { let tcx = ccx.tcx; let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap(); let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); // check that the impl type can be made to match the trait type. 
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id); tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| { let tcx = infcx.tcx; let mut fulfillment_cx = traits::FulfillmentContext::new(); let named_type = tcx.lookup_item_type(self_type_did).ty; let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs); let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); let fresh_impl_substs = infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did); let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs); if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span), named_type, fresh_impl_self_ty) { let item_span = tcx.map.span(self_type_node_id); struct_span_err!(tcx.sess, drop_impl_span, E0366, "Implementations of Drop cannot be specialized") .span_note(item_span, "Use same sequence of generic type and region \ parameters that is on the struct/enum definition") .emit(); return Err(()); } if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) { // this could be reached when we get lazy normalization infcx.report_fulfillment_errors(errors); return Err(()); } let free_regions = FreeRegionMap::new(); infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id); Ok(()) }) } /// Confirms that every predicate imposed by dtor_predicates is /// implied by assuming the predicates attached to self_type_did. fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, dtor_predicates: &ty::GenericPredicates<'tcx>, self_type_did: DefId, self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> { // Here is an example, analogous to that from // `compare_impl_method`. // // Consider a struct type: // // struct Type<'c, 'b:'c, 'a> { // x: &'a Contents // (contents are irrelevant; // y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.) // } // // and a Drop impl: // // impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> { // fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y) // } // // We start out with self_to_impl_substs, that maps the generic // parameters of Type to that of the Drop impl. // // self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x} // // Applying this to the predicates (i.e. assumptions) provided by the item // definition yields the instantiated assumptions: // // ['y : 'z] // // We then check all of the predicates of the Drop impl: // // ['y:'z, 'x:'y] // // and ensure each is in the list of instantiated // assumptions. Here, `'y:'z` is present, but `'x:'y` is // absent. So we report an error that the Drop impl injected a // predicate that is not present on the struct definition. let tcx = ccx.tcx; let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); // We can assume the predicates attached to struct/enum definition // hold. let generic_assumptions = tcx.lookup_predicates(self_type_did); let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs); let assumptions_in_impl_context = assumptions_in_impl_context.predicates; // An earlier version of this code attempted to do this checking // via the traits::fulfill machinery. However, it ran into trouble // since the fulfill machinery merely turns outlives-predicates // 'a:'b and T:'b into region inference constraints. It is simpler // just to look for all the predicates directly. 
assert_eq!(dtor_predicates.parent, None); for predicate in &dtor_predicates.predicates { // (We do not need to worry about deep analysis of type // expressions etc because the Drop impls are already forced // to take on a structure that is roughly an alpha-renaming of // the generic parameters of the item definition.) // This path now just checks *all* predicates via the direct // lookup, rather than using fulfill machinery. // // However, it may be more efficient in the future to batch // the analysis together via the fulfill , rather than the // repeated `contains` calls. if !assumptions_in_impl_context.contains(&predicate) { let item_span = tcx.map.span(self_type_node_id); struct_span_err!(tcx.sess, drop_impl_span, E0367, "The requirement `{}` is added only by the Drop impl.", predicate) .span_note(item_span, "The same requirement must be part of \ the struct/enum definition") .emit(); } } if tcx.sess.has_errors() { return Err(()); } Ok(()) } /// check_safety_of_destructor_if_necessary confirms that the type /// expression `typ` conforms to the "Drop Check Rule" from the Sound /// Generic Drop (RFC 769). /// /// ---- /// /// The simplified (*) Drop Check Rule is the following: /// /// Let `v` be some value (either temporary or named) and 'a be some /// lifetime (scope). If the type of `v` owns data of type `D`, where /// /// * (1.) `D` has a lifetime- or type-parametric Drop implementation, /// (where that `Drop` implementation does not opt-out of /// this check via the `unsafe_destructor_blind_to_params` /// attribute), and /// * (2.) the structure of `D` can reach a reference of type `&'a _`, /// /// then 'a must strictly outlive the scope of v. /// /// ---- /// /// This function is meant to by applied to the type for every /// expression in the program. /// /// ---- /// /// (*) The qualifier "simplified" is attached to the above /// definition of the Drop Check Rule, because it is a simplification /// of the original Drop Check rule, which attempted to prove that /// some `Drop` implementations could not possibly access data even if /// it was technically reachable, due to parametricity. /// /// However, (1.) parametricity on its own turned out to be a /// necessary but insufficient condition, and (2.) future changes to /// the language are expected to make it impossible to ensure that a /// `Drop` implementation is actually parametric with respect to any /// particular type parameter. (In particular, impl specialization is /// expected to break the needed parametricity property beyond /// repair.) /// /// Therefore we have scaled back Drop-Check to a more conservative /// rule that does not attempt to deduce whether a `Drop` /// implementation could not possible access data of a given lifetime; /// instead Drop-Check now simply assumes that if a destructor has /// access (direct or indirect) to a lifetime parameter, then that /// lifetime must be forced to outlive that destructor's dynamic /// extent. We then provide the `unsafe_destructor_blind_to_params` /// attribute as a way for destructor implementations to opt-out of /// this conservative assumption (and thus assume the obligation of /// ensuring that they do not access data nor invoke methods of /// values that have been previously dropped). 
/// pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>( rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>, typ: ty::Ty<'tcx>, span: Span, scope: region::CodeExtent) { debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}", typ, scope); let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| { span_bug!(span, "no enclosing scope found for scope: {:?}", scope) }); let result = iterate_over_potentially_unsafe_regions_in_type( &mut DropckContext { rcx: rcx, span: span, parent_scope: parent_scope, breadcrumbs: FnvHashSet() }, TypeContext::Root, typ, 0); match result { Ok(()) => {} Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => { let tcx = rcx.tcx; let mut err = struct_span_err!(tcx.sess, span, E0320, "overflow while adding drop-check rules for {}", typ); match *ctxt { TypeContext::Root => { // no need for an additional note if the overflow // was somehow on the root. } TypeContext::ADT { def_id, variant, field } => { let adt = tcx.lookup_adt_def(def_id); let variant_name = match adt.adt_kind() { AdtKind::Enum => format!("enum {} variant {}", tcx.item_path_str(def_id), variant), AdtKind::Struct => format!("struct {}", tcx.item_path_str(def_id)), AdtKind::Union => format!("union {}", tcx.item_path_str(def_id)), }; span_note!( &mut err, span, "overflowed on {} field {} type: {}", variant_name, field, detected_on_typ); } } err.emit(); } } } enum Error<'tcx> { Overflow(TypeContext, ty::Ty<'tcx>), } #[derive(Copy, Clone)] enum TypeContext { Root, ADT { def_id: DefId, variant: ast::Name, field: ast::Name, } } struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> { rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>, /// types that have already been traversed breadcrumbs: FnvHashSet<Ty<'tcx>>, /// span for error reporting span: Span, /// the scope reachable dtorck types must outlive parent_scope: region::CodeExtent } // `context` is used for reporting overflow errors fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>( cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>, context: TypeContext, ty: Ty<'tcx>, depth: usize) -> Result<(), Error<'tcx>> { let tcx = cx.rcx.tcx; // Issue #22443: Watch out for overflow. While we are careful to // handle regular types properly, non-regular ones cause problems. let recursion_limit = tcx.sess.recursion_limit.get(); if depth / 4 >= recursion_limit { // This can get into rather deep recursion, especially in the // presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T. // use a higher recursion limit to avoid errors. return Err(Error::Overflow(context, ty)) } // canoncialize the regions in `ty` before inserting - infinitely many // region variables can refer to the same region. let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty); if !cx.breadcrumbs.insert(ty) { debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?} - cached", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); return Ok(()); // we already visited this type } debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?}", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); // If `typ` has a destructor, then we must ensure that all // borrowed data reachable via `typ` must outlive the parent // of `scope`. This is handled below. // // However, there is an important special case: for any Drop // impl that is tagged as "blind" to their parameters, // we assume that data borrowed via such type parameters // remains unreachable via that Drop impl. 
// // For example, consider: // // ```rust // #[unsafe_destructor_blind_to_params] // impl<T> Drop for Vec<T> { ... } // ``` // // which does have to be able to drop instances of `T`, but // otherwise cannot read data from `T`. // // Of course, for the type expression passed in for any such // unbounded type parameter `T`, we must resume the recursive // analysis on `T` (since it would be ignored by // type_must_outlive). if has_dtor_of_interest(tcx, ty) { debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} - is a dtorck type!", (0..depth).map(|_| ' ').collect::<String>(), ty); cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span), ty, tcx.mk_region(ty::ReScope(cx.parent_scope))); return Ok(()); } debug!("iterate_over_potentially_unsafe_regions_in_type \ {}ty: {} scope: {:?} - checking interior", (0..depth).map(|_| ' ').collect::<String>(), ty, cx.parent_scope); // We still need to ensure all referenced data is safe. match ty.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => { // primitive - definitely safe Ok(()) } ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => { // single-element containers, behave like their element iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } ty::TyAdt(def, substs) if def.is_phantom_data() => { // PhantomData<T> - behaves identically to T let ity = substs.type_at(0); iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } ty::TyAdt(def, substs) => { let did = def.did; for variant in &def.variants { for field in variant.fields.iter() { let fty = field.ty(tcx, substs); let fty = cx.rcx.fcx.resolve_type_vars_with_obligations( cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty)); iterate_over_potentially_unsafe_regions_in_type( cx, TypeContext::ADT { def_id: did, field: field.name, variant: variant.name, }, fty, depth+1)? } } Ok(()) } ty::TyTuple(tys) | ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => { for ty in tys { iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)? } Ok(()) } ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => { // these always come with a witness of liveness (references // explicitly, pointers implicitly, parameters by the // caller). Ok(()) } ty::TyFnDef(..) | ty::TyFnPtr(_) => { // FIXME(#26656): this type is always destruction-safe, but // it implicitly witnesses Self: Fn, which can be false. Ok(()) } ty::TyInfer(..) | ty::TyError => { tcx.sess.delay_span_bug(cx.span, "unresolved type in regionck"); Ok(()) } // these are always dtorck ty::TyTrait(..) | ty::TyProjection(_) | ty::TyAnon(..) => bug!(), } } fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { ty::TyAdt(def, _) =>
ty::TyTrait(..) | ty::TyProjection(..) | ty::TyAnon(..) => { debug!("ty: {:?} isn't known, and therefore is a dropck type", ty); true }, _ => false } }
{ def.is_dtorck(tcx) }
conditional_block
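As a concrete illustration of the simplified Drop Check Rule documented in the dropck record above, here is a minimal, self-contained Rust sketch; `Inspector` and the variable names are hypothetical and are not items from the rustc source in that record.

struct Inspector<'a>(&'a u8);

impl<'a> Drop for Inspector<'a> {
    fn drop(&mut self) {
        // The destructor can reach the borrowed `&'a u8`, so dropck requires
        // `'a` to strictly outlive every `Inspector` value.
        println!("inspecting {}", self.0);
    }
}

fn main() {
    let value = 42u8;                  // declared first, so dropped last
    let inspector = Inspector(&value); // borrows `value`
    // `value` strictly outlives `inspector`, so this compiles and runs.
    // If the declarations allowed `value` to be dropped before `inspector`,
    // that is exactly what the rule above rejects: the destructor might
    // otherwise read data that has already been dropped.
    drop(inspector);
}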
views.py
from django.shortcuts import render, HttpResponse, redirect from django.http import JsonResponse from django.db.models import Count from django.db.models import F from django.db import transaction from bs4 import BeautifulSoup from django.contrib.auth.decorators import login_required from django.core.mail import send_mail import json from cnblog.settings import * from django.contrib import auth from blog.utils.validCode import get_validCode_img from blog.Userforms import User from blog.models import * def index(request): # 首页 """ 首页 :param request: :return: """ articles = Article.objects.all() return render(request, 'index.html', locals()) def get_data(request, username): """ 取用户对象user,博客对象blog,c_articles,t_articles,c_t_articles,articles :param request: :param username: :return: """ user = UserInfo.objects.filter(username=username).first() # 当前对象 print('user', user) if not user: # 判断是否已经存在 return render(request, 'not_exit.html', locals()) # 当前站点对象 blog = user.blog # 查询当前站点的每一个分类名称以及对应文章数 c_articles = Category.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values( 'title', 'c') # 查询当前站点的每一个标签名称以及对应文章数 t_articles = Tag.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values( 'title', 'c') # 查询当前站点每一个年月以及对应文章数 c_t_articles = Article.objects.filter(user=user). \ extra(select={"c_date": "date_format(create_time,'%%Y-%%m')"}). \ values('c_date').annotate(c=Count('nid')).values('c_date', 'c') articles = Article.objects.filter(user=user) return user,c_articles,t_articles,c_t_articles,articles,blog @login_required def home_site(request, username, **kwargs): # 第三个形参是以字典形式接受多个参数 """ 个人站点 :param request: :param username: :return: """ if username == request.user.username: user, c_articles, t_articles, c_t_articles, articles, blog = get_data(request, username) if kwargs: # 个人站点跳转 condition = kwargs['condition'] param = kwargs['param'] if condition == 'category': articles = Article.objects.filter(user=user).filter(category__title=param) if condition == 'tag': articles = Article.objects.filter(user=user).filter(tags__title=param) if condition == 'archive': year, month = param.split('-') articles = Article.objects.filter(user=user)\ .filter(create_time__year=year, create_time__month=month) # USE_TZ = False return render(request, 'home_site.html', locals()) else: return redirect('/login/') def article_detail(request, username, article_id): """ 文章详情页 :param request: :param username: :param article_id: :return: """ article_obj = Article.objects.filter(nid=article_id).first() comments = Comment.objects.filter(article_id=article_id) return render(request, 'article_detail.html', locals()) def digg(request): """ 点赞 :param request: :return: """ is_up = json.loads(request.POST.get('is_up')) # 反序列化 user_id = request.user.pk article_id = request.POST.get('article_id') obj = ArticleUpDown.objects.filter(user_id=user_id, article_id=article_id).first() response = {'state': False, 'msg': None} if not obj: # 该用户没对本文章进行操作 ArticleUpDown.objects.create(is_up=is_up, article_id=article_id, user_id=user_id) queryset = Article.objects.filter(pk=article_id) if is_up: # 更新文章的数据 queryset.update(up_count=F('up_count')+1) else: queryset.update(down_count=F('down_count')+1) else: response['state'] = True if obj.is_up: response['msg'] = '您已经点赞过!' else: response['msg'] = '您已经点踩过!' 
return JsonResponse(response) def comment(request): article_id = request.POST.get('article_id') pid = request.POST.get('pid') content = request.POST.get('content') user_id = request.user.pk # 事务操作,必须同时成功,同时失败 with transaction.atomic(): ret = Comment.objects.create(user_id=user_id, content=content, article_id=article_id, parent_comment_id=pid) Article.objects.filter(nid=article_id).update(comment_count=F('comment_count')+1) # 构件根评论添加时所需数据 response = {} response['create_time'] = ret.create_time.strftime("%Y-%m-%d %X") response['username'] = request.user.username response['content'] = ret.content article_obj = Article.objects.filter(nid=article_id).first() # 给该文章作者发送邮件,通知其有人评论 from cnblog.settings import EMAIL_HOST_USER import threading # send_mail( # "您的文章%s新增了一条评论内容"%article_obj.title, # 提示信息 # content, # 邮件内容 # EMAIL_HOST_USER, # 发送方 # ['[email protected]'] # 接收方 # ) t = threading.Thread(target=send_mail, args=( # 开启线程,节省时间 "您的文章%s新增了一条评论内容" % article_obj.title, content, EMAIL_HOST_USER, ['[email protected]'] )) t.start() return JsonResponse(response) def get_comment_tree(request): article_id = request.GET.get('article_id') # 转换成数组 ret = list(Comment.objects.filter(article_id=article_id).values('pk', 'content', 'parent_comment__nid')) return JsonResponse(ret, safe=False) # 传列表,需改成false def backend(request, username): # 当前用户文章列表 username = username print(username+'456789') article_list = Article.objects.filter(user__username=username) return render(request, 'backend.html', locals()) def article_del(request): """ 删除文章 :param request: :return: """ username = request.POST.get('username') article_id = request.POST.get('article_id') Article.objects.filter(pk=article_id).delete() Comment.objects.filter(article_id=article_id).delete() return HttpResponse('删除成功!') def article_edit(request, article_id): """ 编辑修改某一篇文章 :param request: :return: """ article_id = article_id article_obj = Article.objects.filter(nid=article_id).first() return render(request, 'article_edit.html', locals()) def article_update(request): username = request.user.username if request.method == 'POST': article_id = request.POST.get('article_id') title = request.POST.get('title') content = request.POST.get('content') print('content', content) # 提取文章描述信息desc soup = BeautifulSoup(content, 'html.parser') for tag in soup.find_all(): if tag.name == 'script': tag.decompose() # 删除非法信息,防止xss攻击 desc = soup.text[0:150] +'' # 只提取150个字节的文本信息 Article.objects.filter(nid=article_id).update(title=title, content=content, user=request.user, desc=desc) return redirect('/%s/backend/' % username) return render(request, 'add_article.html', locals()) @login_required def add_article(request): username = request.user.username if request.method == 'POST': title = request.POST.get('title') content = request.POST.get('content') print('content', content) # 提取文章描述信息desc soup = BeautifulSoup(content, 'html.parser') for tag in soup.find_all(): if tag.name == 'script': tag.decompose() # 删除非法信息,防止xss攻击 desc = soup.text[0:150] +'' # 只提取150个字节的文本信息 Article.objects.create(title=title, content=content, user=request.user, desc=desc) return redirect('/%s/backend/' % username) return render(request, 'add_article.html', locals()) def upload(request): """ 文章的图片上传 :param request: :return: """ img = request.FILES.get('upload_img') # 读取上传的文件 path = os.path.join(MEDIA_ROOT, 'article_imgs', img.name) # 保存到的路径 with open(path, 'wb') as f: # 保存 for i in img: f.write(i) response = { 'error': 0, 'url': '/media/article_imgs/%s' % img.name # 返回图片地址,可以在编辑框预览 } return 
HttpResponse(json.dumps(response)) def logout(request): # 注销 auth.logout(request) return redirect('/index/') def login(request): # 登录 if request.method == 'POST': user = request.POST.get('user') pwd = request.POST.get('pwd') validcode = request.POST.get('validcode') # 浏览器提交的 valid_code = request.session.get('valid_code') # 保存在服务器的 resopnse = {'user': None, 'msg': None}
if ret: # 用户存在 auth.login(request, ret) # 当前登录对象 resopnse['user'] = user else: resopnse['msg'] = 'username or password is wromg!' else: resopnse['msg'] = 'valid code error!' return JsonResponse(resopnse) return render(request, 'login.html') def get_validcode_img(request): # 生成随机验证码 data = get_validCode_img(request) return HttpResponse(data) def register(request): """ 注册页面 :param request: :return: """ if request.method == 'POST': # 或者if request.is_ajax 进行判断 form = User(request.POST) # 验证是否合要求 response = {'user': None, 'msg': None} if form.is_valid(): # 信息正确,增加注册用户 response['user'] = form.cleaned_data.get('user') user = form.cleaned_data.get('user') pwd = form.cleaned_data.get('pwd') email = form.cleaned_data.get('email') head_obj = request.FILES.get('avatar') # 文件提取 extra = {} # 额外传的数据都打包成字典 if head_obj: extra['avatar'] = head_obj UserInfo.objects.create_user(username=user, password=pwd, email=email, **extra) else: response['msg'] = form.errors return JsonResponse(response) # Ajax接受JSON文件 else: form = User() return render(request, 'reg.html', locals())
if validcode.upper() == valid_code.upper(): # 首先校验验证码,验证码不区分大小写 ret = auth.authenticate(username=user, password=pwd)
random_line_split
views.py
from django.shortcuts import render, HttpResponse, redirect from django.http import JsonResponse from django.db.models import Count from django.db.models import F from django.db import transaction from bs4 import BeautifulSoup from django.contrib.auth.decorators import login_required from django.core.mail import send_mail import json from cnblog.settings import * from django.contrib import auth from blog.utils.validCode import get_validCode_img from blog.Userforms import User from blog.models import * def index(request): # 首页 """ 首页 :param request: :return: """ articles = Article.objects.all() return render(request, 'index.html', locals()) def get_data(request, username): """ 取用户对象user,博客对象blog,c_articles,t_articles,c_t_articles,articles :param request: :param username: :return: """ user = UserInfo.objects.filter(username=username).first() # 当前对象 print('user', user) if not user: # 判断是否已经存在 return render(request, 'not_exit.html', locals()) # 当前站点对象 blog = user.blog # 查询当前站点的每一个分类名称以及对应文章数 c_articles = Category.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values( 'title', 'c') # 查询当前站点的每一个标签名称以及对应文章数 t_articles = Tag.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values( 'title', 'c') # 查询当前站点每一个年月以及对应文章数 c_t_articles = Article.objects.filter(user=user). \ extra(select={"c_date": "date_format(create_time,'%%Y-%%m')"}). \ values('c_date').annotate(c=Count('nid')).values('c_date', 'c') articles = Article.objects.filter(user=user) return user,c_articles,t_articles,c_t_articles,articles,blog @login_required def home_site(request, username, **kwargs): # 第三个形参是以字典形式接受多个参数 """ 个人站点 :param request: :param username: :return: """ if username == request.user.username: user, c_articles, t_articles, c_t_articles, articles, blog = get_data(request, username) if kwargs: # 个人站点跳转 condition = kwargs['condition'] param = kwargs['param'] if condition == 'category': articles = Article.objects.filter(user=user).filter(category__title=param) if condition == 'tag': articles = Article.objects.filter(user=user).filter(tags__title=param) if condition == 'archive': year, month = param.split('-') articles = Article.objects.filter(user=user)\ .filter(create_time__year=year, create_time__month=month) # USE_TZ = False return render(request, 'home_site.html', locals()) else: return redirect('/login/') def article_detail(request, username, article_id): """ 文章详情页 :param request: :param username: :param article_id: :return: """ article_obj = Article.objects.filter(nid=article_id).first() comments = Comment.objects.filter(article_id=article_id) return
.filter(user_id=user_id, article_id=article_id).first() response = {'state': False, 'msg': None} if not obj: # 该用户没对本文章进行操作 ArticleUpDown.objects.create(is_up=is_up, article_id=article_id, user_id=user_id) queryset = Article.objects.filter(pk=article_id) if is_up: # 更新文章的数据 queryset.update(up_count=F('up_count')+1) else: queryset.update(down_count=F('down_count')+1) else: response['state'] = True if obj.is_up: response['msg'] = '您已经点赞过!' else: response['msg'] = '您已经点踩过!' return JsonResponse(response) def comment(request): article_id = request.POST.get('article_id') pid = request.POST.get('pid') content = request.POST.get('content') user_id = request.user.pk # 事务操作,必须同时成功,同时失败 with transaction.atomic(): ret = Comment.objects.create(user_id=user_id, content=content, article_id=article_id, parent_comment_id=pid) Article.objects.filter(nid=article_id).update(comment_count=F('comment_count')+1) # 构件根评论添加时所需数据 response = {} response['create_time'] = ret.create_time.strftime("%Y-%m-%d %X") response['username'] = request.user.username response['content'] = ret.content article_obj = Article.objects.filter(nid=article_id).first() # 给该文章作者发送邮件,通知其有人评论 from cnblog.settings import EMAIL_HOST_USER import threading # send_mail( # "您的文章%s新增了一条评论内容"%article_obj.title, # 提示信息 # content, # 邮件内容 # EMAIL_HOST_USER, # 发送方 # ['[email protected]'] # 接收方 # ) t = threading.Thread(target=send_mail, args=( # 开启线程,节省时间 "您的文章%s新增了一条评论内容" % article_obj.title, content, EMAIL_HOST_USER, ['[email protected]'] )) t.start() return JsonResponse(response) def get_comment_tree(request): article_id = request.GET.get('article_id') # 转换成数组 ret = list(Comment.objects.filter(article_id=article_id).values('pk', 'content', 'parent_comment__nid')) return JsonResponse(ret, safe=False) # 传列表,需改成false def backend(request, username): # 当前用户文章列表 username = username print(username+'456789') article_list = Article.objects.filter(user__username=username) return render(request, 'backend.html', locals()) def article_del(request): """ 删除文章 :param request: :return: """ username = request.POST.get('username') article_id = request.POST.get('article_id') Article.objects.filter(pk=article_id).delete() Comment.objects.filter(article_id=article_id).delete() return HttpResponse('删除成功!') def article_edit(request, article_id): """ 编辑修改某一篇文章 :param request: :return: """ article_id = article_id article_obj = Article.objects.filter(nid=article_id).first() return render(request, 'article_edit.html', locals()) def article_update(request): username = request.user.username if request.method == 'POST': article_id = request.POST.get('article_id') title = request.POST.get('title') content = request.POST.get('content') print('content', content) # 提取文章描述信息desc soup = BeautifulSoup(content, 'html.parser') for tag in soup.find_all(): if tag.name == 'script': tag.decompose() # 删除非法信息,防止xss攻击 desc = soup.text[0:150] +'' # 只提取150个字节的文本信息 Article.objects.filter(nid=article_id).update(title=title, content=content, user=request.user, desc=desc) return redirect('/%s/backend/' % username) return render(request, 'add_article.html', locals()) @login_required def add_article(request): username = request.user.username if request.method == 'POST': title = request.POST.get('title') content = request.POST.get('content') print('content', content) # 提取文章描述信息desc soup = BeautifulSoup(content, 'html.parser') for tag in soup.find_all(): if tag.name == 'script': tag.decompose() # 删除非法信息,防止xss攻击 desc = soup.text[0:150] +'' # 只提取150个字节的文本信息 Article.objects.create(title=title, content=content, 
user=request.user, desc=desc) return redirect('/%s/backend/' % username) return render(request, 'add_article.html', locals()) def upload(request): """ 文章的图片上传 :param request: :return: """ img = request.FILES.get('upload_img') # 读取上传的文件 path = os.path.join(MEDIA_ROOT, 'article_imgs', img.name) # 保存到的路径 with open(path, 'wb') as f: # 保存 for i in img: f.write(i) response = { 'error': 0, 'url': '/media/article_imgs/%s' % img.name # 返回图片地址,可以在编辑框预览 } return HttpResponse(json.dumps(response)) def logout(request): # 注销 auth.logout(request) return redirect('/index/') def login(request): # 登录 if request.method == 'POST': user = request.POST.get('user') pwd = request.POST.get('pwd') validcode = request.POST.get('validcode') # 浏览器提交的 valid_code = request.session.get('valid_code') # 保存在服务器的 resopnse = {'user': None, 'msg': None} if validcode.upper() == valid_code.upper(): # 首先校验验证码,验证码不区分大小写 ret = auth.authenticate(username=user, password=pwd) if ret: # 用户存在 auth.login(request, ret) # 当前登录对象 resopnse['user'] = user else: resopnse['msg'] = 'username or password is wromg!' else: resopnse['msg'] = 'valid code error!' return JsonResponse(resopnse) return render(request, 'login.html') def get_validcode_img(request): # 生成随机验证码 data = get_validCode_img(request) return HttpResponse(data) def register(request): """ 注册页面 :param request: :return: """ if request.method == 'POST': # 或者if request.is_ajax 进行判断 form = User(request.POST) # 验证是否合要求 response = {'user': None, 'msg': None} if form.is_valid(): # 信息正确,增加注册用户 response['user'] = form.cleaned_data.get('user') user = form.cleaned_data.get('user') pwd = form.cleaned_data.get('pwd') email = form.cleaned_data.get('email') head_obj = request.FILES.get('avatar') # 文件提取 extra = {} # 额外传的数据都打包成字典 if head_obj: extra['avatar'] = head_obj UserInfo.objects.create_user(username=user, password=pwd, email=email, **extra) else: response['msg'] = form.errors return JsonResponse(response) # Ajax接受JSON文件 else: form = User() return render(request, 'reg.html', locals())
render(request, 'article_detail.html', locals()) def digg(request): """ 点赞 :param request: :return: """ is_up = json.loads(request.POST.get('is_up')) # 反序列化 user_id = request.user.pk article_id = request.POST.get('article_id') obj = ArticleUpDown.objects
identifier_body
views.py
from django.shortcuts import render, HttpResponse, redirect from django.http import JsonResponse from django.db.models import Count from django.db.models import F from django.db import transaction from bs4 import BeautifulSoup from django.contrib.auth.decorators import login_required from django.core.mail import send_mail import json from cnblog.settings import * from django.contrib import auth from blog.utils.validCode import get_validCode_img from blog.Userforms import User from blog.models import * def index(request): # 首页 """ 首页 :param request: :return: """ articles = Article.objects.all() return render(request, 'index.html', locals()) def get_data(request, username): """ 取用户对象user,博客对象blog,c_articles,t_articles,c_t_articles,articles :param request: :param username: :return: """ user = UserInfo.objects.filter(username=username).first() # 当前对象 print('user', user) if not user: # 判断是否已经存在 return render(request, 'not_exit.html', locals()) # 当前站点对象 blog = user.blog # 查询当前站点的每一个分类名称以及对应文章数 c_articles = Category.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values( 'title', 'c') # 查询当前站点的每一个标签名称以及对应文章数 t_articles = Tag.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values( 'title', 'c') # 查询当前站点每一个年月以及对应文章数 c_t_articles = Article.objects.filter(user=user). \ extra(select={"c_date": "date_format(create_time,'%%Y-%%m')"}). \ values('c_date').annotate(c=Count('nid')).values('c_date', 'c') articles = Article.objects.filter(user=user) return user,c_articles,t_articles,c_t_articles,articles,blog @login_required def home_site(request, username, **kwargs): # 第三个形参是以字典形式接受多个参数 """ 个人站点 :param request: :param username: :return: """ if username == request.user.username: user, c_articles, t_articles, c_t_articles, articles, blog = get_data(request, username) if kwargs: # 个人站点跳转 condition = kwargs['condition'] param = kwargs['param'] if condition == 'category': articles = Article.objects.filter(user=user).filter(category__title=param) if condition == 'tag': articles = Article.objects.filter(user=user).filter(tags__title=param) if condition == 'archive': year, month = param.split('-') articles = Article.objects.filter(user=user)\ .filter(create_time__year=year, create_time__month=month) # USE_TZ = False return render(request, 'home_site.html', locals()) else: return redirect('/login/') def article_detail(request, username, article_id): """ 文章详情页 :param request: :param username: :param article_id: :return: """ article_obj = Article.objects.filter(nid=article_id).first() comments = Comment.objects.filter(article_id=article_id) return render(request, 'article_detail.html', locals()) def digg(request): """ 点赞 :param request: :return: """ is_up = json.loads(request.POST.get('is_up')) # 反序列化 user_id = request.user.pk article_id = request.POST.get('article_id') obj = ArticleUpDown.objects.filter(user_id=user_id, article_id=article_id).first() response = {'state': False, 'msg': None} if not obj: # 该用户没对本文章进行操作 ArticleUpDown.objects.create(is_up=is_up, article_id=article_id, user_id=user_id) queryset = Article.objects.filter(pk=article_id) if is_up: # 更新文章的数据 queryset.update(up_count=F('up_count')+1) else: queryset.update(down_count=F('down_count')+1) else: response['state'] = True if obj.is_up: response['msg'] = '您已经点赞过!' else: response['msg'] = '您已经点踩过!' 
return JsonResponse(response) def comment(request): article_id = request.POST.get('article_id') pid = request.POST.get('pid') content = request.POST.get('content') user_id = request.user.pk # 事务操作,必须同时成功,同时失败 with transaction.atomic(): ret = Comment.objects.create(user_id=user_id, content=content, article_id=article_id, parent_comment_id=pid) Article.objects.filter(nid=article_id).update(comment_count=F('comment_count')+1) # 构件根评论添加时所需数据 response = {} response['create_time'] = ret.create_time.strftime("%Y-%m-%d %X") response['username'] = request.user.username response['content'] = ret.content article_obj = Article.objects.filter(nid=article_id).first() # 给该文章作者发送邮件,通知其有人评论 from cnblog.settings import EMAIL_HOST_USER import threading # send_mail( # "您的文章%s新增了一条评论内容"%article_obj.title, # 提示信息 # content, # 邮件内容 # EMAIL_HOST_USER, # 发送方 # ['[email protected]'] # 接收方 # ) t = threading.Thread(target=send_mail, args=( # 开启线程,节省时间 "您的文章%s新增了一条评论内容" % article_obj.title, content, EMAIL_HOST_USER, ['[email protected]'] )) t.start() return JsonResponse(response) def get_comment_tree(request): article_id = request.GET.get('article_id') # 转换成数组 ret = list(Comment.objects.filter(article_id=article_id).values('pk', 'content', 'parent_comment__nid')) return JsonResponse(ret, safe=False) # 传列表,需改成false def backend(request, username): # 当前用户文章列表 username = username print(username+'456789') article_list = Article.objects.filter(user__username=username) return render(request, 'backend.html', locals()) def article_del(request): """ 删除文章 :param request: :return: """ username = request.POST.get('username') article_id = request.POST.get('article_id') Article.objects.filter(pk=article_id).delete() Comment.objects.filter(article_id=article_id).delete() return HttpResponse('删除成功!')
ticle_edit(request, article_id): """ 编辑修改某一篇文章 :param request: :return: """ article_id = article_id article_obj = Article.objects.filter(nid=article_id).first() return render(request, 'article_edit.html', locals()) def article_update(request): username = request.user.username if request.method == 'POST': article_id = request.POST.get('article_id') title = request.POST.get('title') content = request.POST.get('content') print('content', content) # 提取文章描述信息desc soup = BeautifulSoup(content, 'html.parser') for tag in soup.find_all(): if tag.name == 'script': tag.decompose() # 删除非法信息,防止xss攻击 desc = soup.text[0:150] +'' # 只提取150个字节的文本信息 Article.objects.filter(nid=article_id).update(title=title, content=content, user=request.user, desc=desc) return redirect('/%s/backend/' % username) return render(request, 'add_article.html', locals()) @login_required def add_article(request): username = request.user.username if request.method == 'POST': title = request.POST.get('title') content = request.POST.get('content') print('content', content) # 提取文章描述信息desc soup = BeautifulSoup(content, 'html.parser') for tag in soup.find_all(): if tag.name == 'script': tag.decompose() # 删除非法信息,防止xss攻击 desc = soup.text[0:150] +'' # 只提取150个字节的文本信息 Article.objects.create(title=title, content=content, user=request.user, desc=desc) return redirect('/%s/backend/' % username) return render(request, 'add_article.html', locals()) def upload(request): """ 文章的图片上传 :param request: :return: """ img = request.FILES.get('upload_img') # 读取上传的文件 path = os.path.join(MEDIA_ROOT, 'article_imgs', img.name) # 保存到的路径 with open(path, 'wb') as f: # 保存 for i in img: f.write(i) response = { 'error': 0, 'url': '/media/article_imgs/%s' % img.name # 返回图片地址,可以在编辑框预览 } return HttpResponse(json.dumps(response)) def logout(request): # 注销 auth.logout(request) return redirect('/index/') def login(request): # 登录 if request.method == 'POST': user = request.POST.get('user') pwd = request.POST.get('pwd') validcode = request.POST.get('validcode') # 浏览器提交的 valid_code = request.session.get('valid_code') # 保存在服务器的 resopnse = {'user': None, 'msg': None} if validcode.upper() == valid_code.upper(): # 首先校验验证码,验证码不区分大小写 ret = auth.authenticate(username=user, password=pwd) if ret: # 用户存在 auth.login(request, ret) # 当前登录对象 resopnse['user'] = user else: resopnse['msg'] = 'username or password is wromg!' else: resopnse['msg'] = 'valid code error!' return JsonResponse(resopnse) return render(request, 'login.html') def get_validcode_img(request): # 生成随机验证码 data = get_validCode_img(request) return HttpResponse(data) def register(request): """ 注册页面 :param request: :return: """ if request.method == 'POST': # 或者if request.is_ajax 进行判断 form = User(request.POST) # 验证是否合要求 response = {'user': None, 'msg': None} if form.is_valid(): # 信息正确,增加注册用户 response['user'] = form.cleaned_data.get('user') user = form.cleaned_data.get('user') pwd = form.cleaned_data.get('pwd') email = form.cleaned_data.get('email') head_obj = request.FILES.get('avatar') # 文件提取 extra = {} # 额外传的数据都打包成字典 if head_obj: extra['avatar'] = head_obj UserInfo.objects.create_user(username=user, password=pwd, email=email, **extra) else: response['msg'] = form.errors return JsonResponse(response) # Ajax接受JSON文件 else: form = User() return render(request, 'reg.html', locals())
def ar
identifier_name
views.py
from django.shortcuts import render, HttpResponse, redirect from django.http import JsonResponse from django.db.models import Count from django.db.models import F from django.db import transaction from bs4 import BeautifulSoup from django.contrib.auth.decorators import login_required from django.core.mail import send_mail import json from cnblog.settings import * from django.contrib import auth from blog.utils.validCode import get_validCode_img from blog.Userforms import User from blog.models import * def index(request): # 首页 """ 首页 :param request: :return: """ articles = Article.objects.all() return render(request, 'index.html', locals()) def get_data(request, username): """ 取用户对象user,博客对象blog,c_articles,t_articles,c_t_articles,articles :param request: :param username: :return: """ user = UserInfo.objects.filter(username=username).first() # 当前对象 print('user', user) if not user: # 判断是否已经存在 return render(request, 'not_exit.html', locals()) # 当前站点对象 blog = user.blog # 查询当前站点的每一个分类名称以及对应文章数 c_articles = Category.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values( 'title', 'c') # 查询当前站点的每一个标签名称以及对应文章数 t_articles = Tag.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values( 'title', 'c') # 查询当前站点每一个年月以及对应文章数 c_t_articles = Article.objects.filter(user=user). \ extra(select={"c_date": "date_format(create_time,'%%Y-%%m')"}). \ values('c_date').annotate(c=Count('nid')).values('c_date', 'c') articles = Article.objects.filter(user=user) return user,c_articles,t_articles,c_t_articles,articles,blog @login_required def home_site(request, username, **kwargs): # 第三个形参是以字典形式接受多个参数 """ 个人站点 :param request: :param username: :return: """ if username == request.user.username: user, c_articles, t_articles, c_t_articles, articles, blog = get_data(request, username) if kwargs: # 个人站点跳转 condition = kwargs['condition'] param = kwargs['param'] if condition == 'category': articles = Article.objects.filter(user=user).filter(category__title=param) if condition == 'tag': articles = Article.objects.filter(user=user).filter(tags__title=param) if condition == 'archive': year, month = param.split('-') articles = Article.objects.filter(user=user)\ .filter(create_time__year=year, create_time__month=month) # USE_TZ = False return render(request, 'home_site.html', locals()) else: return redirect('/login/') def article_detail(request, username, article_id): """ 文章详情页 :param request: :param username: :param article_id: :return: """ article_obj = Article.objects.filter(nid=article_id).first() comments = Comment.objects.filter(article_id=article_id) return render(request, 'article_detail.html', locals()) def digg(request): """ 点赞 :param request: :return: """ is_up = json.loads(request.POST.get('is_up')) # 反序列化 user_id = request.user.pk article_id = request.POST.get('article_id') obj = ArticleUpDown.objects.filter(user_id=user_id, article_id=article_id).first() response = {'state': False, 'msg': None} if not obj: # 该用户没对本文章进行操作 ArticleUpDown.objects.create(is_up=is_up, article_id=article_id, user_id=user_id) queryset = Article.objects.filter(pk=article_id) if is_up: # 更新文章的数据 queryset.update(up_count=F('up_count')+1) else: queryset.update(down_count=F('down_count')+1) else: response['state'] = True if obj.is_up: response['msg'] = '您已经点赞过!' else: response['msg'] = '您已经点踩过!' 
return JsonResponse(response) def comment(request): article_id = request.POST.get('article_id') pid = request.POST.get('pid') content = request.POST.get('content') user_id = request.user.pk # 事务操作,必须同时成功,同时失败 with transaction.atomic(): ret = Comment.objects.create(user_id=user_id, content=content, article_id=article_id, parent_comment_id=pid) Article.objects.filter(nid=article_id).update(comment_count=F('comment_count')+1) # 构件根评论添加时所需数据 response = {} response['create_time'] = ret.create_time.strftime("%Y-%m-%d %X") response['username'] = request.user.username response['content'] = ret.content article_obj = Article.objects.filter(nid=article_id).first() # 给该文章作者发送邮件,通知其有人评论 from cnblog.settings import EMAIL_HOST_USER import threading # send_mail( # "您的文章%s新增了一条评论内容"%article_obj.title, # 提示信息 # content, # 邮件内容 # EMAIL_HOST_USER, # 发送方 # ['[email protected]'] # 接收方 # ) t = threading.Thread(target=send_mail, args=( # 开启线程,节省时间 "您的文章%s新增了一条评论内容" % article_obj.title, content, EMAIL_HOST_USER, ['[email protected]'] )) t.start() return JsonResponse(response) def get_comment_tree(request): article_id = request.GET.get('article_id') # 转换成数组 ret = list(Comment.objects.filter(article_id=article_id).values('pk', 'content', 'parent_comment__nid')) return JsonResponse(ret, safe=False) # 传列表,需改成false def backend(request, username): # 当前用户文章列表 username = username print(username+'456789') article_list = Article.objects.filter(user__username=username) return render(request, 'backend.html', locals()) def article_del(request): """ 删除文章 :param request: :return: """ username = request.POST.get('username') article_id = request.POST.get('article_id') Article.objects.filter(pk=article_id).delete() Comment.objects.filter(article_id=article_id).delete() return HttpResponse('删除成功!') def article_edit(request, article_id): """ 编辑修改某一篇文章 :param request: :return: """ article_id = article_id article_obj = Article.objects.filter(nid=article_id).first() return render(request, 'article_edit.html', locals()) def article_update(request): username = request.user.username if request.method == 'POST': article_id = request.POST.get('article_id') title = request.POST.get('title') content = request.POST.get('content') print('content', content) # 提取文章描述信息desc soup = BeautifulSoup(content, 'html.parser') for tag in soup.find_all(): if tag.name == 'script': tag.decompose() # 删除非法信息,防止xss攻击 desc = soup.text[0:150] +'' # 只提取150个字节的文本信息 Article.objects.filter(nid=article_id).update(title=title, content=content, user=request.user, desc=desc) return redirect('/%s/backend/' % username) return render(request, 'add_article.html', locals()) @login_required def add_article(request): username = request.user.username if request.method == 'POST': title = request.POST.get('title') content = request.POST.get('content') print('content', content) # 提取文章描述信息desc soup = BeautifulSoup(content, 'html.parser') for tag in soup.find_all(): if tag.name == 'script': tag.decompose() # 删除非法信息,防止xss攻击 desc = soup.text[0:150] +'' # 只提取150个字节的文本信息 Article.objects.create(title=title, content=content, user=request.user, desc=desc) return redirect('/%s/backend/' % username) return render(request, 'add_article.html', locals()) def upload(request): """ 文章的图片上传 :param request: :return: """ img = request.FILES.get('upload_img') # 读取上传的文件 path = os.path.join(MEDIA_ROOT, 'article_imgs', img.name) # 保存到的路径 with open(path, 'wb') as f: # 保存 for i in img: f.write(i) response = { 'error': 0, 'url': '/media/article_imgs/%s' % img.name # 返回图片地址,可以在编辑框预览 } return 
HttpResponse(json.dumps(response)) def logout(request): # 注销 auth.logout(request) return redirect('/index/') def login(request): # 登录 if request.method == 'POST': user = request.POST.get('user') pwd = request.POST.get('pwd') validcode = request.POST.get('validcode') # 浏览器提交的 valid_code = request.session.get('valid_code') # 保存在服务器的 resopnse = {'user': None, 'msg': None} if validcode.upper() == valid_code.upper(): # 首先校验验证码,验证码不区分大小写 ret = auth.authenticate(username=user, password=pwd) if ret: # 用户存在 auth.login(request, ret) # 当前登录对象 resopnse['user'] = user else: resopnse['msg'] = 'username or password is wromg!' else: resopnse['msg'] = 'valid code error!' return JsonResponse(resopnse) return render(request, 'login.html') def get_validcode_img(request): # 生成随机验证码 data = get_validCode_img(request) return HttpResponse(data) def register(request): """ 注册页面 :param request: :return: """ if request.method == 'POST': # 或者if request.is_ajax 进行判断 form = User(request.POST) # 验证是否合要求 response = {'user': None, 'msg': None} if form.is_valid(): # 信息正确,增加注册用户 response['user'] = form.cleaned_data.get('user') user = form.cleaned_data.get('user') pwd = form.cleaned_data.get('pwd') email = form.cleaned_data.get('email') head_obj = request.FILES.get('avatar') # 文件提取 extra = {} # 额外传的数据都打包成字典 if head_obj: extra['avatar'] = head_obj UserInfo.objects.create_user(username=user, password=pwd, email=email, **extra) else: response['msg'] = form.errors return JsonResponse(response) # Ajax接受JSON文件 else: form = User() return render(request, 'reg.html', locals())
conditional_block
ui.rs
use failure::{bail, ensure, format_err, Error, Fallible}; use std::cell::{RefCell, RefMut}; use std::io::Read; use std::rc::Rc; use crate::terminal::{set_stdin_echo, TERMINAL_CLEAR_LINE}; use crate::util::to_hex_string; use crate::{Reader, ReaderFactory, Writer}; const ERROR_VERBOSITY: i32 = -1; const INTERACTIVE_VERBOSITY: i32 = -1; // User interaction interface. pub trait UI { // Initialization fn set_verbosity(&mut self, verbosity: i32); fn set_progress_enabled(&mut self, enabled: bool); // Environment information fn program_name(&self) -> &str; // Write/Print interface fn will_print(&self, verbosity: i32) -> bool; fn print(&self, verbosity: i32, message: &str) -> Fallible<()>; fn print_error(&self, err: &Error) -> Fallible<()>; fn println_interactive(&self, message: &str) -> Fallible<()>; fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>; fn println(&self, verbosity: i32, message: &str) -> Fallible<()> { self.print(verbosity, &format!("{}\n", message)) } // Read interface fn can_read(&self) -> bool; fn read_prompt(&self, prompt: &str) -> Fallible<String>; fn set_stdin_echo(&self, enable: bool); fn read_prompt_bool( &self, verbosity: i32, prompt: &str, default: bool, ) -> Fallible<Option<bool>> { if !self.can_read() || !self.will_print(verbosity) { return Ok(None); } let yn_helper = if default { "[Y/n]" } else { "[y/N]" }; let prompt = format!("{} {}: ", prompt, yn_helper); loop { match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() { "y" | "yes" => return Ok(Some(true)), "n" | "no" => return Ok(Some(false)), "" => return Ok(Some(default)), _ => { self.println_interactive("Invalid input, please enter 'y' or 'n'.")?; } } } } fn read_password(&self, prompt: &str) -> Fallible<String> { ensure!(self.can_read(), "Can't read from a non-TTY input"); self.set_stdin_echo(false); let res = self.read_prompt(prompt); self.set_stdin_echo(true); // With echo off we don't get the newline character from input; we need to output it ourselves. self.println_interactive("")?; res } } pub struct BasicUI { program_name: String, input: Rc<RefCell<Option<Reader>>>, output: RefCell<Writer>, input_is_tty: bool, output_is_tty: bool, verbosity: i32, progress_enabled: bool, } impl BasicUI { pub fn new( program_name: String, input: Reader, input_is_tty: bool, output: Writer, output_is_tty: bool, ) -> BasicUI { BasicUI { program_name, input: Rc::new(RefCell::new(Some(input))), input_is_tty, output: RefCell::new(output), output_is_tty, verbosity: 0, progress_enabled: true, } } // Create a function that extracts input stream from this struct, returning it to the caller. // After returned function is called, this struct loses input stream and with it the ability to // prompt user for input/passwords. pub fn input_stream_extractor(&mut self) -> ReaderFactory { let input = Rc::clone(&self.input); Box::new(move || Ok(input.borrow_mut().take().unwrap())) } } impl UI for BasicUI { fn set_verbosity(&mut self, verbosity: i32) { self.verbosity = verbosity; } fn set_progress_enabled(&mut self, enabled: bool) { self.progress_enabled = enabled; } fn program_name(&self) -> &str { &self.program_name } // Write interface fn will_print(&self, verbosity: i32) -> bool { verbosity <= self.verbosity } fn print(&self, verbosity: i32, message: &str) -> Fallible<()> { if self.will_print(verbosity) { self.output.borrow_mut().write_all(message.as_bytes())?; } Ok(()) } fn print_error(&self, err: &Error) -> Fallible<()> { if self.will_print(ERROR_VERBOSITY)
Ok(()) } fn println_interactive(&self, message: &str) -> Fallible<()> { if self.will_print(INTERACTIVE_VERBOSITY) { writeln!(self.output.borrow_mut(), "{}", message)?; } Ok(()) } fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> { if self.progress_enabled { let last_char = if finish { "\n" } else { "\r" }; let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char); self.print(verbosity, &message)?; } Ok(()) } // Read interface fn can_read(&self) -> bool { self.input.borrow().is_some() && self.input_is_tty && self.output_is_tty && self.will_print(INTERACTIVE_VERBOSITY) } fn read_prompt(&self, prompt: &str) -> Fallible<String> { ensure!(self.can_read(), "Can't read from a non-TTY input"); let mut output = self.output.borrow_mut(); let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap()); write!(output, "{}", prompt)?; // Read from stdin byte-by-byte and convert them to utf8 characters, stopping at '\n'. let mut char_bytes = vec![]; let mut res = String::new(); for byte in input.by_ref().bytes() { char_bytes.push(byte?); match std::str::from_utf8(&char_bytes) { Ok(valid_char) => { match valid_char { "\n" => { if res.ends_with('\r') { res.pop(); // Handle Windows CRLF. } return Ok(res); } valid_char => res.push_str(valid_char), } char_bytes.clear(); } Err(utf_err) => match utf_err.error_len() { None => (), // Incomplete character - get more bytes. Some(_) => bail!( "Error reading from stdin: Non-UTF8 byte sequence encountered: {}", to_hex_string(char_bytes) ), }, } } Err(format_err!("Error reading from stdin: EOF")) } fn set_stdin_echo(&self, enable: bool) { set_stdin_echo(enable); } } #[cfg(test)] pub mod test_helpers { use super::*; use std::collections::VecDeque; #[derive(Debug, PartialEq, Clone, Copy)] pub enum PrintType { Log { verbosity: i32 }, Error, Interactive, Progress { verbosity: i32, finish: bool }, } #[derive(Default)] pub struct TestUI { pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>, pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>, } impl TestUI { pub fn new() -> TestUI { TestUI { ..Default::default() } } pub fn expect_prompt( self, matcher: impl AsRef<str>, reply: Result<impl AsRef<str>, Error>, ) -> Self { self.prompt_replies.borrow_mut().push_back(( Some(matcher.as_ref().to_string()), reply.map(|s| s.as_ref().to_string()), )); self } pub fn expect_all_prompts_asked(&self) { assert_eq!(self.prompt_replies.borrow_mut().len(), 0); } fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> { let message = message.as_ref(); let lines = message.lines().collect::<Vec<_>>(); let lines_len = lines.len(); let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| { let line_finished = idx < lines_len - 1 || message.ends_with('\n'); (typ, line.to_string(), line_finished) }); let mut printed_lines = self.printed_lines.borrow_mut(); // Append to last line if it has the same type if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() { if *last_typ == typ && !*last_line_finished { if let Some((_, line, finished)) = line_tuples.next() { last_line.push_str(&line); *last_line_finished = finished; } } } printed_lines.extend(line_tuples); Ok(()) } } impl UI for TestUI { fn set_verbosity(&mut self, _verbosity: i32) {} fn set_progress_enabled(&mut self, _enabled: bool) {} fn program_name(&self) -> &str { "rypt" } // Write interface fn will_print(&self, _verbosity: i32) -> bool { true } fn print(&self, 
verbosity: i32, message: &str) -> Fallible<()> { self.append_printed_lines(PrintType::Log { verbosity }, message) } fn print_error(&self, err: &Error) -> Result<(), Error> { self.append_printed_lines(PrintType::Error, &format!("{}", err)) } fn println_interactive(&self, message: &str) -> Result<(), Error> { self.append_printed_lines(PrintType::Interactive, message) } fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> { self.append_printed_lines(PrintType::Progress { verbosity, finish }, message) } // Read interface fn can_read(&self) -> bool { true } fn read_prompt(&self, prompt: &str) -> Result<String, Error> { let (matcher, reply) = self .prompt_replies .borrow_mut() .pop_front() .unwrap_or_else(|| panic!("Unexpected prompt in TestUI: '{}'", prompt)); if let Some(matcher) = matcher { assert!( prompt.contains(&matcher), "Unexpected prompt in TestUI: '{}', was looking for '{}'", prompt, matcher ); } reply } fn set_stdin_echo(&self, _enable: bool) {} } }
{ writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?; }
conditional_block
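The `input_stream_extractor` comment in the ui.rs record above describes handing the input stream to a caller exactly once through an `Rc<RefCell<Option<Reader>>>`. The following is a standalone sketch of that take-once pattern, with hypothetical names and `String` standing in for the reader (the real method returns a boxed `ReaderFactory`).

use std::cell::RefCell;
use std::rc::Rc;

// Returns a closure that, when called, takes the value out of the shared
// slot and leaves `None` behind, so the original owner loses access to it.
fn extractor(slot: &Rc<RefCell<Option<String>>>) -> impl FnOnce() -> Option<String> {
    let slot = Rc::clone(slot);
    move || slot.borrow_mut().take()
}

fn main() {
    let input = Rc::new(RefCell::new(Some(String::from("stdin"))));
    let take_input = extractor(&input);
    assert_eq!(take_input(), Some(String::from("stdin"))); // the caller now owns the stream
    assert!(input.borrow().is_none());                     // the original holder has lost it
}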
ui.rs
use failure::{bail, ensure, format_err, Error, Fallible}; use std::cell::{RefCell, RefMut}; use std::io::Read; use std::rc::Rc; use crate::terminal::{set_stdin_echo, TERMINAL_CLEAR_LINE}; use crate::util::to_hex_string; use crate::{Reader, ReaderFactory, Writer}; const ERROR_VERBOSITY: i32 = -1; const INTERACTIVE_VERBOSITY: i32 = -1; // User interaction interface. pub trait UI { // Initialization fn set_verbosity(&mut self, verbosity: i32); fn set_progress_enabled(&mut self, enabled: bool); // Environment information fn program_name(&self) -> &str; // Write/Print interface fn will_print(&self, verbosity: i32) -> bool; fn print(&self, verbosity: i32, message: &str) -> Fallible<()>; fn print_error(&self, err: &Error) -> Fallible<()>; fn println_interactive(&self, message: &str) -> Fallible<()>; fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>; fn println(&self, verbosity: i32, message: &str) -> Fallible<()> { self.print(verbosity, &format!("{}\n", message)) } // Read interface fn can_read(&self) -> bool; fn read_prompt(&self, prompt: &str) -> Fallible<String>; fn set_stdin_echo(&self, enable: bool); fn read_prompt_bool( &self, verbosity: i32, prompt: &str, default: bool, ) -> Fallible<Option<bool>> { if !self.can_read() || !self.will_print(verbosity) { return Ok(None); } let yn_helper = if default { "[Y/n]" } else { "[y/N]" }; let prompt = format!("{} {}: ", prompt, yn_helper); loop { match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() { "y" | "yes" => return Ok(Some(true)), "n" | "no" => return Ok(Some(false)), "" => return Ok(Some(default)), _ => { self.println_interactive("Invalid input, please enter 'y' or 'n'.")?; } } } } fn read_password(&self, prompt: &str) -> Fallible<String> { ensure!(self.can_read(), "Can't read from a non-TTY input"); self.set_stdin_echo(false); let res = self.read_prompt(prompt); self.set_stdin_echo(true); // With echo off we don't get the newline character from input; we need to output it ourselves. self.println_interactive("")?; res } } pub struct BasicUI { program_name: String, input: Rc<RefCell<Option<Reader>>>, output: RefCell<Writer>, input_is_tty: bool, output_is_tty: bool, verbosity: i32, progress_enabled: bool, } impl BasicUI { pub fn new( program_name: String, input: Reader, input_is_tty: bool, output: Writer, output_is_tty: bool, ) -> BasicUI { BasicUI { program_name, input: Rc::new(RefCell::new(Some(input))), input_is_tty, output: RefCell::new(output), output_is_tty, verbosity: 0, progress_enabled: true, } } // Create a function that extracts input stream from this struct, returning it to the caller. // After returned function is called, this struct loses input stream and with it the ability to // prompt user for input/passwords. 
pub fn input_stream_extractor(&mut self) -> ReaderFactory { let input = Rc::clone(&self.input); Box::new(move || Ok(input.borrow_mut().take().unwrap())) } } impl UI for BasicUI { fn set_verbosity(&mut self, verbosity: i32) { self.verbosity = verbosity; } fn set_progress_enabled(&mut self, enabled: bool) { self.progress_enabled = enabled; } fn program_name(&self) -> &str { &self.program_name } // Write interface fn will_print(&self, verbosity: i32) -> bool { verbosity <= self.verbosity } fn print(&self, verbosity: i32, message: &str) -> Fallible<()> { if self.will_print(verbosity) { self.output.borrow_mut().write_all(message.as_bytes())?; } Ok(()) } fn print_error(&self, err: &Error) -> Fallible<()> { if self.will_print(ERROR_VERBOSITY) { writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?; } Ok(()) } fn println_interactive(&self, message: &str) -> Fallible<()> { if self.will_print(INTERACTIVE_VERBOSITY) { writeln!(self.output.borrow_mut(), "{}", message)?; } Ok(()) } fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> { if self.progress_enabled { let last_char = if finish { "\n" } else { "\r" }; let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char); self.print(verbosity, &message)?; } Ok(()) } // Read interface fn can_read(&self) -> bool { self.input.borrow().is_some() && self.input_is_tty && self.output_is_tty && self.will_print(INTERACTIVE_VERBOSITY) } fn read_prompt(&self, prompt: &str) -> Fallible<String> { ensure!(self.can_read(), "Can't read from a non-TTY input"); let mut output = self.output.borrow_mut(); let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap()); write!(output, "{}", prompt)?; // Read from stdin byte-by-byte and convert them to utf8 characters, stopping at '\n'. let mut char_bytes = vec![]; let mut res = String::new(); for byte in input.by_ref().bytes() { char_bytes.push(byte?); match std::str::from_utf8(&char_bytes) { Ok(valid_char) => { match valid_char { "\n" => { if res.ends_with('\r') { res.pop(); // Handle Windows CRLF. } return Ok(res); } valid_char => res.push_str(valid_char), } char_bytes.clear(); } Err(utf_err) => match utf_err.error_len() { None => (), // Incomplete character - get more bytes. Some(_) => bail!( "Error reading from stdin: Non-UTF8 byte sequence encountered: {}", to_hex_string(char_bytes) ), }, } } Err(format_err!("Error reading from stdin: EOF")) } fn set_stdin_echo(&self, enable: bool) { set_stdin_echo(enable); } } #[cfg(test)] pub mod test_helpers { use super::*; use std::collections::VecDeque; #[derive(Debug, PartialEq, Clone, Copy)] pub enum PrintType { Log { verbosity: i32 }, Error, Interactive, Progress { verbosity: i32, finish: bool }, } #[derive(Default)] pub struct TestUI { pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>, pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>, } impl TestUI { pub fn new() -> TestUI
pub fn expect_prompt( self, matcher: impl AsRef<str>, reply: Result<impl AsRef<str>, Error>, ) -> Self { self.prompt_replies.borrow_mut().push_back(( Some(matcher.as_ref().to_string()), reply.map(|s| s.as_ref().to_string()), )); self } pub fn expect_all_prompts_asked(&self) { assert_eq!(self.prompt_replies.borrow_mut().len(), 0); } fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> { let message = message.as_ref(); let lines = message.lines().collect::<Vec<_>>(); let lines_len = lines.len(); let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| { let line_finished = idx < lines_len - 1 || message.ends_with('\n'); (typ, line.to_string(), line_finished) }); let mut printed_lines = self.printed_lines.borrow_mut(); // Append to last line if it has the same type if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() { if *last_typ == typ && !*last_line_finished { if let Some((_, line, finished)) = line_tuples.next() { last_line.push_str(&line); *last_line_finished = finished; } } } printed_lines.extend(line_tuples); Ok(()) } } impl UI for TestUI { fn set_verbosity(&mut self, _verbosity: i32) {} fn set_progress_enabled(&mut self, _enabled: bool) {} fn program_name(&self) -> &str { "rypt" } // Write interface fn will_print(&self, _verbosity: i32) -> bool { true } fn print(&self, verbosity: i32, message: &str) -> Fallible<()> { self.append_printed_lines(PrintType::Log { verbosity }, message) } fn print_error(&self, err: &Error) -> Result<(), Error> { self.append_printed_lines(PrintType::Error, &format!("{}", err)) } fn println_interactive(&self, message: &str) -> Result<(), Error> { self.append_printed_lines(PrintType::Interactive, message) } fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> { self.append_printed_lines(PrintType::Progress { verbosity, finish }, message) } // Read interface fn can_read(&self) -> bool { true } fn read_prompt(&self, prompt: &str) -> Result<String, Error> { let (matcher, reply) = self .prompt_replies .borrow_mut() .pop_front() .unwrap_or_else(|| panic!("Unexpected prompt in TestUI: '{}'", prompt)); if let Some(matcher) = matcher { assert!( prompt.contains(&matcher), "Unexpected prompt in TestUI: '{}', was looking for '{}'", prompt, matcher ); } reply } fn set_stdin_echo(&self, _enable: bool) {} } }
{ TestUI { ..Default::default() } }
identifier_body
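`read_prompt` in the record above assembles characters from raw stdin bytes by retrying `std::str::from_utf8` on a growing buffer and using `error_len()` to distinguish an incomplete character from genuinely invalid input. Below is a standalone sketch of that incremental decoding step, with hypothetical names; it is not part of the dataset.

// Feed one byte into `pending`; move completed characters into `out`.
// `error_len() == None` means "incomplete, wait for more bytes";
// `Some(_)` means the bytes can never become valid UTF-8.
fn push_byte(pending: &mut Vec<u8>, out: &mut String, byte: u8) -> Result<(), String> {
    pending.push(byte);
    match std::str::from_utf8(pending.as_slice()) {
        Ok(ch) => out.push_str(ch),
        Err(e) if e.error_len().is_none() => return Ok(()), // incomplete character
        Err(_) => return Err(format!("invalid UTF-8 byte sequence: {:?}", pending)),
    }
    pending.clear();
    Ok(())
}

fn main() {
    let mut pending = Vec::new();
    let mut out = String::new();
    // "é" is two bytes in UTF-8 (0xC3 0xA9); feed every byte one at a time.
    for &b in b"caf\xC3\xA9" {
        push_byte(&mut pending, &mut out, b).unwrap();
    }
    assert_eq!(out, "café");
}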
ui.rs
use failure::{bail, ensure, format_err, Error, Fallible}; use std::cell::{RefCell, RefMut}; use std::io::Read; use std::rc::Rc; use crate::terminal::{set_stdin_echo, TERMINAL_CLEAR_LINE}; use crate::util::to_hex_string; use crate::{Reader, ReaderFactory, Writer}; const ERROR_VERBOSITY: i32 = -1; const INTERACTIVE_VERBOSITY: i32 = -1; // User interaction interface. pub trait UI { // Initialization fn set_verbosity(&mut self, verbosity: i32); fn set_progress_enabled(&mut self, enabled: bool); // Environment information fn program_name(&self) -> &str; // Write/Print interface fn will_print(&self, verbosity: i32) -> bool; fn print(&self, verbosity: i32, message: &str) -> Fallible<()>; fn print_error(&self, err: &Error) -> Fallible<()>; fn println_interactive(&self, message: &str) -> Fallible<()>; fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>; fn println(&self, verbosity: i32, message: &str) -> Fallible<()> { self.print(verbosity, &format!("{}\n", message)) } // Read interface fn can_read(&self) -> bool; fn read_prompt(&self, prompt: &str) -> Fallible<String>; fn set_stdin_echo(&self, enable: bool); fn read_prompt_bool( &self, verbosity: i32, prompt: &str, default: bool, ) -> Fallible<Option<bool>> { if !self.can_read() || !self.will_print(verbosity) { return Ok(None); } let yn_helper = if default { "[Y/n]" } else { "[y/N]" }; let prompt = format!("{} {}: ", prompt, yn_helper); loop { match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() { "y" | "yes" => return Ok(Some(true)), "n" | "no" => return Ok(Some(false)), "" => return Ok(Some(default)), _ => { self.println_interactive("Invalid input, please enter 'y' or 'n'.")?; } } } } fn read_password(&self, prompt: &str) -> Fallible<String> { ensure!(self.can_read(), "Can't read from a non-TTY input"); self.set_stdin_echo(false); let res = self.read_prompt(prompt); self.set_stdin_echo(true); // With echo off we don't get the newline character from input; we need to output it ourselves. self.println_interactive("")?; res } } pub struct BasicUI { program_name: String, input: Rc<RefCell<Option<Reader>>>, output: RefCell<Writer>, input_is_tty: bool, output_is_tty: bool, verbosity: i32, progress_enabled: bool, } impl BasicUI { pub fn new( program_name: String, input: Reader, input_is_tty: bool, output: Writer, output_is_tty: bool, ) -> BasicUI { BasicUI { program_name, input: Rc::new(RefCell::new(Some(input))), input_is_tty, output: RefCell::new(output), output_is_tty, verbosity: 0, progress_enabled: true, } } // Create a function that extracts input stream from this struct, returning it to the caller. // After returned function is called, this struct loses input stream and with it the ability to // prompt user for input/passwords. 
pub fn input_stream_extractor(&mut self) -> ReaderFactory { let input = Rc::clone(&self.input); Box::new(move || Ok(input.borrow_mut().take().unwrap())) } } impl UI for BasicUI { fn set_verbosity(&mut self, verbosity: i32) { self.verbosity = verbosity; } fn set_progress_enabled(&mut self, enabled: bool) { self.progress_enabled = enabled; } fn program_name(&self) -> &str { &self.program_name } // Write interface fn will_print(&self, verbosity: i32) -> bool { verbosity <= self.verbosity } fn print(&self, verbosity: i32, message: &str) -> Fallible<()> { if self.will_print(verbosity) { self.output.borrow_mut().write_all(message.as_bytes())?; } Ok(()) } fn print_error(&self, err: &Error) -> Fallible<()> { if self.will_print(ERROR_VERBOSITY) { writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?; } Ok(()) } fn println_interactive(&self, message: &str) -> Fallible<()> { if self.will_print(INTERACTIVE_VERBOSITY) { writeln!(self.output.borrow_mut(), "{}", message)?; } Ok(()) } fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> { if self.progress_enabled { let last_char = if finish { "\n" } else { "\r" }; let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char); self.print(verbosity, &message)?; } Ok(()) } // Read interface fn can_read(&self) -> bool { self.input.borrow().is_some() && self.input_is_tty && self.output_is_tty && self.will_print(INTERACTIVE_VERBOSITY) } fn read_prompt(&self, prompt: &str) -> Fallible<String> { ensure!(self.can_read(), "Can't read from a non-TTY input"); let mut output = self.output.borrow_mut(); let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap()); write!(output, "{}", prompt)?; // Read from stdin byte-by-byte and convert them to utf8 characters, stopping at '\n'. let mut char_bytes = vec![]; let mut res = String::new(); for byte in input.by_ref().bytes() { char_bytes.push(byte?); match std::str::from_utf8(&char_bytes) { Ok(valid_char) => { match valid_char { "\n" => { if res.ends_with('\r') { res.pop(); // Handle Windows CRLF. } return Ok(res); } valid_char => res.push_str(valid_char), } char_bytes.clear(); } Err(utf_err) => match utf_err.error_len() { None => (), // Incomplete character - get more bytes. 
Some(_) => bail!( "Error reading from stdin: Non-UTF8 byte sequence encountered: {}", to_hex_string(char_bytes) ), }, } } Err(format_err!("Error reading from stdin: EOF")) } fn set_stdin_echo(&self, enable: bool) { set_stdin_echo(enable); } } #[cfg(test)] pub mod test_helpers { use super::*; use std::collections::VecDeque; #[derive(Debug, PartialEq, Clone, Copy)] pub enum PrintType { Log { verbosity: i32 }, Error, Interactive, Progress { verbosity: i32, finish: bool }, } #[derive(Default)] pub struct TestUI { pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>, pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>, } impl TestUI { pub fn new() -> TestUI { TestUI { ..Default::default() } } pub fn expect_prompt( self, matcher: impl AsRef<str>, reply: Result<impl AsRef<str>, Error>, ) -> Self { self.prompt_replies.borrow_mut().push_back(( Some(matcher.as_ref().to_string()), reply.map(|s| s.as_ref().to_string()), )); self } pub fn expect_all_prompts_asked(&self) { assert_eq!(self.prompt_replies.borrow_mut().len(), 0); } fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> { let message = message.as_ref(); let lines = message.lines().collect::<Vec<_>>(); let lines_len = lines.len(); let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| { let line_finished = idx < lines_len - 1 || message.ends_with('\n'); (typ, line.to_string(), line_finished) }); let mut printed_lines = self.printed_lines.borrow_mut(); // Append to last line if it has the same type if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() { if *last_typ == typ && !*last_line_finished { if let Some((_, line, finished)) = line_tuples.next() { last_line.push_str(&line); *last_line_finished = finished; } }
} } impl UI for TestUI { fn set_verbosity(&mut self, _verbosity: i32) {} fn set_progress_enabled(&mut self, _enabled: bool) {} fn program_name(&self) -> &str { "rypt" } // Write interface fn will_print(&self, _verbosity: i32) -> bool { true } fn print(&self, verbosity: i32, message: &str) -> Fallible<()> { self.append_printed_lines(PrintType::Log { verbosity }, message) } fn print_error(&self, err: &Error) -> Result<(), Error> { self.append_printed_lines(PrintType::Error, &format!("{}", err)) } fn println_interactive(&self, message: &str) -> Result<(), Error> { self.append_printed_lines(PrintType::Interactive, message) } fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> { self.append_printed_lines(PrintType::Progress { verbosity, finish }, message) } // Read interface fn can_read(&self) -> bool { true } fn read_prompt(&self, prompt: &str) -> Result<String, Error> { let (matcher, reply) = self .prompt_replies .borrow_mut() .pop_front() .unwrap_or_else(|| panic!("Unexpected prompt in TestUI: '{}'", prompt)); if let Some(matcher) = matcher { assert!( prompt.contains(&matcher), "Unexpected prompt in TestUI: '{}', was looking for '{}'", prompt, matcher ); } reply } fn set_stdin_echo(&self, _enable: bool) {} } }
} printed_lines.extend(line_tuples); Ok(())
random_line_split
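A rough Python analogue of the byte-by-byte UTF-8 line reading that read_prompt performs above: each byte is fed to an incremental decoder, which buffers incomplete multi-byte characters and raises on invalid sequences, and a trailing '\r' is dropped to handle Windows CRLF. This is only an illustrative sketch; the function name read_line_utf8 and the stream parameter are invented here and are not part of ui.rs.

import codecs

def read_line_utf8(stream):
    # stream is any binary file-like object, e.g. io.BytesIO or sys.stdin.buffer
    decoder = codecs.getincrementaldecoder("utf-8")()   # raises UnicodeDecodeError on invalid bytes
    chars = []
    while True:
        byte = stream.read(1)
        if not byte:
            raise EOFError("EOF while reading line")
        ch = decoder.decode(byte)        # returns '' while a multi-byte character is still incomplete
        if ch == "\n":
            if chars and chars[-1] == "\r":   # drop Windows CRLF, as the Rust code does
                chars.pop()
            return "".join(chars)
        if ch:
            chars.append(ch)

# Usage sketch: read_line_utf8(io.BytesIO("héllo\r\n".encode())) returns "héllo"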
ui.rs
use failure::{bail, ensure, format_err, Error, Fallible}; use std::cell::{RefCell, RefMut}; use std::io::Read; use std::rc::Rc; use crate::terminal::{set_stdin_echo, TERMINAL_CLEAR_LINE}; use crate::util::to_hex_string; use crate::{Reader, ReaderFactory, Writer}; const ERROR_VERBOSITY: i32 = -1; const INTERACTIVE_VERBOSITY: i32 = -1; // User interaction interface. pub trait UI { // Initialization fn set_verbosity(&mut self, verbosity: i32); fn set_progress_enabled(&mut self, enabled: bool); // Environment information fn program_name(&self) -> &str; // Write/Print interface fn will_print(&self, verbosity: i32) -> bool; fn print(&self, verbosity: i32, message: &str) -> Fallible<()>; fn print_error(&self, err: &Error) -> Fallible<()>; fn println_interactive(&self, message: &str) -> Fallible<()>; fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>; fn println(&self, verbosity: i32, message: &str) -> Fallible<()> { self.print(verbosity, &format!("{}\n", message)) } // Read interface fn can_read(&self) -> bool; fn read_prompt(&self, prompt: &str) -> Fallible<String>; fn set_stdin_echo(&self, enable: bool); fn read_prompt_bool( &self, verbosity: i32, prompt: &str, default: bool, ) -> Fallible<Option<bool>> { if !self.can_read() || !self.will_print(verbosity) { return Ok(None); } let yn_helper = if default { "[Y/n]" } else { "[y/N]" }; let prompt = format!("{} {}: ", prompt, yn_helper); loop { match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() { "y" | "yes" => return Ok(Some(true)), "n" | "no" => return Ok(Some(false)), "" => return Ok(Some(default)), _ => { self.println_interactive("Invalid input, please enter 'y' or 'n'.")?; } } } } fn read_password(&self, prompt: &str) -> Fallible<String> { ensure!(self.can_read(), "Can't read from a non-TTY input"); self.set_stdin_echo(false); let res = self.read_prompt(prompt); self.set_stdin_echo(true); // With echo off we don't get the newline character from input; we need to output it ourselves. self.println_interactive("")?; res } } pub struct BasicUI { program_name: String, input: Rc<RefCell<Option<Reader>>>, output: RefCell<Writer>, input_is_tty: bool, output_is_tty: bool, verbosity: i32, progress_enabled: bool, } impl BasicUI { pub fn new( program_name: String, input: Reader, input_is_tty: bool, output: Writer, output_is_tty: bool, ) -> BasicUI { BasicUI { program_name, input: Rc::new(RefCell::new(Some(input))), input_is_tty, output: RefCell::new(output), output_is_tty, verbosity: 0, progress_enabled: true, } } // Create a function that extracts input stream from this struct, returning it to the caller. // After returned function is called, this struct loses input stream and with it the ability to // prompt user for input/passwords. 
pub fn input_stream_extractor(&mut self) -> ReaderFactory { let input = Rc::clone(&self.input); Box::new(move || Ok(input.borrow_mut().take().unwrap())) } } impl UI for BasicUI { fn set_verbosity(&mut self, verbosity: i32) { self.verbosity = verbosity; } fn set_progress_enabled(&mut self, enabled: bool) { self.progress_enabled = enabled; } fn program_name(&self) -> &str { &self.program_name } // Write interface fn will_print(&self, verbosity: i32) -> bool { verbosity <= self.verbosity } fn print(&self, verbosity: i32, message: &str) -> Fallible<()> { if self.will_print(verbosity) { self.output.borrow_mut().write_all(message.as_bytes())?; } Ok(()) } fn print_error(&self, err: &Error) -> Fallible<()> { if self.will_print(ERROR_VERBOSITY) { writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?; } Ok(()) } fn println_interactive(&self, message: &str) -> Fallible<()> { if self.will_print(INTERACTIVE_VERBOSITY) { writeln!(self.output.borrow_mut(), "{}", message)?; } Ok(()) } fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> { if self.progress_enabled { let last_char = if finish { "\n" } else { "\r" }; let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char); self.print(verbosity, &message)?; } Ok(()) } // Read interface fn
(&self) -> bool { self.input.borrow().is_some() && self.input_is_tty && self.output_is_tty && self.will_print(INTERACTIVE_VERBOSITY) } fn read_prompt(&self, prompt: &str) -> Fallible<String> { ensure!(self.can_read(), "Can't read from a non-TTY input"); let mut output = self.output.borrow_mut(); let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap()); write!(output, "{}", prompt)?; // Read from stdin byte-by-byte and convert them to utf8 characters, stopping at '\n'. let mut char_bytes = vec![]; let mut res = String::new(); for byte in input.by_ref().bytes() { char_bytes.push(byte?); match std::str::from_utf8(&char_bytes) { Ok(valid_char) => { match valid_char { "\n" => { if res.ends_with('\r') { res.pop(); // Handle Windows CRLF. } return Ok(res); } valid_char => res.push_str(valid_char), } char_bytes.clear(); } Err(utf_err) => match utf_err.error_len() { None => (), // Incomplete character - get more bytes. Some(_) => bail!( "Error reading from stdin: Non-UTF8 byte sequence encountered: {}", to_hex_string(char_bytes) ), }, } } Err(format_err!("Error reading from stdin: EOF")) } fn set_stdin_echo(&self, enable: bool) { set_stdin_echo(enable); } } #[cfg(test)] pub mod test_helpers { use super::*; use std::collections::VecDeque; #[derive(Debug, PartialEq, Clone, Copy)] pub enum PrintType { Log { verbosity: i32 }, Error, Interactive, Progress { verbosity: i32, finish: bool }, } #[derive(Default)] pub struct TestUI { pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>, pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>, } impl TestUI { pub fn new() -> TestUI { TestUI { ..Default::default() } } pub fn expect_prompt( self, matcher: impl AsRef<str>, reply: Result<impl AsRef<str>, Error>, ) -> Self { self.prompt_replies.borrow_mut().push_back(( Some(matcher.as_ref().to_string()), reply.map(|s| s.as_ref().to_string()), )); self } pub fn expect_all_prompts_asked(&self) { assert_eq!(self.prompt_replies.borrow_mut().len(), 0); } fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> { let message = message.as_ref(); let lines = message.lines().collect::<Vec<_>>(); let lines_len = lines.len(); let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| { let line_finished = idx < lines_len - 1 || message.ends_with('\n'); (typ, line.to_string(), line_finished) }); let mut printed_lines = self.printed_lines.borrow_mut(); // Append to last line if it has the same type if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() { if *last_typ == typ && !*last_line_finished { if let Some((_, line, finished)) = line_tuples.next() { last_line.push_str(&line); *last_line_finished = finished; } } } printed_lines.extend(line_tuples); Ok(()) } } impl UI for TestUI { fn set_verbosity(&mut self, _verbosity: i32) {} fn set_progress_enabled(&mut self, _enabled: bool) {} fn program_name(&self) -> &str { "rypt" } // Write interface fn will_print(&self, _verbosity: i32) -> bool { true } fn print(&self, verbosity: i32, message: &str) -> Fallible<()> { self.append_printed_lines(PrintType::Log { verbosity }, message) } fn print_error(&self, err: &Error) -> Result<(), Error> { self.append_printed_lines(PrintType::Error, &format!("{}", err)) } fn println_interactive(&self, message: &str) -> Result<(), Error> { self.append_printed_lines(PrintType::Interactive, message) } fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> { 
self.append_printed_lines(PrintType::Progress { verbosity, finish }, message) } // Read interface fn can_read(&self) -> bool { true } fn read_prompt(&self, prompt: &str) -> Result<String, Error> { let (matcher, reply) = self .prompt_replies .borrow_mut() .pop_front() .unwrap_or_else(|| panic!("Unexpected prompt in TestUI: '{}'", prompt)); if let Some(matcher) = matcher { assert!( prompt.contains(&matcher), "Unexpected prompt in TestUI: '{}', was looking for '{}'", prompt, matcher ); } reply } fn set_stdin_echo(&self, _enable: bool) {} } }
can_read
identifier_name
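The TestUI helper above answers prompts from a queue of expected (matcher, reply) pairs and fails on anything unexpected; the same test-double pattern can be expressed in a few lines of Python. The class and method names below are invented for illustration and only mirror, not reproduce, the Rust helper.

from collections import deque

class FakePrompter:
    # Holds (substring expected in the prompt, canned reply) pairs, consumed in order.
    def __init__(self):
        self.replies = deque()

    def expect_prompt(self, substring, reply):
        self.replies.append((substring, reply))
        return self                       # allow chaining, like TestUI.expect_prompt

    def read_prompt(self, prompt):
        if not self.replies:
            raise AssertionError(f"Unexpected prompt: {prompt!r}")
        substring, reply = self.replies.popleft()
        assert substring in prompt, f"Prompt {prompt!r} does not contain {substring!r}"
        return reply

# ui = FakePrompter().expect_prompt("Password", "hunter2")
# ui.read_prompt("Enter Password: ")  -> "hunter2"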
electronics.py
#! python3.4 from __future__ import division, print_function import pygame as P from random import randint, choice from os.path import join from functools import reduce if __name__ == "__main__": import sys sys.path.append("..") from vec2d import vec2d from Engine.effects import repeated_surface as repeat class Chip(): def __init__(self, length, connector, innercolor = P.Color(50,50,50), deviation = 3, bordercolor = P.Color(150,150,150)): size = length,length rect = P.Rect((0,0), size) innerrect = rect.inflate(-connector.indent*2,-connector.indent*2) self.surface = P.Surface(size, flags = P.SRCALPHA) self.surface.fill((0,0,0,0)) insurface = self.surface.subsurface(innerrect)#P.Surface(self.innerrect.size) for x in range(insurface.get_width()): for y in range(insurface.get_height()): insurface.set_at((x,y),[z+randint(-deviation, deviation) for z in innercolor[:3]]) innerlength = innerrect.width ele = (connector.spacing+connector.width) slots = innerlength//ele-2 filled = slots*ele-connector.spacing rest = innerlength - filled if rest%2: print("Warning|electronics.py:could not center chip connectors, change chip size.") con1,con2,con3,con4 = connector.surfaces posses = tuple(range(connector.indent+rest//2, connector.indent+innerlength-rest//2, ele)) self.dis = length-connector.length for z in posses: self.surface.blit(con2,(z,0))#top self.surface.blit(con4,(z,self.dis))#bottom self.surface.blit(con3, (0, z))#left self.surface.blit(con1, (self.dis, z))#right self.interfaces = posses#attachement nodes for circuit def get_interfaces(self, x,y): sides = {} sides["left"] = [vec2d(x, y+z) for z in self.interfaces] sides["right"] = [vec2d(self.dis+x, y+z) for z in self.interfaces] sides["top"] = [vec2d(x+z, y) for z in self.interfaces] sides["bottom"] = [vec2d(x+z, y+self.dis) for z in self.interfaces] return sides class Fizzle(): """electric fizzle on the Grid""" def __init__(self, surface, connection, speed = 1): self.connection = connection self.surface = surface self.pos = connection.start self.direction = self.end-self.start self.time = connection.time class AnimFizzle(): def __init__(self, grid,amount, speed, color = (250,250,100)): self.grid = grid connections = [] for node in grid.nodes.values(): connections.extend(node.connections) for c in connections: c.direction.length = speed c.scale_time(speed) fizimage = P.image.load(join("Circuit","blib.png")) blitter = P.Surface(fizimage.get_size()) blitter.fill(color) fizimage.blit(blitter, (0,0), special_flags = P.BLEND_MULT) self.fizzles = [Fizzle(choice(connections),fizimage) for _ in range(amount)] def render(self,surface): copy = self.grid.surface.copy() rects = [f.render(copy) for f in self.fizzles] surface.blit(copy, (0,0)) return rects Fi = 0 class Fizzle(): """electric fizzle on the Grid""" def __init__(self, connection, surface): global Fi self.follow(connection) self.surface = surface self.fi = Fi Fi += 1 def follow(self, connection): self.connection = connection self.direction = connection.direction self.pos = vec2d(connection.start) self.time = connection.time def render(self, target): self.time -= 1 if self.time <= 0: self.follow(choice(self.connection.node.connections)) self.pos += self.direction target.blit(self.surface, self.pos) class Grid(): delta = vec2d(-1,-1) class Node(): def __init__(self, position): self.position = position self.connections = [] def __repr__(self): return "Node(%s,%s)" % self.position class Connection(): def __init__(self, start, end, node): self.start = start+Grid.delta self.end = end+Grid.delta 
self.direction = end-start self.node = node def scale_time(self, speed): self.time = (self.end-self.start).length/self.direction.length def __init__(self, size, chip, connector, positions, tilemap): self.size = size self.chip = chip self.length = connector.width+connector.spacing self.surface = P.Surface(size) self.surface.fill(tilemap.basecolor) chiplength = self.chip.surface.get_width() xshift = chiplength//2 self.chipposs = [] levels = set() rows = set() barrows = [] barlines = [] self.nodes = {} interfaces = {} outsidenode = self.Node((None,None)) for x,y in positions: x,y = pos = (x-xshift, y-xshift) self.chipposs.append(pos) self.nodes[(x,y)] = self.Node((x,y)) interfaces[(x,y)] = chip.get_interfaces(x,y) if y not in levels: levels.add(y) barlines += (y+interface for interface in chip.interfaces) if x not in rows: rows.add(x) barrows += (x+interface for interface in chip.interfaces) minstraight = 21 #####Diagonals##### X = min(rows) XR = max(rows)+chiplength for y in levels:#left and right endconnectors ys = chip.interfaces if y == min(levels):spec = X-minstraight elif y == max(levels):spec = -X+minstraight else:spec = 0 for yl in ys: yt = yl+y+spec tilemap.draw_line(self.surface, (X-minstraight,yl+y+1), (0,yt+1)) tilemap.draw_line(self.surface, (XR+minstraight,yl+y+1), (size[0],yt+1)) Y = min(levels) for x in rows: if x == min(rows):spec = 0#spec = Y-minstraight elif x == max(rows):spec = 0#spec = -Y+minstraight else: if x > size[0]//2:spec = Y-minstraight else:spec = -Y+minstraight for xl in ys: xt = xl+x+spec tilemap.draw_line(self.surface, (x+xl+1, Y-minstraight),(xt+1, 0)) ######Straight Connections###### xs = min(rows) xe = max(rows)+chiplength bar = repeat(tilemap["h"], (xe-xs+2*minstraight, 5)) for y in barlines:
ys = min(levels) ye = size[1]#max(levels)+chiplength bar = repeat(tilemap["v"], (5, ye-ys+minstraight)) for x in barlines: self.surface.blit(bar, (x-1, ys-minstraight)) ######Chips###### [self.surface.blit(chip.surface, pos) for pos in self.chipposs] ##################Fizzle Logic###################### xs = list(rows) xs.sort() ys = list(levels) ys.sort() for x in rows: cons = [False, False] if xs[0] != x:#not left end leftx = xs[xs.index(x)-1] cons[0] = True if xs[-1] != x:#not right end rightx = xs[xs.index(x)+1] cons[1] = True for y in levels: localnode = self.nodes[(x,y)] if cons[0]: leftnode = self.nodes[(leftx, y)] for left, right in zip(interfaces[(leftx, y)]["right"], interfaces[(x,y)]["left"]): localnode.connections.append(self.Connection(right, left, leftnode)) if cons[1]: rightnode = self.nodes[(rightx, y)] for right, left in zip(interfaces[(rightx, y)]["left"], interfaces[(x,y)]["right"]): localnode.connections.append(self.Connection(left, right, rightnode)) if ys[-1] != y: downy = ys[ys.index(y)+1] downnode = self.nodes[(x, downy)] for down, up in zip(interfaces[(x,downy)]["top"], interfaces[(x,y)]["bottom"]): localnode.connections.append(self.Connection(up,down, downnode)) if ys[0] != y: upy = ys[ys.index(y)-1] upnode = self.nodes[(x, upy)] for up, down in zip(interfaces[(x,upy)]["bottom"], interfaces[(x,y)]["top"]): localnode.connections.append(self.Connection(down,up, upnode)) class TileMap(): def __init__(self, outercolor = (10,10,150), innercolor = (250,250,250)): self.m = m = P.Color(*[(x+y)//2 for x,y in zip(innercolor, outercolor)]) self.o = o = P.Color(*outercolor) self.i = i = P.Color(*innercolor) self.basecolor = outercolor size = 5,5 l = 5 self.tiles = {} S = P.Surface(size) PA = P.PixelArray(S) for x,color in zip(range(5), (o,m,i,m,o)): PA[x] = color self.tiles["v"] = S#vertical S = P.Surface(size) PA = P.PixelArray(S) for x,color in zip(range(5), (o,m,i,m,o)): PA[:, x] = color self.tiles["h"] = S#horizontal def __getitem__(self, key): return self.tiles[key] def save_images(self): for name, surface in self.tiles.items(): P.image.save(surface, "_test_"+name+".png") def draw_line(self, surface, start, end): dif_x = end[0]-start[0] dif_y = end[1]-start[1] if not abs(dif_x) == abs(dif_y) and dif_y and dif_x: raise ValueError("Can only handle 45 Degree Diagonals. 
Dif: (%s,%s)" % (dif_x, dif_y)) dir_x = dif_x//abs(dif_x) if dif_x else 0 dir_y = dif_y//abs(dif_y) if dif_y else 0 center = set() finish = end[0]+dir_x,end[1]+dir_y x, y = start while (x,y) != finish: center.add((x,y)) x += dir_x y += dir_y middle = (get_surround(*c) for c in center) middle = reduce(set.union, middle) middle -= center {surface.set_at(c, self.i) for c in center} {surface.set_at(m, self.m) for m in middle} def get_surround_full(x,y): return {(x+1, y),(x, y+1),(x-1, y),(x, y-1), (x+1,y+1),(x-1,y-1),(x+1,y-1),(x-1,y+1)} def get_surround(x,y): return {(x+1, y),(x, y+1),(x-1, y),(x, y-1)} def create_conductor(length, width, light = (200,200,200), dark = (127,127,127)): l = length//3 T = P.Surface((length, width)) T.fill(dark) data = P.PixelArray(T) data[l:l+length//10] = light return T def get_rotated_conductors(length, width, light = (200,200,200), dark = (127,127,127)): data = [create_conductor(length, width, light, dark)] for x in range(3): data.append(P.transform.rotate(data[0], 90+x*90)) return data class Connector(): """Container Class for Chip Connection Data""" def __init__(self, depth, length, width, spacing = 2, light = (200,200,200), dark = (127,127,127)): self.depth = depth self.spacing = spacing self.length = length self.width = width self.indent = length-depth self.surfaces = get_rotated_conductors(length, width, light, dark) if __name__ == "__main__": P.init() connectors = Connector(3,10,3) chip = Chip(78, connectors) P.image.save(create_conductor(20,4), "_test_conductor.png") size = (480, 480) xdelta = size[0]//5 positions = [] for x in range(xdelta,size[0],xdelta): for y in range(xdelta,size[1],xdelta): print(x,y) positions.append((x,y)) tilemap = TileMap() tilemap.save_images() testsurf = P.Surface((200,200)) tilemap.draw_line(testsurf, (50,50),(0,100)) P.image.save(testsurf, "_test_diagonal.png") grid = Grid(size, chip, connectors, positions, tilemap) P.image.save(grid.surface, "_test.png") symcon = Connector(2, 7, 2, 2) symbol = Chip(32, symcon) P.image.save(symbol.surface, "icon.png")
self.surface.blit(bar, (xs-minstraight, y-1))
conditional_block
electronics.py
#! python3.4 from __future__ import division, print_function import pygame as P from random import randint, choice from os.path import join from functools import reduce if __name__ == "__main__": import sys sys.path.append("..") from vec2d import vec2d from Engine.effects import repeated_surface as repeat class Chip(): def __init__(self, length, connector, innercolor = P.Color(50,50,50), deviation = 3, bordercolor = P.Color(150,150,150)): size = length,length rect = P.Rect((0,0), size) innerrect = rect.inflate(-connector.indent*2,-connector.indent*2) self.surface = P.Surface(size, flags = P.SRCALPHA) self.surface.fill((0,0,0,0)) insurface = self.surface.subsurface(innerrect)#P.Surface(self.innerrect.size) for x in range(insurface.get_width()): for y in range(insurface.get_height()): insurface.set_at((x,y),[z+randint(-deviation, deviation) for z in innercolor[:3]]) innerlength = innerrect.width ele = (connector.spacing+connector.width) slots = innerlength//ele-2 filled = slots*ele-connector.spacing rest = innerlength - filled if rest%2: print("Warning|electronics.py:could not center chip connectors, change chip size.") con1,con2,con3,con4 = connector.surfaces posses = tuple(range(connector.indent+rest//2, connector.indent+innerlength-rest//2, ele)) self.dis = length-connector.length for z in posses: self.surface.blit(con2,(z,0))#top self.surface.blit(con4,(z,self.dis))#bottom self.surface.blit(con3, (0, z))#left self.surface.blit(con1, (self.dis, z))#right self.interfaces = posses#attachement nodes for circuit def get_interfaces(self, x,y): sides = {} sides["left"] = [vec2d(x, y+z) for z in self.interfaces] sides["right"] = [vec2d(self.dis+x, y+z) for z in self.interfaces] sides["top"] = [vec2d(x+z, y) for z in self.interfaces] sides["bottom"] = [vec2d(x+z, y+self.dis) for z in self.interfaces] return sides class Fizzle(): """electric fizzle on the Grid""" def __init__(self, surface, connection, speed = 1): self.connection = connection self.surface = surface self.pos = connection.start self.direction = self.end-self.start self.time = connection.time class AnimFizzle(): def __init__(self, grid,amount, speed, color = (250,250,100)): self.grid = grid connections = [] for node in grid.nodes.values(): connections.extend(node.connections) for c in connections: c.direction.length = speed c.scale_time(speed) fizimage = P.image.load(join("Circuit","blib.png")) blitter = P.Surface(fizimage.get_size()) blitter.fill(color) fizimage.blit(blitter, (0,0), special_flags = P.BLEND_MULT) self.fizzles = [Fizzle(choice(connections),fizimage) for _ in range(amount)] def render(self,surface): copy = self.grid.surface.copy() rects = [f.render(copy) for f in self.fizzles] surface.blit(copy, (0,0)) return rects Fi = 0 class Fizzle(): """electric fizzle on the Grid""" def __init__(self, connection, surface): global Fi self.follow(connection) self.surface = surface self.fi = Fi Fi += 1 def follow(self, connection): self.connection = connection self.direction = connection.direction self.pos = vec2d(connection.start) self.time = connection.time def render(self, target): self.time -= 1 if self.time <= 0: self.follow(choice(self.connection.node.connections)) self.pos += self.direction target.blit(self.surface, self.pos) class Grid(): delta = vec2d(-1,-1) class Node(): def __init__(self, position): self.position = position self.connections = [] def __repr__(self): return "Node(%s,%s)" % self.position class Connection(): def __init__(self, start, end, node): self.start = start+Grid.delta self.end = end+Grid.delta 
self.direction = end-start self.node = node def scale_time(self, speed): self.time = (self.end-self.start).length/self.direction.length def __init__(self, size, chip, connector, positions, tilemap): self.size = size self.chip = chip self.length = connector.width+connector.spacing self.surface = P.Surface(size) self.surface.fill(tilemap.basecolor) chiplength = self.chip.surface.get_width() xshift = chiplength//2 self.chipposs = [] levels = set() rows = set() barrows = [] barlines = [] self.nodes = {} interfaces = {} outsidenode = self.Node((None,None)) for x,y in positions: x,y = pos = (x-xshift, y-xshift) self.chipposs.append(pos) self.nodes[(x,y)] = self.Node((x,y)) interfaces[(x,y)] = chip.get_interfaces(x,y) if y not in levels: levels.add(y) barlines += (y+interface for interface in chip.interfaces) if x not in rows: rows.add(x) barrows += (x+interface for interface in chip.interfaces) minstraight = 21 #####Diagonals##### X = min(rows) XR = max(rows)+chiplength for y in levels:#left and right endconnectors ys = chip.interfaces if y == min(levels):spec = X-minstraight elif y == max(levels):spec = -X+minstraight else:spec = 0 for yl in ys: yt = yl+y+spec tilemap.draw_line(self.surface, (X-minstraight,yl+y+1), (0,yt+1)) tilemap.draw_line(self.surface, (XR+minstraight,yl+y+1), (size[0],yt+1)) Y = min(levels) for x in rows: if x == min(rows):spec = 0#spec = Y-minstraight elif x == max(rows):spec = 0#spec = -Y+minstraight else: if x > size[0]//2:spec = Y-minstraight else:spec = -Y+minstraight for xl in ys: xt = xl+x+spec tilemap.draw_line(self.surface, (x+xl+1, Y-minstraight),(xt+1, 0)) ######Straight Connections###### xs = min(rows) xe = max(rows)+chiplength bar = repeat(tilemap["h"], (xe-xs+2*minstraight, 5)) for y in barlines: self.surface.blit(bar, (xs-minstraight, y-1)) ys = min(levels) ye = size[1]#max(levels)+chiplength bar = repeat(tilemap["v"], (5, ye-ys+minstraight)) for x in barlines: self.surface.blit(bar, (x-1, ys-minstraight)) ######Chips###### [self.surface.blit(chip.surface, pos) for pos in self.chipposs] ##################Fizzle Logic###################### xs = list(rows) xs.sort() ys = list(levels) ys.sort() for x in rows: cons = [False, False] if xs[0] != x:#not left end leftx = xs[xs.index(x)-1] cons[0] = True if xs[-1] != x:#not right end rightx = xs[xs.index(x)+1] cons[1] = True for y in levels: localnode = self.nodes[(x,y)] if cons[0]: leftnode = self.nodes[(leftx, y)] for left, right in zip(interfaces[(leftx, y)]["right"], interfaces[(x,y)]["left"]): localnode.connections.append(self.Connection(right, left, leftnode)) if cons[1]: rightnode = self.nodes[(rightx, y)] for right, left in zip(interfaces[(rightx, y)]["left"], interfaces[(x,y)]["right"]): localnode.connections.append(self.Connection(left, right, rightnode)) if ys[-1] != y: downy = ys[ys.index(y)+1] downnode = self.nodes[(x, downy)] for down, up in zip(interfaces[(x,downy)]["top"], interfaces[(x,y)]["bottom"]): localnode.connections.append(self.Connection(up,down, downnode)) if ys[0] != y: upy = ys[ys.index(y)-1] upnode = self.nodes[(x, upy)] for up, down in zip(interfaces[(x,upy)]["bottom"], interfaces[(x,y)]["top"]): localnode.connections.append(self.Connection(down,up, upnode)) class TileMap(): def __init__(self, outercolor = (10,10,150), innercolor = (250,250,250)): self.m = m = P.Color(*[(x+y)//2 for x,y in zip(innercolor, outercolor)]) self.o = o = P.Color(*outercolor) self.i = i = P.Color(*innercolor) self.basecolor = outercolor size = 5,5 l = 5 self.tiles = {} S = P.Surface(size) PA = P.PixelArray(S) 
for x,color in zip(range(5), (o,m,i,m,o)): PA[x] = color self.tiles["v"] = S#vertical S = P.Surface(size) PA = P.PixelArray(S) for x,color in zip(range(5), (o,m,i,m,o)): PA[:, x] = color self.tiles["h"] = S#horizontal
def save_images(self): for name, surface in self.tiles.items(): P.image.save(surface, "_test_"+name+".png") def draw_line(self, surface, start, end): dif_x = end[0]-start[0] dif_y = end[1]-start[1] if not abs(dif_x) == abs(dif_y) and dif_y and dif_x: raise ValueError("Can only handle 45 Degree Diagonals. Dif: (%s,%s)" % (dif_x, dif_y)) dir_x = dif_x//abs(dif_x) if dif_x else 0 dir_y = dif_y//abs(dif_y) if dif_y else 0 center = set() finish = end[0]+dir_x,end[1]+dir_y x, y = start while (x,y) != finish: center.add((x,y)) x += dir_x y += dir_y middle = (get_surround(*c) for c in center) middle = reduce(set.union, middle) middle -= center {surface.set_at(c, self.i) for c in center} {surface.set_at(m, self.m) for m in middle} def get_surround_full(x,y): return {(x+1, y),(x, y+1),(x-1, y),(x, y-1), (x+1,y+1),(x-1,y-1),(x+1,y-1),(x-1,y+1)} def get_surround(x,y): return {(x+1, y),(x, y+1),(x-1, y),(x, y-1)} def create_conductor(length, width, light = (200,200,200), dark = (127,127,127)): l = length//3 T = P.Surface((length, width)) T.fill(dark) data = P.PixelArray(T) data[l:l+length//10] = light return T def get_rotated_conductors(length, width, light = (200,200,200), dark = (127,127,127)): data = [create_conductor(length, width, light, dark)] for x in range(3): data.append(P.transform.rotate(data[0], 90+x*90)) return data class Connector(): """Container Class for Chip Connection Data""" def __init__(self, depth, length, width, spacing = 2, light = (200,200,200), dark = (127,127,127)): self.depth = depth self.spacing = spacing self.length = length self.width = width self.indent = length-depth self.surfaces = get_rotated_conductors(length, width, light, dark) if __name__ == "__main__": P.init() connectors = Connector(3,10,3) chip = Chip(78, connectors) P.image.save(create_conductor(20,4), "_test_conductor.png") size = (480, 480) xdelta = size[0]//5 positions = [] for x in range(xdelta,size[0],xdelta): for y in range(xdelta,size[1],xdelta): print(x,y) positions.append((x,y)) tilemap = TileMap() tilemap.save_images() testsurf = P.Surface((200,200)) tilemap.draw_line(testsurf, (50,50),(0,100)) P.image.save(testsurf, "_test_diagonal.png") grid = Grid(size, chip, connectors, positions, tilemap) P.image.save(grid.surface, "_test.png") symcon = Connector(2, 7, 2, 2) symbol = Chip(32, symcon) P.image.save(symbol.surface, "icon.png")
def __getitem__(self, key): return self.tiles[key]
random_line_split
electronics.py
#! python3.4 from __future__ import division, print_function import pygame as P from random import randint, choice from os.path import join from functools import reduce if __name__ == "__main__": import sys sys.path.append("..") from vec2d import vec2d from Engine.effects import repeated_surface as repeat class Chip():
class Fizzle(): """electric fizzle on the Grid""" def __init__(self, surface, connection, speed = 1): self.connection = connection self.surface = surface self.pos = connection.start self.direction = self.end-self.start self.time = connection.time class AnimFizzle(): def __init__(self, grid,amount, speed, color = (250,250,100)): self.grid = grid connections = [] for node in grid.nodes.values(): connections.extend(node.connections) for c in connections: c.direction.length = speed c.scale_time(speed) fizimage = P.image.load(join("Circuit","blib.png")) blitter = P.Surface(fizimage.get_size()) blitter.fill(color) fizimage.blit(blitter, (0,0), special_flags = P.BLEND_MULT) self.fizzles = [Fizzle(choice(connections),fizimage) for _ in range(amount)] def render(self,surface): copy = self.grid.surface.copy() rects = [f.render(copy) for f in self.fizzles] surface.blit(copy, (0,0)) return rects Fi = 0 class Fizzle(): """electric fizzle on the Grid""" def __init__(self, connection, surface): global Fi self.follow(connection) self.surface = surface self.fi = Fi Fi += 1 def follow(self, connection): self.connection = connection self.direction = connection.direction self.pos = vec2d(connection.start) self.time = connection.time def render(self, target): self.time -= 1 if self.time <= 0: self.follow(choice(self.connection.node.connections)) self.pos += self.direction target.blit(self.surface, self.pos) class Grid(): delta = vec2d(-1,-1) class Node(): def __init__(self, position): self.position = position self.connections = [] def __repr__(self): return "Node(%s,%s)" % self.position class Connection(): def __init__(self, start, end, node): self.start = start+Grid.delta self.end = end+Grid.delta self.direction = end-start self.node = node def scale_time(self, speed): self.time = (self.end-self.start).length/self.direction.length def __init__(self, size, chip, connector, positions, tilemap): self.size = size self.chip = chip self.length = connector.width+connector.spacing self.surface = P.Surface(size) self.surface.fill(tilemap.basecolor) chiplength = self.chip.surface.get_width() xshift = chiplength//2 self.chipposs = [] levels = set() rows = set() barrows = [] barlines = [] self.nodes = {} interfaces = {} outsidenode = self.Node((None,None)) for x,y in positions: x,y = pos = (x-xshift, y-xshift) self.chipposs.append(pos) self.nodes[(x,y)] = self.Node((x,y)) interfaces[(x,y)] = chip.get_interfaces(x,y) if y not in levels: levels.add(y) barlines += (y+interface for interface in chip.interfaces) if x not in rows: rows.add(x) barrows += (x+interface for interface in chip.interfaces) minstraight = 21 #####Diagonals##### X = min(rows) XR = max(rows)+chiplength for y in levels:#left and right endconnectors ys = chip.interfaces if y == min(levels):spec = X-minstraight elif y == max(levels):spec = -X+minstraight else:spec = 0 for yl in ys: yt = yl+y+spec tilemap.draw_line(self.surface, (X-minstraight,yl+y+1), (0,yt+1)) tilemap.draw_line(self.surface, (XR+minstraight,yl+y+1), (size[0],yt+1)) Y = min(levels) for x in rows: if x == min(rows):spec = 0#spec = Y-minstraight elif x == max(rows):spec = 0#spec = -Y+minstraight else: if x > size[0]//2:spec = Y-minstraight else:spec = -Y+minstraight for xl in ys: xt = xl+x+spec tilemap.draw_line(self.surface, (x+xl+1, Y-minstraight),(xt+1, 0)) ######Straight Connections###### xs = min(rows) xe = max(rows)+chiplength bar = repeat(tilemap["h"], (xe-xs+2*minstraight, 5)) for y in barlines: self.surface.blit(bar, (xs-minstraight, y-1)) ys = min(levels) ye = 
size[1]#max(levels)+chiplength bar = repeat(tilemap["v"], (5, ye-ys+minstraight)) for x in barlines: self.surface.blit(bar, (x-1, ys-minstraight)) ######Chips###### [self.surface.blit(chip.surface, pos) for pos in self.chipposs] ##################Fizzle Logic###################### xs = list(rows) xs.sort() ys = list(levels) ys.sort() for x in rows: cons = [False, False] if xs[0] != x:#not left end leftx = xs[xs.index(x)-1] cons[0] = True if xs[-1] != x:#not right end rightx = xs[xs.index(x)+1] cons[1] = True for y in levels: localnode = self.nodes[(x,y)] if cons[0]: leftnode = self.nodes[(leftx, y)] for left, right in zip(interfaces[(leftx, y)]["right"], interfaces[(x,y)]["left"]): localnode.connections.append(self.Connection(right, left, leftnode)) if cons[1]: rightnode = self.nodes[(rightx, y)] for right, left in zip(interfaces[(rightx, y)]["left"], interfaces[(x,y)]["right"]): localnode.connections.append(self.Connection(left, right, rightnode)) if ys[-1] != y: downy = ys[ys.index(y)+1] downnode = self.nodes[(x, downy)] for down, up in zip(interfaces[(x,downy)]["top"], interfaces[(x,y)]["bottom"]): localnode.connections.append(self.Connection(up,down, downnode)) if ys[0] != y: upy = ys[ys.index(y)-1] upnode = self.nodes[(x, upy)] for up, down in zip(interfaces[(x,upy)]["bottom"], interfaces[(x,y)]["top"]): localnode.connections.append(self.Connection(down,up, upnode)) class TileMap(): def __init__(self, outercolor = (10,10,150), innercolor = (250,250,250)): self.m = m = P.Color(*[(x+y)//2 for x,y in zip(innercolor, outercolor)]) self.o = o = P.Color(*outercolor) self.i = i = P.Color(*innercolor) self.basecolor = outercolor size = 5,5 l = 5 self.tiles = {} S = P.Surface(size) PA = P.PixelArray(S) for x,color in zip(range(5), (o,m,i,m,o)): PA[x] = color self.tiles["v"] = S#vertical S = P.Surface(size) PA = P.PixelArray(S) for x,color in zip(range(5), (o,m,i,m,o)): PA[:, x] = color self.tiles["h"] = S#horizontal def __getitem__(self, key): return self.tiles[key] def save_images(self): for name, surface in self.tiles.items(): P.image.save(surface, "_test_"+name+".png") def draw_line(self, surface, start, end): dif_x = end[0]-start[0] dif_y = end[1]-start[1] if not abs(dif_x) == abs(dif_y) and dif_y and dif_x: raise ValueError("Can only handle 45 Degree Diagonals. 
Dif: (%s,%s)" % (dif_x, dif_y)) dir_x = dif_x//abs(dif_x) if dif_x else 0 dir_y = dif_y//abs(dif_y) if dif_y else 0 center = set() finish = end[0]+dir_x,end[1]+dir_y x, y = start while (x,y) != finish: center.add((x,y)) x += dir_x y += dir_y middle = (get_surround(*c) for c in center) middle = reduce(set.union, middle) middle -= center {surface.set_at(c, self.i) for c in center} {surface.set_at(m, self.m) for m in middle} def get_surround_full(x,y): return {(x+1, y),(x, y+1),(x-1, y),(x, y-1), (x+1,y+1),(x-1,y-1),(x+1,y-1),(x-1,y+1)} def get_surround(x,y): return {(x+1, y),(x, y+1),(x-1, y),(x, y-1)} def create_conductor(length, width, light = (200,200,200), dark = (127,127,127)): l = length//3 T = P.Surface((length, width)) T.fill(dark) data = P.PixelArray(T) data[l:l+length//10] = light return T def get_rotated_conductors(length, width, light = (200,200,200), dark = (127,127,127)): data = [create_conductor(length, width, light, dark)] for x in range(3): data.append(P.transform.rotate(data[0], 90+x*90)) return data class Connector(): """Container Class for Chip Connection Data""" def __init__(self, depth, length, width, spacing = 2, light = (200,200,200), dark = (127,127,127)): self.depth = depth self.spacing = spacing self.length = length self.width = width self.indent = length-depth self.surfaces = get_rotated_conductors(length, width, light, dark) if __name__ == "__main__": P.init() connectors = Connector(3,10,3) chip = Chip(78, connectors) P.image.save(create_conductor(20,4), "_test_conductor.png") size = (480, 480) xdelta = size[0]//5 positions = [] for x in range(xdelta,size[0],xdelta): for y in range(xdelta,size[1],xdelta): print(x,y) positions.append((x,y)) tilemap = TileMap() tilemap.save_images() testsurf = P.Surface((200,200)) tilemap.draw_line(testsurf, (50,50),(0,100)) P.image.save(testsurf, "_test_diagonal.png") grid = Grid(size, chip, connectors, positions, tilemap) P.image.save(grid.surface, "_test.png") symcon = Connector(2, 7, 2, 2) symbol = Chip(32, symcon) P.image.save(symbol.surface, "icon.png")
def __init__(self, length, connector, innercolor = P.Color(50,50,50), deviation = 3, bordercolor = P.Color(150,150,150)): size = length,length rect = P.Rect((0,0), size) innerrect = rect.inflate(-connector.indent*2,-connector.indent*2) self.surface = P.Surface(size, flags = P.SRCALPHA) self.surface.fill((0,0,0,0)) insurface = self.surface.subsurface(innerrect)#P.Surface(self.innerrect.size) for x in range(insurface.get_width()): for y in range(insurface.get_height()): insurface.set_at((x,y),[z+randint(-deviation, deviation) for z in innercolor[:3]]) innerlength = innerrect.width ele = (connector.spacing+connector.width) slots = innerlength//ele-2 filled = slots*ele-connector.spacing rest = innerlength - filled if rest%2: print("Warning|electronics.py:could not center chip connectors, change chip size.") con1,con2,con3,con4 = connector.surfaces posses = tuple(range(connector.indent+rest//2, connector.indent+innerlength-rest//2, ele)) self.dis = length-connector.length for z in posses: self.surface.blit(con2,(z,0))#top self.surface.blit(con4,(z,self.dis))#bottom self.surface.blit(con3, (0, z))#left self.surface.blit(con1, (self.dis, z))#right self.interfaces = posses#attachement nodes for circuit def get_interfaces(self, x,y): sides = {} sides["left"] = [vec2d(x, y+z) for z in self.interfaces] sides["right"] = [vec2d(self.dis+x, y+z) for z in self.interfaces] sides["top"] = [vec2d(x+z, y) for z in self.interfaces] sides["bottom"] = [vec2d(x+z, y+self.dis) for z in self.interfaces] return sides
identifier_body
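The connector-placement arithmetic inside Chip.__init__ is easier to follow with concrete numbers. The standalone sketch below traces it for the Connector(3, 10, 3) and Chip(78, ...) values used in the __main__ block; the variable names are local to this example and nothing here modifies the file.

length, depth, con_len, width, spacing = 78, 3, 10, 3, 2
indent = con_len - depth              # 7: how deep the connectors sit inside the chip edge
innerlength = length - 2 * indent     # 64: width of the inner rectangle (rect.inflate(-indent*2, ...))
ele = spacing + width                 # 5: pitch of one connector slot
slots = innerlength // ele - 2        # 10 connectors per side
filled = slots * ele - spacing        # 48 pixels actually occupied by connectors
rest = innerlength - filled           # 16: leftover margin; even, so the row is centred and no warning prints
posses = tuple(range(indent + rest // 2, indent + innerlength - rest // 2, ele))
dis = length - con_len                # 68: offset of the right/bottom connector rows
print(posses)                         # (15, 20, 25, 30, 35, 40, 45, 50, 55, 60)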
electronics.py
#! python3.4 from __future__ import division, print_function import pygame as P from random import randint, choice from os.path import join from functools import reduce if __name__ == "__main__": import sys sys.path.append("..") from vec2d import vec2d from Engine.effects import repeated_surface as repeat class Chip(): def __init__(self, length, connector, innercolor = P.Color(50,50,50), deviation = 3, bordercolor = P.Color(150,150,150)): size = length,length rect = P.Rect((0,0), size) innerrect = rect.inflate(-connector.indent*2,-connector.indent*2) self.surface = P.Surface(size, flags = P.SRCALPHA) self.surface.fill((0,0,0,0)) insurface = self.surface.subsurface(innerrect)#P.Surface(self.innerrect.size) for x in range(insurface.get_width()): for y in range(insurface.get_height()): insurface.set_at((x,y),[z+randint(-deviation, deviation) for z in innercolor[:3]]) innerlength = innerrect.width ele = (connector.spacing+connector.width) slots = innerlength//ele-2 filled = slots*ele-connector.spacing rest = innerlength - filled if rest%2: print("Warning|electronics.py:could not center chip connectors, change chip size.") con1,con2,con3,con4 = connector.surfaces posses = tuple(range(connector.indent+rest//2, connector.indent+innerlength-rest//2, ele)) self.dis = length-connector.length for z in posses: self.surface.blit(con2,(z,0))#top self.surface.blit(con4,(z,self.dis))#bottom self.surface.blit(con3, (0, z))#left self.surface.blit(con1, (self.dis, z))#right self.interfaces = posses#attachement nodes for circuit def get_interfaces(self, x,y): sides = {} sides["left"] = [vec2d(x, y+z) for z in self.interfaces] sides["right"] = [vec2d(self.dis+x, y+z) for z in self.interfaces] sides["top"] = [vec2d(x+z, y) for z in self.interfaces] sides["bottom"] = [vec2d(x+z, y+self.dis) for z in self.interfaces] return sides class Fizzle(): """electric fizzle on the Grid""" def __init__(self, surface, connection, speed = 1): self.connection = connection self.surface = surface self.pos = connection.start self.direction = self.end-self.start self.time = connection.time class
(): def __init__(self, grid,amount, speed, color = (250,250,100)): self.grid = grid connections = [] for node in grid.nodes.values(): connections.extend(node.connections) for c in connections: c.direction.length = speed c.scale_time(speed) fizimage = P.image.load(join("Circuit","blib.png")) blitter = P.Surface(fizimage.get_size()) blitter.fill(color) fizimage.blit(blitter, (0,0), special_flags = P.BLEND_MULT) self.fizzles = [Fizzle(choice(connections),fizimage) for _ in range(amount)] def render(self,surface): copy = self.grid.surface.copy() rects = [f.render(copy) for f in self.fizzles] surface.blit(copy, (0,0)) return rects Fi = 0 class Fizzle(): """electric fizzle on the Grid""" def __init__(self, connection, surface): global Fi self.follow(connection) self.surface = surface self.fi = Fi Fi += 1 def follow(self, connection): self.connection = connection self.direction = connection.direction self.pos = vec2d(connection.start) self.time = connection.time def render(self, target): self.time -= 1 if self.time <= 0: self.follow(choice(self.connection.node.connections)) self.pos += self.direction target.blit(self.surface, self.pos) class Grid(): delta = vec2d(-1,-1) class Node(): def __init__(self, position): self.position = position self.connections = [] def __repr__(self): return "Node(%s,%s)" % self.position class Connection(): def __init__(self, start, end, node): self.start = start+Grid.delta self.end = end+Grid.delta self.direction = end-start self.node = node def scale_time(self, speed): self.time = (self.end-self.start).length/self.direction.length def __init__(self, size, chip, connector, positions, tilemap): self.size = size self.chip = chip self.length = connector.width+connector.spacing self.surface = P.Surface(size) self.surface.fill(tilemap.basecolor) chiplength = self.chip.surface.get_width() xshift = chiplength//2 self.chipposs = [] levels = set() rows = set() barrows = [] barlines = [] self.nodes = {} interfaces = {} outsidenode = self.Node((None,None)) for x,y in positions: x,y = pos = (x-xshift, y-xshift) self.chipposs.append(pos) self.nodes[(x,y)] = self.Node((x,y)) interfaces[(x,y)] = chip.get_interfaces(x,y) if y not in levels: levels.add(y) barlines += (y+interface for interface in chip.interfaces) if x not in rows: rows.add(x) barrows += (x+interface for interface in chip.interfaces) minstraight = 21 #####Diagonals##### X = min(rows) XR = max(rows)+chiplength for y in levels:#left and right endconnectors ys = chip.interfaces if y == min(levels):spec = X-minstraight elif y == max(levels):spec = -X+minstraight else:spec = 0 for yl in ys: yt = yl+y+spec tilemap.draw_line(self.surface, (X-minstraight,yl+y+1), (0,yt+1)) tilemap.draw_line(self.surface, (XR+minstraight,yl+y+1), (size[0],yt+1)) Y = min(levels) for x in rows: if x == min(rows):spec = 0#spec = Y-minstraight elif x == max(rows):spec = 0#spec = -Y+minstraight else: if x > size[0]//2:spec = Y-minstraight else:spec = -Y+minstraight for xl in ys: xt = xl+x+spec tilemap.draw_line(self.surface, (x+xl+1, Y-minstraight),(xt+1, 0)) ######Straight Connections###### xs = min(rows) xe = max(rows)+chiplength bar = repeat(tilemap["h"], (xe-xs+2*minstraight, 5)) for y in barlines: self.surface.blit(bar, (xs-minstraight, y-1)) ys = min(levels) ye = size[1]#max(levels)+chiplength bar = repeat(tilemap["v"], (5, ye-ys+minstraight)) for x in barlines: self.surface.blit(bar, (x-1, ys-minstraight)) ######Chips###### [self.surface.blit(chip.surface, pos) for pos in self.chipposs] ##################Fizzle Logic###################### xs 
= list(rows) xs.sort() ys = list(levels) ys.sort() for x in rows: cons = [False, False] if xs[0] != x:#not left end leftx = xs[xs.index(x)-1] cons[0] = True if xs[-1] != x:#not right end rightx = xs[xs.index(x)+1] cons[1] = True for y in levels: localnode = self.nodes[(x,y)] if cons[0]: leftnode = self.nodes[(leftx, y)] for left, right in zip(interfaces[(leftx, y)]["right"], interfaces[(x,y)]["left"]): localnode.connections.append(self.Connection(right, left, leftnode)) if cons[1]: rightnode = self.nodes[(rightx, y)] for right, left in zip(interfaces[(rightx, y)]["left"], interfaces[(x,y)]["right"]): localnode.connections.append(self.Connection(left, right, rightnode)) if ys[-1] != y: downy = ys[ys.index(y)+1] downnode = self.nodes[(x, downy)] for down, up in zip(interfaces[(x,downy)]["top"], interfaces[(x,y)]["bottom"]): localnode.connections.append(self.Connection(up,down, downnode)) if ys[0] != y: upy = ys[ys.index(y)-1] upnode = self.nodes[(x, upy)] for up, down in zip(interfaces[(x,upy)]["bottom"], interfaces[(x,y)]["top"]): localnode.connections.append(self.Connection(down,up, upnode)) class TileMap(): def __init__(self, outercolor = (10,10,150), innercolor = (250,250,250)): self.m = m = P.Color(*[(x+y)//2 for x,y in zip(innercolor, outercolor)]) self.o = o = P.Color(*outercolor) self.i = i = P.Color(*innercolor) self.basecolor = outercolor size = 5,5 l = 5 self.tiles = {} S = P.Surface(size) PA = P.PixelArray(S) for x,color in zip(range(5), (o,m,i,m,o)): PA[x] = color self.tiles["v"] = S#vertical S = P.Surface(size) PA = P.PixelArray(S) for x,color in zip(range(5), (o,m,i,m,o)): PA[:, x] = color self.tiles["h"] = S#horizontal def __getitem__(self, key): return self.tiles[key] def save_images(self): for name, surface in self.tiles.items(): P.image.save(surface, "_test_"+name+".png") def draw_line(self, surface, start, end): dif_x = end[0]-start[0] dif_y = end[1]-start[1] if not abs(dif_x) == abs(dif_y) and dif_y and dif_x: raise ValueError("Can only handle 45 Degree Diagonals. 
Dif: (%s,%s)" % (dif_x, dif_y)) dir_x = dif_x//abs(dif_x) if dif_x else 0 dir_y = dif_y//abs(dif_y) if dif_y else 0 center = set() finish = end[0]+dir_x,end[1]+dir_y x, y = start while (x,y) != finish: center.add((x,y)) x += dir_x y += dir_y middle = (get_surround(*c) for c in center) middle = reduce(set.union, middle) middle -= center {surface.set_at(c, self.i) for c in center} {surface.set_at(m, self.m) for m in middle} def get_surround_full(x,y): return {(x+1, y),(x, y+1),(x-1, y),(x, y-1), (x+1,y+1),(x-1,y-1),(x+1,y-1),(x-1,y+1)} def get_surround(x,y): return {(x+1, y),(x, y+1),(x-1, y),(x, y-1)} def create_conductor(length, width, light = (200,200,200), dark = (127,127,127)): l = length//3 T = P.Surface((length, width)) T.fill(dark) data = P.PixelArray(T) data[l:l+length//10] = light return T def get_rotated_conductors(length, width, light = (200,200,200), dark = (127,127,127)): data = [create_conductor(length, width, light, dark)] for x in range(3): data.append(P.transform.rotate(data[0], 90+x*90)) return data class Connector(): """Container Class for Chip Connection Data""" def __init__(self, depth, length, width, spacing = 2, light = (200,200,200), dark = (127,127,127)): self.depth = depth self.spacing = spacing self.length = length self.width = width self.indent = length-depth self.surfaces = get_rotated_conductors(length, width, light, dark) if __name__ == "__main__": P.init() connectors = Connector(3,10,3) chip = Chip(78, connectors) P.image.save(create_conductor(20,4), "_test_conductor.png") size = (480, 480) xdelta = size[0]//5 positions = [] for x in range(xdelta,size[0],xdelta): for y in range(xdelta,size[1],xdelta): print(x,y) positions.append((x,y)) tilemap = TileMap() tilemap.save_images() testsurf = P.Surface((200,200)) tilemap.draw_line(testsurf, (50,50),(0,100)) P.image.save(testsurf, "_test_diagonal.png") grid = Grid(size, chip, connectors, positions, tilemap) P.image.save(grid.surface, "_test.png") symcon = Connector(2, 7, 2, 2) symbol = Chip(32, symcon) P.image.save(symbol.surface, "icon.png")
AnimFizzle
identifier_name
SL1_ImportData.py
# Importing Data ''' Description: This file provide some function that are toe used for importing data . Function this file Contains: - ImportData: Used to import data either from BQ or from Storage. ''' # ----------------------------------------------- Loading Libraries ----------------------------------------------- # import pandas as pd import glob, os, ast, time from datetime import datetime, date, timedelta from SL0_GeneralFunc import LevBasedPrint, AddRecommendation # ------------------------------------------ GrabAnySizeDatafromGoogleBQ ------------------------------------------ # def Exec_BQ(query, projectid): LevBasedPrint('Inside "'+Exec_BQ.__name__+'" function.',3,1) LevBasedPrint('',3,1) return pd.io.gbq.read_gbq(query, project_id=projectid, index_col=None, col_order=None, reauth=False, private_key=None) #, verbose=True deprecated def GenerateTableNames(config): ''' Make use of Domain based parameters to get the data. ''' # -----------<<< Setting constant values that are to be used inside function >>>----------- # DatasetName = config['BigQueryConfig']['DatasetName'] SIDs = ast.literal_eval(config['DomainConfig']['SIDs']) DataGrabMethodology = config['DomainConfig']['UseStaticOrDynamicCurrentDay'] LevBasedPrint('Inside "'+GenerateTableNames.__name__+'" function and configurations for this has been set.',3,1) LevBasedPrint('Data collection methodology that has been selected : ' + str(DataGrabMethodology),3) if DataGrabMethodology == 'static': Dates = ast.literal_eval(config['IfStatic']['Date']) StaDataWindow = ast.literal_eval(config['IfStatic']['DataGrabWindow_Days']) elif DataGrabMethodology == 'dynamic': DynDataWindow = int(ast.literal_eval(config['IfDynamic']['DataGrabWindow_Hr'])) else: txt = 'Exception: Wrong Configuration has been passed in "UseStaticOrDynamicCurrentDay".' AddRecommendation(txt, config) raise Exception(txt) # -----------------------------<<< Generating Table Names >>>------------------------------ # ## Generating Table Names if DataGrabMethodology == 'static': if StaDataWindow != '-': CustomDate = date(2000 + int(Dates[0][4:6]), int(Dates[0][2:4]), int(Dates[0][0:2])) format = '%d%m%y' Dates = [ (CustomDate + timedelta(days=i)).strftime(format) for i in range(int(StaDataWindow)) ] TableToInclude = '' for i in range(len(SIDs)): for j in range(len(Dates)): TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + Dates[j] + '_%"\'),' elif DataGrabMethodology == 'dynamic': CurrentTime = datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday, time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec) ## UTC TableDateToTake = [] while DynDataWindow >= -1: ## -1 to even include the current hour table tempDate = CurrentTime - timedelta(days = 0, hours = DynDataWindow, minutes = 0) TableDateToTake.append(tempDate.strftime(format = '%d%m%y_%H')) DynDataWindow -= 1 TableToInclude, TableCnt = '', 0 for i in range(len(SIDs)): for j in range(len(TableDateToTake)): TableCnt += 0 TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + TableDateToTake[j] + '%"\'),' LevBasedPrint('Total number of tables accessed : '+str(TableCnt),3) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('',3,1) return TableToInclude # ------------------------------------------------------------------------------------------- # def GrabAnySizeDatafromGoogleBQ(config):
# -------------------------------------------------- ImportData --------------------------------------------------- # def ImportData(config): """ Can be used to import data from either storage or BQ Extracts any size data from any SID of any number of days. Works in Two Configuration(config['IterationAim']['CycleType']), namely 'TrainTest' & 'GlTest' 'TrainTest' is for models training purpose where This Dataset is split later too make dataset size adequate for training uing sampling 'GlTest' is purely for prediction purpose, i.e. it will be used as testset only and will consume saved model to provide labels to observations """ # -----------<<< Setting constant values that are to be used inside function >>>----------- # AccessDataFrom = config['DataCollection']['GetDataFrom'] if AccessDataFrom == 'BQ': SettingToUse = config['IterationAim']['CycleType'] if SettingToUse: GlTestDataSize = int(config['IterationAim']['GlTest_DataGrabWindow_Hr']) FileLocalSavingName = config['InputPaths']['BQ_RawDataStoringName'].format(SettingToUse) GetNewCopy = config['DomainConfig']['BQ_GetNewCopyOfData'] elif AccessDataFrom == 'Storage': FileName = config['InputPaths']['Storage_RawData'] else: print('Wrong setting in "GetDataFrom", current value is {}'.format(AccessDataFrom)) txt = 'Exception: Wrong Configuration has been passed in "GetDataFrom".' AddRecommendation(txt, config) raise Exception(txt) LevBasedPrint('Inside "'+ImportData.__name__+'" function and configurations for this has been set.',1,1) LevBasedPrint('Accessing data from {}'.format(AccessDataFrom), 1) # ----------------------------<<< Accessing Data from BQ >>>------------------------------- # if AccessDataFrom == 'BQ': # -----------------------<<< Setting Configuration for GlTest >>>-------------------------- # if(SettingToUse == 'GlTest'): config['IfStatic']['DataGrabWindow_Days'] = str(int(GlTestDataSize/24 + 1)) config['IfDynamic']['DataGrabWindow_Hr'] = str(GlTestDataSize + 1) # --------------------------<<< Get New Copy Of Data Or Reuse >>>-------------------------- # if (os.path.exists(FileLocalSavingName) == False) | (GetNewCopy in ['True', 'true', 'T', 't', 'Yes', 'yes', 'Y', 'y']): DF = GrabAnySizeDatafromGoogleBQ(config) # if(SettingToUse == 'GlTest'): # DF.drop(DF[DF.BinsBackFromCurrent != 'Bin_0'].index, inplace=True) # DF.reset_index(drop=True, inplace=True) DF.to_csv(FileLocalSavingName, index=False)#, sep='|', encoding='utf-8') LevBasedPrint('Data extracted from BQ and saved locally to the File: '+ FileLocalSavingName, 1) else: DF = pd.read_csv(FileLocalSavingName)#, sep='|', encoding='utf-8') LevBasedPrint('Data Loaded From the File: '+ FileLocalSavingName, 1) LevBasedPrint('Data Shape: '+str(DF.shape), 1 ) # --------------------------<<< Accessing Data from Storage >>>---------------------------- # elif AccessDataFrom == 'Storage': DF = pd.read_csv(FileName)#, sep='|', encoding='utf-8') LevBasedPrint('Data Loaded From the File: '+ FileName, 1) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('Data Import | Complete',1) LevBasedPrint('',1,1) return DF # ------------------------------------------------------------------------------------------- # # ----------------------------------------------------------------------------------------------------------------- # ## AP # start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract # stop = -1 # step = -int(start/LimitDecreaseFactor) # limit = int(start/LimitDecreaseFactor) 
## until which point to try to gather the data ##AP # for i in [i for i in range(start,stop, step) if i >= limit]:
''' Incase if dataset size is too large then this function will enable the extraction of whole dataset by getting the data in chunks ''' # -----------<<< Setting constant values that are to be used inside function >>>----------- # ModuleSetting = config['Config']['ModuleSettingRuleName'] BQ_Cred = config['BigQueryConfig']['ProjectID'] if ModuleSetting == 'ICLSSTA': BinSizeBasedOnPeriod_Hr = int(config['Config']['ICLSSTA_BinSizeBasedOnPeriod_Hr']) BQ_QueryFile = config['InputPaths']['BQ_DataImportQuery'] LimitToStartWith = config['BigQueryConfig']['BQ_LimitToStart'] LimitDecreaseFactor = float(config['BigQueryConfig']['BQ_LimitDecreaseFactor']) LevBasedPrint('Inside "'+GrabAnySizeDatafromGoogleBQ.__name__+'" function and configurations for this has been set.',2,1) # -------------------------<<< Generating Tables Name To Query >>>------------------------- # TableToInclude = GenerateTableNames(config) #print(TableToInclude) # -------------------------<<< Creating Bin Setting For ICLSSTA >>>------------------------ # ## Getting the string that will be used to create bins for grouping based on a certain TimePeriod GroupsToInclude = '' if ModuleSetting == 'ICLSSTA': for i in range(1000): ##even if the bin size is as small as an hour, BQ has a limitation of accessing upto a max of 1000 Table, so this is the max possible limit ll_insec = int(i*BinSizeBasedOnPeriod_Hr *3600) ul_insec = int((i+1)*BinSizeBasedOnPeriod_Hr *3600 - 1) GroupsToInclude += '\n\tWHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN {low} AND {upp} THEN "Bin_{WhichBin}"'.format(low= ll_insec,upp= ul_insec, WhichBin= i) # ------------------------<<< Reading Query From External File >>>------------------------- # LevBasedPrint('Read from a locally saved Query File', 2) queryfile = open(BQ_QueryFile, 'r') query = queryfile.read() queryfile.close() # --------------------<<< Importing Data in Max possible batch size >>>-------------------- # ## looping over the limit and offset to grab the maximum possible bite in terms of observation that can be gathered ## GP start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract ratio = 1/LimitDecreaseFactor limit = 1000 ## util which pt to try to gather the data ## Hardcoded length = 1000 # query='''SELECT 1 limit {lim} offset {off}''' DF = pd.DataFrame() ##GP for i in [ int(start * ratio ** (n - 1)) for n in range(1, length + 1) if start * ratio ** (n - 1) > limit ]: if DF.shape == (0, 0): try: offcurr = 0 while offcurr < start: LevBasedPrint('Setting used in extracting data from BQ:\tNo. of obs. extracted per cycle (limit) = ' + str(i) + '\tOffset = ' + str(offcurr),2) QueryToUse = query.format(BinToUse = GroupsToInclude, TableToInclude = TableToInclude, lim = str(i), off = str(offcurr)) tempDF = Exec_BQ(QueryToUse, BQ_Cred) DF = DF.append(tempDF, ignore_index = True) offcurr += i except Exception as error: txt = 'Exception: In importing data from BQ was thrown!\nLimit used: ' + str(i) + '\n' + str(error) LevBasedPrint(txt, 2) AddRecommendation(txt, config) # raise Exception(txt) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('',2,1) return DF # ------------------------------------------------------------------------------------------- #
identifier_body
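The chunked-extraction loop above (GrabAnySizeDatafromGoogleBQ) is easier to follow in isolation. Below is a minimal Python sketch of the same limit/offset idea, assuming a hypothetical run_query(limit=..., offset=...) callable in place of Exec_BQ that returns plain row lists rather than DataFrames; the names and the 1000-row floor are illustrative, not the project's API.

# Try the largest chunk size first; if a pass fails, retry with geometrically smaller limits
# (start, start/factor, start/factor**2, ...) until the floor is reached, paging with offsets.
def fetch_in_chunks(run_query, start, decrease_factor, floor=1000):
    ratio = 1.0 / decrease_factor
    limits = [int(start * ratio ** n) for n in range(1000) if start * ratio ** n > floor]
    rows = []
    for limit in limits:
        if rows:                      # an earlier (larger) limit already succeeded
            break
        try:
            chunk, offset = [], 0
            while offset < start:     # page through the full result set
                chunk.extend(run_query(limit=limit, offset=offset))
                offset += limit
            rows = chunk              # keep the data only once a whole pass succeeded
        except Exception:
            continue                  # this limit was too large; try the next smaller one
    return rows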
SL1_ImportData.py
# Importing Data ''' Description: This file provides some functions that are to be used for importing data. Functions this file contains: - ImportData: Used to import data either from BQ or from Storage. ''' # ----------------------------------------------- Loading Libraries ----------------------------------------------- # import pandas as pd import glob, os, ast, time from datetime import datetime, date, timedelta from SL0_GeneralFunc import LevBasedPrint, AddRecommendation # ------------------------------------------ GrabAnySizeDatafromGoogleBQ ------------------------------------------ # def Exec_BQ(query, projectid): LevBasedPrint('Inside "'+Exec_BQ.__name__+'" function.',3,1) LevBasedPrint('',3,1) return pd.io.gbq.read_gbq(query, project_id=projectid, index_col=None, col_order=None, reauth=False, private_key=None) #, verbose=True deprecated def GenerateTableNames(config): ''' Make use of Domain based parameters to get the data. ''' # -----------<<< Setting constant values that are to be used inside function >>>----------- # DatasetName = config['BigQueryConfig']['DatasetName'] SIDs = ast.literal_eval(config['DomainConfig']['SIDs'])
StaDataWindow = ast.literal_eval(config['IfStatic']['DataGrabWindow_Days']) elif DataGrabMethodology == 'dynamic': DynDataWindow = int(ast.literal_eval(config['IfDynamic']['DataGrabWindow_Hr'])) else: txt = 'Exception: Wrong Configuration has been passed in "UseStaticOrDynamicCurrentDay".' AddRecommendation(txt, config) raise Exception(txt) # -----------------------------<<< Generating Table Names >>>------------------------------ # ## Generating Table Names if DataGrabMethodology == 'static': if StaDataWindow != '-': CustomDate = date(2000 + int(Dates[0][4:6]), int(Dates[0][2:4]), int(Dates[0][0:2])) format = '%d%m%y' Dates = [ (CustomDate + timedelta(days=i)).strftime(format) for i in range(int(StaDataWindow)) ] TableToInclude = '' for i in range(len(SIDs)): for j in range(len(Dates)): TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + Dates[j] + '_%"\'),' elif DataGrabMethodology == 'dynamic': CurrentTime = datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday, time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec) ## UTC TableDateToTake = [] while DynDataWindow >= -1: ## -1 to even include the current hour table tempDate = CurrentTime - timedelta(days = 0, hours = DynDataWindow, minutes = 0) TableDateToTake.append(tempDate.strftime(format = '%d%m%y_%H')) DynDataWindow -= 1 TableToInclude, TableCnt = '', 0 for i in range(len(SIDs)): for j in range(len(TableDateToTake)): TableCnt += 0 TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + TableDateToTake[j] + '%"\'),' LevBasedPrint('Total number of tables accessed : '+str(TableCnt),3) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('',3,1) return TableToInclude # ------------------------------------------------------------------------------------------- # def GrabAnySizeDatafromGoogleBQ(config): ''' Incase if dataset size is too large then this function will enable the extraction of whole dataset by getting the data in chunks ''' # -----------<<< Setting constant values that are to be used inside function >>>----------- # ModuleSetting = config['Config']['ModuleSettingRuleName'] BQ_Cred = config['BigQueryConfig']['ProjectID'] if ModuleSetting == 'ICLSSTA': BinSizeBasedOnPeriod_Hr = int(config['Config']['ICLSSTA_BinSizeBasedOnPeriod_Hr']) BQ_QueryFile = config['InputPaths']['BQ_DataImportQuery'] LimitToStartWith = config['BigQueryConfig']['BQ_LimitToStart'] LimitDecreaseFactor = float(config['BigQueryConfig']['BQ_LimitDecreaseFactor']) LevBasedPrint('Inside "'+GrabAnySizeDatafromGoogleBQ.__name__+'" function and configurations for this has been set.',2,1) # -------------------------<<< Generating Tables Name To Query >>>------------------------- # TableToInclude = GenerateTableNames(config) #print(TableToInclude) # -------------------------<<< Creating Bin Setting For ICLSSTA >>>------------------------ # ## Getting the string that will be used to create bins for grouping based on a certain TimePeriod GroupsToInclude = '' if ModuleSetting == 'ICLSSTA': for i in range(1000): ##even if the bin size is as small as an hour, BQ has a limitation of accessing upto a max of 1000 Table, so this is the max possible limit ll_insec = int(i*BinSizeBasedOnPeriod_Hr *3600) ul_insec = int((i+1)*BinSizeBasedOnPeriod_Hr *3600 - 1) GroupsToInclude += '\n\tWHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN {low} AND {upp} THEN 
"Bin_{WhichBin}"'.format(low= ll_insec,upp= ul_insec, WhichBin= i) # ------------------------<<< Reading Query From External File >>>------------------------- # LevBasedPrint('Read from a locally saved Query File', 2) queryfile = open(BQ_QueryFile, 'r') query = queryfile.read() queryfile.close() # --------------------<<< Importing Data in Max possible batch size >>>-------------------- # ## looping over the limit and offset to grab the maximum possible bite in terms of observation that can be gathered ## GP start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract ratio = 1/LimitDecreaseFactor limit = 1000 ## util which pt to try to gather the data ## Hardcoded length = 1000 # query='''SELECT 1 limit {lim} offset {off}''' DF = pd.DataFrame() ##GP for i in [ int(start * ratio ** (n - 1)) for n in range(1, length + 1) if start * ratio ** (n - 1) > limit ]: if DF.shape == (0, 0): try: offcurr = 0 while offcurr < start: LevBasedPrint('Setting used in extracting data from BQ:\tNo. of obs. extracted per cycle (limit) = ' + str(i) + '\tOffset = ' + str(offcurr),2) QueryToUse = query.format(BinToUse = GroupsToInclude, TableToInclude = TableToInclude, lim = str(i), off = str(offcurr)) tempDF = Exec_BQ(QueryToUse, BQ_Cred) DF = DF.append(tempDF, ignore_index = True) offcurr += i except Exception as error: txt = 'Exception: In importing data from BQ was thrown!\nLimit used: ' + str(i) + '\n' + str(error) LevBasedPrint(txt, 2) AddRecommendation(txt, config) # raise Exception(txt) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('',2,1) return DF # ------------------------------------------------------------------------------------------- # # -------------------------------------------------- ImportData --------------------------------------------------- # def ImportData(config): """ Can be used to import data from either storage or BQ Extracts any size data from any SID of any number of days. Works in Two Configuration(config['IterationAim']['CycleType']), namely 'TrainTest' & 'GlTest' 'TrainTest' is for models training purpose where This Dataset is split later too make dataset size adequate for training uing sampling 'GlTest' is purely for prediction purpose, i.e. it will be used as testset only and will consume saved model to provide labels to observations """ # -----------<<< Setting constant values that are to be used inside function >>>----------- # AccessDataFrom = config['DataCollection']['GetDataFrom'] if AccessDataFrom == 'BQ': SettingToUse = config['IterationAim']['CycleType'] if SettingToUse: GlTestDataSize = int(config['IterationAim']['GlTest_DataGrabWindow_Hr']) FileLocalSavingName = config['InputPaths']['BQ_RawDataStoringName'].format(SettingToUse) GetNewCopy = config['DomainConfig']['BQ_GetNewCopyOfData'] elif AccessDataFrom == 'Storage': FileName = config['InputPaths']['Storage_RawData'] else: print('Wrong setting in "GetDataFrom", current value is {}'.format(AccessDataFrom)) txt = 'Exception: Wrong Configuration has been passed in "GetDataFrom".' 
AddRecommendation(txt, config) raise Exception(txt) LevBasedPrint('Inside "'+ImportData.__name__+'" function and configurations for this has been set.',1,1) LevBasedPrint('Accessing data from {}'.format(AccessDataFrom), 1) # ----------------------------<<< Accessing Data from BQ >>>------------------------------- # if AccessDataFrom == 'BQ': # -----------------------<<< Setting Configuration for GlTest >>>-------------------------- # if(SettingToUse == 'GlTest'): config['IfStatic']['DataGrabWindow_Days'] = str(int(GlTestDataSize/24 + 1)) config['IfDynamic']['DataGrabWindow_Hr'] = str(GlTestDataSize + 1) # --------------------------<<< Get New Copy Of Data Or Reuse >>>-------------------------- # if (os.path.exists(FileLocalSavingName) == False) | (GetNewCopy in ['True', 'true', 'T', 't', 'Yes', 'yes', 'Y', 'y']): DF = GrabAnySizeDatafromGoogleBQ(config) # if(SettingToUse == 'GlTest'): # DF.drop(DF[DF.BinsBackFromCurrent != 'Bin_0'].index, inplace=True) # DF.reset_index(drop=True, inplace=True) DF.to_csv(FileLocalSavingName, index=False)#, sep='|', encoding='utf-8') LevBasedPrint('Data extracted from BQ and saved locally to the File: '+ FileLocalSavingName, 1) else: DF = pd.read_csv(FileLocalSavingName)#, sep='|', encoding='utf-8') LevBasedPrint('Data Loaded From the File: '+ FileLocalSavingName, 1) LevBasedPrint('Data Shape: '+str(DF.shape), 1 ) # --------------------------<<< Accessing Data from Storage >>>---------------------------- # elif AccessDataFrom == 'Storage': DF = pd.read_csv(FileName)#, sep='|', encoding='utf-8') LevBasedPrint('Data Loaded From the File: '+ FileName, 1) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('Data Import | Complete',1) LevBasedPrint('',1,1) return DF # ------------------------------------------------------------------------------------------- # # ----------------------------------------------------------------------------------------------------------------- # ## AP # start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract # stop = -1 # step = -int(start/LimitDecreaseFactor) # limit = int(start/LimitDecreaseFactor) ## util which pt to try to gather the data ##AP # for i in [i for i in range(start,stop, step) if i >= limit]:
DataGrabMethodology = config['DomainConfig']['UseStaticOrDynamicCurrentDay'] LevBasedPrint('Inside "'+GenerateTableNames.__name__+'" function and configurations for this has been set.',3,1) LevBasedPrint('Data collection methodology that has been selected : ' + str(DataGrabMethodology),3) if DataGrabMethodology == 'static': Dates = ast.literal_eval(config['IfStatic']['Date'])
random_line_split
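The 'dynamic' branch of GenerateTableNames builds one sharded-table suffix per hour in a rolling window ending at the current UTC hour. A small sketch of that piece, using the illustrative helper name hourly_table_suffixes (the real code then wraps each SID + suffix in a legacy-SQL TABLE_QUERY(... 'table_id like "<SID>_<suffix>%"') clause):

from datetime import datetime, timedelta

def hourly_table_suffixes(window_hr, now=None):
    # One '%d%m%y_%H' suffix per hour, from `window_hr` hours ago down to -1,
    # mirroring the original `while DynDataWindow >= -1` loop that also pulls
    # in the table for the hour currently being written.
    now = now or datetime.utcnow()
    return [(now - timedelta(hours=h)).strftime('%d%m%y_%H')
            for h in range(window_hr, -2, -1)]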
SL1_ImportData.py
# Importing Data ''' Description: This file provide some function that are toe used for importing data . Function this file Contains: - ImportData: Used to import data either from BQ or from Storage. ''' # ----------------------------------------------- Loading Libraries ----------------------------------------------- # import pandas as pd import glob, os, ast, time from datetime import datetime, date, timedelta from SL0_GeneralFunc import LevBasedPrint, AddRecommendation # ------------------------------------------ GrabAnySizeDatafromGoogleBQ ------------------------------------------ # def Exec_BQ(query, projectid): LevBasedPrint('Inside "'+Exec_BQ.__name__+'" function.',3,1) LevBasedPrint('',3,1) return pd.io.gbq.read_gbq(query, project_id=projectid, index_col=None, col_order=None, reauth=False, private_key=None) #, verbose=True deprecated def GenerateTableNames(config): ''' Make use of Domain based parameters to get the data. ''' # -----------<<< Setting constant values that are to be used inside function >>>----------- # DatasetName = config['BigQueryConfig']['DatasetName'] SIDs = ast.literal_eval(config['DomainConfig']['SIDs']) DataGrabMethodology = config['DomainConfig']['UseStaticOrDynamicCurrentDay'] LevBasedPrint('Inside "'+GenerateTableNames.__name__+'" function and configurations for this has been set.',3,1) LevBasedPrint('Data collection methodology that has been selected : ' + str(DataGrabMethodology),3) if DataGrabMethodology == 'static': Dates = ast.literal_eval(config['IfStatic']['Date']) StaDataWindow = ast.literal_eval(config['IfStatic']['DataGrabWindow_Days']) elif DataGrabMethodology == 'dynamic': DynDataWindow = int(ast.literal_eval(config['IfDynamic']['DataGrabWindow_Hr'])) else: txt = 'Exception: Wrong Configuration has been passed in "UseStaticOrDynamicCurrentDay".' 
AddRecommendation(txt, config) raise Exception(txt) # -----------------------------<<< Generating Table Names >>>------------------------------ # ## Generating Table Names if DataGrabMethodology == 'static': if StaDataWindow != '-': CustomDate = date(2000 + int(Dates[0][4:6]), int(Dates[0][2:4]), int(Dates[0][0:2])) format = '%d%m%y' Dates = [ (CustomDate + timedelta(days=i)).strftime(format) for i in range(int(StaDataWindow)) ] TableToInclude = '' for i in range(len(SIDs)): for j in range(len(Dates)): TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + Dates[j] + '_%"\'),' elif DataGrabMethodology == 'dynamic': CurrentTime = datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday, time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec) ## UTC TableDateToTake = [] while DynDataWindow >= -1: ## -1 to even include the current hour table tempDate = CurrentTime - timedelta(days = 0, hours = DynDataWindow, minutes = 0) TableDateToTake.append(tempDate.strftime(format = '%d%m%y_%H')) DynDataWindow -= 1 TableToInclude, TableCnt = '', 0 for i in range(len(SIDs)): for j in range(len(TableDateToTake)): TableCnt += 0 TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + TableDateToTake[j] + '%"\'),' LevBasedPrint('Total number of tables accessed : '+str(TableCnt),3) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('',3,1) return TableToInclude # ------------------------------------------------------------------------------------------- # def GrabAnySizeDatafromGoogleBQ(config): ''' Incase if dataset size is too large then this function will enable the extraction of whole dataset by getting the data in chunks ''' # -----------<<< Setting constant values that are to be used inside function >>>----------- # ModuleSetting = config['Config']['ModuleSettingRuleName'] BQ_Cred = config['BigQueryConfig']['ProjectID'] if ModuleSetting == 'ICLSSTA': BinSizeBasedOnPeriod_Hr = int(config['Config']['ICLSSTA_BinSizeBasedOnPeriod_Hr']) BQ_QueryFile = config['InputPaths']['BQ_DataImportQuery'] LimitToStartWith = config['BigQueryConfig']['BQ_LimitToStart'] LimitDecreaseFactor = float(config['BigQueryConfig']['BQ_LimitDecreaseFactor']) LevBasedPrint('Inside "'+GrabAnySizeDatafromGoogleBQ.__name__+'" function and configurations for this has been set.',2,1) # -------------------------<<< Generating Tables Name To Query >>>------------------------- # TableToInclude = GenerateTableNames(config) #print(TableToInclude) # -------------------------<<< Creating Bin Setting For ICLSSTA >>>------------------------ # ## Getting the string that will be used to create bins for grouping based on a certain TimePeriod GroupsToInclude = '' if ModuleSetting == 'ICLSSTA': for i in range(1000): ##even if the bin size is as small as an hour, BQ has a limitation of accessing upto a max of 1000 Table, so this is the max possible limit ll_insec = int(i*BinSizeBasedOnPeriod_Hr *3600) ul_insec = int((i+1)*BinSizeBasedOnPeriod_Hr *3600 - 1) GroupsToInclude += '\n\tWHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN {low} AND {upp} THEN "Bin_{WhichBin}"'.format(low= ll_insec,upp= ul_insec, WhichBin= i) # ------------------------<<< Reading Query From External File >>>------------------------- # LevBasedPrint('Read from a locally saved Query File', 2) queryfile = open(BQ_QueryFile, 'r') query = queryfile.read() queryfile.close() # 
--------------------<<< Importing Data in Max possible batch size >>>-------------------- # ## looping over the limit and offset to grab the maximum possible bite in terms of observation that can be gathered ## GP start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract ratio = 1/LimitDecreaseFactor limit = 1000 ## util which pt to try to gather the data ## Hardcoded length = 1000 # query='''SELECT 1 limit {lim} offset {off}''' DF = pd.DataFrame() ##GP for i in [ int(start * ratio ** (n - 1)) for n in range(1, length + 1) if start * ratio ** (n - 1) > limit ]: if DF.shape == (0, 0): try: offcurr = 0 while offcurr < start: LevBasedPrint('Setting used in extracting data from BQ:\tNo. of obs. extracted per cycle (limit) = ' + str(i) + '\tOffset = ' + str(offcurr),2) QueryToUse = query.format(BinToUse = GroupsToInclude, TableToInclude = TableToInclude, lim = str(i), off = str(offcurr)) tempDF = Exec_BQ(QueryToUse, BQ_Cred) DF = DF.append(tempDF, ignore_index = True) offcurr += i except Exception as error: txt = 'Exception: In importing data from BQ was thrown!\nLimit used: ' + str(i) + '\n' + str(error) LevBasedPrint(txt, 2) AddRecommendation(txt, config) # raise Exception(txt) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('',2,1) return DF # ------------------------------------------------------------------------------------------- # # -------------------------------------------------- ImportData --------------------------------------------------- # def
(config): """ Can be used to import data from either storage or BQ Extracts any size data from any SID of any number of days. Works in Two Configuration(config['IterationAim']['CycleType']), namely 'TrainTest' & 'GlTest' 'TrainTest' is for models training purpose where This Dataset is split later too make dataset size adequate for training uing sampling 'GlTest' is purely for prediction purpose, i.e. it will be used as testset only and will consume saved model to provide labels to observations """ # -----------<<< Setting constant values that are to be used inside function >>>----------- # AccessDataFrom = config['DataCollection']['GetDataFrom'] if AccessDataFrom == 'BQ': SettingToUse = config['IterationAim']['CycleType'] if SettingToUse: GlTestDataSize = int(config['IterationAim']['GlTest_DataGrabWindow_Hr']) FileLocalSavingName = config['InputPaths']['BQ_RawDataStoringName'].format(SettingToUse) GetNewCopy = config['DomainConfig']['BQ_GetNewCopyOfData'] elif AccessDataFrom == 'Storage': FileName = config['InputPaths']['Storage_RawData'] else: print('Wrong setting in "GetDataFrom", current value is {}'.format(AccessDataFrom)) txt = 'Exception: Wrong Configuration has been passed in "GetDataFrom".' AddRecommendation(txt, config) raise Exception(txt) LevBasedPrint('Inside "'+ImportData.__name__+'" function and configurations for this has been set.',1,1) LevBasedPrint('Accessing data from {}'.format(AccessDataFrom), 1) # ----------------------------<<< Accessing Data from BQ >>>------------------------------- # if AccessDataFrom == 'BQ': # -----------------------<<< Setting Configuration for GlTest >>>-------------------------- # if(SettingToUse == 'GlTest'): config['IfStatic']['DataGrabWindow_Days'] = str(int(GlTestDataSize/24 + 1)) config['IfDynamic']['DataGrabWindow_Hr'] = str(GlTestDataSize + 1) # --------------------------<<< Get New Copy Of Data Or Reuse >>>-------------------------- # if (os.path.exists(FileLocalSavingName) == False) | (GetNewCopy in ['True', 'true', 'T', 't', 'Yes', 'yes', 'Y', 'y']): DF = GrabAnySizeDatafromGoogleBQ(config) # if(SettingToUse == 'GlTest'): # DF.drop(DF[DF.BinsBackFromCurrent != 'Bin_0'].index, inplace=True) # DF.reset_index(drop=True, inplace=True) DF.to_csv(FileLocalSavingName, index=False)#, sep='|', encoding='utf-8') LevBasedPrint('Data extracted from BQ and saved locally to the File: '+ FileLocalSavingName, 1) else: DF = pd.read_csv(FileLocalSavingName)#, sep='|', encoding='utf-8') LevBasedPrint('Data Loaded From the File: '+ FileLocalSavingName, 1) LevBasedPrint('Data Shape: '+str(DF.shape), 1 ) # --------------------------<<< Accessing Data from Storage >>>---------------------------- # elif AccessDataFrom == 'Storage': DF = pd.read_csv(FileName)#, sep='|', encoding='utf-8') LevBasedPrint('Data Loaded From the File: '+ FileName, 1) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('Data Import | Complete',1) LevBasedPrint('',1,1) return DF # ------------------------------------------------------------------------------------------- # # ----------------------------------------------------------------------------------------------------------------- # ## AP # start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract # stop = -1 # step = -int(start/LimitDecreaseFactor) # limit = int(start/LimitDecreaseFactor) ## util which pt to try to gather the data ##AP # for i in [i for i in range(start,stop, step) if i >= limit]:
ImportData
identifier_name
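ImportData's BQ path is essentially a cache-or-fetch decision: pull a fresh extract when no local copy exists or when BQ_GetNewCopyOfData is truthy, otherwise reuse the saved CSV. A minimal sketch of that decision, with fetch as a hypothetical stand-in for GrabAnySizeDatafromGoogleBQ and load_raw_data as an illustrative name:

import os
import pandas as pd

def load_raw_data(local_csv, fetch, get_new_copy='No'):
    truthy = ['True', 'true', 'T', 't', 'Yes', 'yes', 'Y', 'y']   # same flag values the config accepts
    if not os.path.exists(local_csv) or get_new_copy in truthy:
        df = fetch()                          # fresh extract from BigQuery
        df.to_csv(local_csv, index=False)     # save a local copy for later reuse
    else:
        df = pd.read_csv(local_csv)           # reuse the previously saved extract
    return df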
SL1_ImportData.py
# Importing Data ''' Description: This file provide some function that are toe used for importing data . Function this file Contains: - ImportData: Used to import data either from BQ or from Storage. ''' # ----------------------------------------------- Loading Libraries ----------------------------------------------- # import pandas as pd import glob, os, ast, time from datetime import datetime, date, timedelta from SL0_GeneralFunc import LevBasedPrint, AddRecommendation # ------------------------------------------ GrabAnySizeDatafromGoogleBQ ------------------------------------------ # def Exec_BQ(query, projectid): LevBasedPrint('Inside "'+Exec_BQ.__name__+'" function.',3,1) LevBasedPrint('',3,1) return pd.io.gbq.read_gbq(query, project_id=projectid, index_col=None, col_order=None, reauth=False, private_key=None) #, verbose=True deprecated def GenerateTableNames(config): ''' Make use of Domain based parameters to get the data. ''' # -----------<<< Setting constant values that are to be used inside function >>>----------- # DatasetName = config['BigQueryConfig']['DatasetName'] SIDs = ast.literal_eval(config['DomainConfig']['SIDs']) DataGrabMethodology = config['DomainConfig']['UseStaticOrDynamicCurrentDay'] LevBasedPrint('Inside "'+GenerateTableNames.__name__+'" function and configurations for this has been set.',3,1) LevBasedPrint('Data collection methodology that has been selected : ' + str(DataGrabMethodology),3) if DataGrabMethodology == 'static': Dates = ast.literal_eval(config['IfStatic']['Date']) StaDataWindow = ast.literal_eval(config['IfStatic']['DataGrabWindow_Days']) elif DataGrabMethodology == 'dynamic': DynDataWindow = int(ast.literal_eval(config['IfDynamic']['DataGrabWindow_Hr'])) else: txt = 'Exception: Wrong Configuration has been passed in "UseStaticOrDynamicCurrentDay".' 
AddRecommendation(txt, config) raise Exception(txt) # -----------------------------<<< Generating Table Names >>>------------------------------ # ## Generating Table Names if DataGrabMethodology == 'static': if StaDataWindow != '-': CustomDate = date(2000 + int(Dates[0][4:6]), int(Dates[0][2:4]), int(Dates[0][0:2])) format = '%d%m%y' Dates = [ (CustomDate + timedelta(days=i)).strftime(format) for i in range(int(StaDataWindow)) ] TableToInclude = '' for i in range(len(SIDs)): for j in range(len(Dates)): TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + Dates[j] + '_%"\'),' elif DataGrabMethodology == 'dynamic': CurrentTime = datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday, time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec) ## UTC TableDateToTake = [] while DynDataWindow >= -1: ## -1 to even include the current hour table tempDate = CurrentTime - timedelta(days = 0, hours = DynDataWindow, minutes = 0) TableDateToTake.append(tempDate.strftime(format = '%d%m%y_%H')) DynDataWindow -= 1 TableToInclude, TableCnt = '', 0 for i in range(len(SIDs)): for j in range(len(TableDateToTake)): TableCnt += 0 TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + TableDateToTake[j] + '%"\'),' LevBasedPrint('Total number of tables accessed : '+str(TableCnt),3) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('',3,1) return TableToInclude # ------------------------------------------------------------------------------------------- # def GrabAnySizeDatafromGoogleBQ(config): ''' Incase if dataset size is too large then this function will enable the extraction of whole dataset by getting the data in chunks ''' # -----------<<< Setting constant values that are to be used inside function >>>----------- # ModuleSetting = config['Config']['ModuleSettingRuleName'] BQ_Cred = config['BigQueryConfig']['ProjectID'] if ModuleSetting == 'ICLSSTA': BinSizeBasedOnPeriod_Hr = int(config['Config']['ICLSSTA_BinSizeBasedOnPeriod_Hr']) BQ_QueryFile = config['InputPaths']['BQ_DataImportQuery'] LimitToStartWith = config['BigQueryConfig']['BQ_LimitToStart'] LimitDecreaseFactor = float(config['BigQueryConfig']['BQ_LimitDecreaseFactor']) LevBasedPrint('Inside "'+GrabAnySizeDatafromGoogleBQ.__name__+'" function and configurations for this has been set.',2,1) # -------------------------<<< Generating Tables Name To Query >>>------------------------- # TableToInclude = GenerateTableNames(config) #print(TableToInclude) # -------------------------<<< Creating Bin Setting For ICLSSTA >>>------------------------ # ## Getting the string that will be used to create bins for grouping based on a certain TimePeriod GroupsToInclude = '' if ModuleSetting == 'ICLSSTA': for i in range(1000): ##even if the bin size is as small as an hour, BQ has a limitation of accessing upto a max of 1000 Table, so this is the max possible limit ll_insec = int(i*BinSizeBasedOnPeriod_Hr *3600) ul_insec = int((i+1)*BinSizeBasedOnPeriod_Hr *3600 - 1) GroupsToInclude += '\n\tWHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN {low} AND {upp} THEN "Bin_{WhichBin}"'.format(low= ll_insec,upp= ul_insec, WhichBin= i) # ------------------------<<< Reading Query From External File >>>------------------------- # LevBasedPrint('Read from a locally saved Query File', 2) queryfile = open(BQ_QueryFile, 'r') query = queryfile.read() queryfile.close() # 
--------------------<<< Importing Data in Max possible batch size >>>-------------------- # ## looping over the limit and offset to grab the maximum possible bite in terms of observation that can be gathered ## GP start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract ratio = 1/LimitDecreaseFactor limit = 1000 ## util which pt to try to gather the data ## Hardcoded length = 1000 # query='''SELECT 1 limit {lim} offset {off}''' DF = pd.DataFrame() ##GP for i in [ int(start * ratio ** (n - 1)) for n in range(1, length + 1) if start * ratio ** (n - 1) > limit ]: if DF.shape == (0, 0): try: offcurr = 0 while offcurr < start:
except Exception as error: txt = 'Exception: In importing data from BQ was thrown!\nLimit used: ' + str(i) + '\n' + str(error) LevBasedPrint(txt, 2) AddRecommendation(txt, config) # raise Exception(txt) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('',2,1) return DF # ------------------------------------------------------------------------------------------- # # -------------------------------------------------- ImportData --------------------------------------------------- # def ImportData(config): """ Can be used to import data from either storage or BQ Extracts any size data from any SID of any number of days. Works in Two Configuration(config['IterationAim']['CycleType']), namely 'TrainTest' & 'GlTest' 'TrainTest' is for models training purpose where This Dataset is split later too make dataset size adequate for training uing sampling 'GlTest' is purely for prediction purpose, i.e. it will be used as testset only and will consume saved model to provide labels to observations """ # -----------<<< Setting constant values that are to be used inside function >>>----------- # AccessDataFrom = config['DataCollection']['GetDataFrom'] if AccessDataFrom == 'BQ': SettingToUse = config['IterationAim']['CycleType'] if SettingToUse: GlTestDataSize = int(config['IterationAim']['GlTest_DataGrabWindow_Hr']) FileLocalSavingName = config['InputPaths']['BQ_RawDataStoringName'].format(SettingToUse) GetNewCopy = config['DomainConfig']['BQ_GetNewCopyOfData'] elif AccessDataFrom == 'Storage': FileName = config['InputPaths']['Storage_RawData'] else: print('Wrong setting in "GetDataFrom", current value is {}'.format(AccessDataFrom)) txt = 'Exception: Wrong Configuration has been passed in "GetDataFrom".' AddRecommendation(txt, config) raise Exception(txt) LevBasedPrint('Inside "'+ImportData.__name__+'" function and configurations for this has been set.',1,1) LevBasedPrint('Accessing data from {}'.format(AccessDataFrom), 1) # ----------------------------<<< Accessing Data from BQ >>>------------------------------- # if AccessDataFrom == 'BQ': # -----------------------<<< Setting Configuration for GlTest >>>-------------------------- # if(SettingToUse == 'GlTest'): config['IfStatic']['DataGrabWindow_Days'] = str(int(GlTestDataSize/24 + 1)) config['IfDynamic']['DataGrabWindow_Hr'] = str(GlTestDataSize + 1) # --------------------------<<< Get New Copy Of Data Or Reuse >>>-------------------------- # if (os.path.exists(FileLocalSavingName) == False) | (GetNewCopy in ['True', 'true', 'T', 't', 'Yes', 'yes', 'Y', 'y']): DF = GrabAnySizeDatafromGoogleBQ(config) # if(SettingToUse == 'GlTest'): # DF.drop(DF[DF.BinsBackFromCurrent != 'Bin_0'].index, inplace=True) # DF.reset_index(drop=True, inplace=True) DF.to_csv(FileLocalSavingName, index=False)#, sep='|', encoding='utf-8') LevBasedPrint('Data extracted from BQ and saved locally to the File: '+ FileLocalSavingName, 1) else: DF = pd.read_csv(FileLocalSavingName)#, sep='|', encoding='utf-8') LevBasedPrint('Data Loaded From the File: '+ FileLocalSavingName, 1) LevBasedPrint('Data Shape: '+str(DF.shape), 1 ) # --------------------------<<< Accessing Data from Storage >>>---------------------------- # elif AccessDataFrom == 'Storage': DF = pd.read_csv(FileName)#, sep='|', encoding='utf-8') LevBasedPrint('Data Loaded From the File: '+ FileName, 1) # ---------------------------------------<<< xyz >>>--------------------------------------- # LevBasedPrint('Data Import | Complete',1) LevBasedPrint('',1,1) return DF # 
------------------------------------------------------------------------------------------- # # ----------------------------------------------------------------------------------------------------------------- # ## AP # start = int(LimitToStartWith) # should be equal to the maximum number of observations that you want to extract # stop = -1 # step = -int(start/LimitDecreaseFactor) # limit = int(start/LimitDecreaseFactor) ## until which point to try to gather the data ##AP # for i in [i for i in range(start,stop, step) if i >= limit]:
LevBasedPrint('Setting used in extracting data from BQ:\tNo. of obs. extracted per cycle (limit) = ' + str(i) + '\tOffset = ' + str(offcurr),2) QueryToUse = query.format(BinToUse = GroupsToInclude, TableToInclude = TableToInclude, lim = str(i), off = str(offcurr)) tempDF = Exec_BQ(QueryToUse, BQ_Cred) DF = DF.append(tempDF, ignore_index = True) offcurr += i
conditional_block
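For the ICLSSTA setting, GrabAnySizeDatafromGoogleBQ also builds a long CASE-WHEN string that bins (CurrentTimeStamp - CurrentHitTimeStamp) into fixed-width time buckets; 1000 bins are generated because legacy BigQuery caps a query at 1000 tables. A sketch of that string construction (build_bin_clauses is an illustrative name, not the project's API):

def build_bin_clauses(bin_hr, n_bins=1000):
    clauses = []
    for i in range(n_bins):
        low = i * bin_hr * 3600               # bin lower bound, in seconds
        upp = (i + 1) * bin_hr * 3600 - 1     # bin upper bound, inclusive
        clauses.append(
            '\n\tWHEN (CurrentTimeStamp - CurrentHitTimeStamp) '
            'BETWEEN {low} AND {upp} THEN "Bin_{i}"'.format(low=low, upp=upp, i=i)
        )
    return ''.join(clauses)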
resnet.rs
use plant::*; use std::{rc::Rc, time::Instant, env, cmp::Ordering::*}; macro_rules! read { ($s: expr, $($arg:tt)*) => { ArrayInit::Data(&std::fs::read(&format!(concat!("resnet_data/", $s), $($arg)*)).unwrap()) }; } type M = Slice<u8, usize>; static TILE_MAP: [([u32; 6], [u32; 12]); 26] = [ // resnet18, 34 ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]), ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]), ([64, 128, 56, 1, 2, 0], [4, 8, 2, 1, 1, 1, 14, 1, 2, 1, 1, 1]), ([64, 128, 56, 3, 2, 1], [16, 1, 8, 1, 1, 14, 14, 2, 2, 16, 1, 3]), ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]), ([128, 256, 28, 1, 2, 0], [8, 4, 8, 1, 1, 14, 14, 2, 1, 1, 1, 1]), ([128, 256, 28, 3, 2, 1], [8, 2, 16, 1, 1, 1, 14, 1, 1, 2, 1, 1]), ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]), ([256, 512, 14, 1, 2, 0], [16, 1, 8, 1, 7, 1, 7, 1, 1, 128, 1, 1]), ([256, 512, 14, 3, 2, 1], [8, 2, 32, 1, 1, 14, 7, 2, 1, 4, 1, 1]), ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]), // resent50, 101, 152,有5个shape前面出现过了 // ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]), ([64, 64, 56, 1, 1, 0], [4, 2, 1, 2, 1, 2, 8, 1, 1, 1, 1, 1]), // ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]), ([64, 256, 56, 1, 1, 0], [8, 1, 2, 1, 2, 2, 8, 1, 1, 1, 1, 1]), ([256, 64, 56, 1, 1, 0], [8, 1, 2, 1, 2, 1, 8, 1, 1, 1, 1, 1]), ([256, 128, 56, 1, 2, 0], [16, 2, 2, 1, 1, 1, 14, 1, 4, 1, 1, 1]), // ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]), ([128, 512, 28, 1, 1, 0], [4, 1, 8, 1, 1, 1, 14, 2, 1, 8, 1, 1]), ([256, 512, 56, 1, 2, 0], [16, 2, 8, 1, 1, 2, 14, 1, 2, 1, 1, 1]), ([512, 128, 28, 1, 1, 0], [1, 8, 8, 1, 1, 2, 14, 2, 1, 2, 1, 1]), ([512, 256, 28, 1, 2, 0], [8, 2, 2, 1, 1, 1, 14, 1, 2, 2, 1, 1]), // ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]), ([256, 1024, 14, 1, 1, 0], [8, 1, 64, 2, 1, 7, 14, 1, 1, 128, 1, 1]), ([512, 1024, 28, 1, 2, 0], [16, 1, 32, 2, 1, 1, 14, 2, 1, 2, 1, 1]), ([1024, 256, 14, 1, 1, 0], [8, 1, 2, 2, 1, 1, 14, 1, 1, 1024, 1, 1]), ([1024, 512, 14, 1, 2, 0], [8, 2, 1, 1, 1, 2, 7, 2, 1, 128, 1, 1]), // ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]), ([512, 2048, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 1, 1, 1]), ([1024, 2048, 14, 1, 2, 0], [4, 16, 1, 1, 1, 7, 7, 2, 1, 8, 1, 1]), ([2048, 512, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 2048, 1, 1]), ]; fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) -> (impl Fn(M, Option<M>), M) { let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu); let f = Func::new(&name); let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW let bias = f.buf("BIAS", F32, In, x![oc,]); let osize = (size - kern + 2 * pad) / stride + 1; let buf_add = if add != 0 { Some(f.buf("ADD", F32, In, x![oc, osize, osize])) } else { None }; let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW static mut LIB_CACHE: Vec<([u32; 8], Rc<Lib>)> = Vec::new(); let lib_cache = unsafe { &mut LIB_CACHE }; let lib = if let Some((_, x)) = lib_cache.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad, add, relu]) { println!("{} reused", name); x.clone() } else { println!("{} compiling", name); let [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0] = TILE_MAP.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad]).unwrap().1; let pad_buf = if pad == 0 { a } else 
{ let pad_size = (osize - 1) * stride + kern; // <= size + 2 * pad,因为osize中/ stride不一定是整除 let pad_buf = f.buf("pad_buf", F32, Temp, x![ic, pad_size, pad_size]).set_loc(Local); f.comp("cache_pad", x![ic, pad_size, pad_size], x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })) .tags(0..=(if ic < 32 { 1 } else { 0 }), Parallel).store(pad_buf); pad_buf }; let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32)); b.set_expr(x!(pad_buf(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5))); let mut b_final = x!(b(i0, i1, i2, 0, 0, 0) + bias(i0)); if let Some(x) = buf_add { // add-relu b_final = x!(max::<f32>(0, b_final + x(i0, i1, i2))) } else if relu != 0 { b_final = x!(max::<f32>(0, b_final)); } let b_final = f.comp("B_final", x![oc, osize, osize], b_final); for b in &[b, b_final] { b.split(0, ff0).split(0, ff1).split(0, ff2) .split(4, xx0).split(4, xx1).split(4, xx2) .split(8, yy0).split(8, yy1).split(8, yy2); } b.split(12, rc0).split(14, rx0).split(16, ry0); // ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i, rc_o, rc_i, rx_o, rx_i, ry_o, ry_i b.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 12), (7, 14), (8, 16), (9, 2), (10, 6), (11, 10), (12, 13), (13, 15), (14, 17), (15, 3), (16, 7), (17, 11), ]); // ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, rc_o, rx_o, ry_o, ff_o_i, yy_o_i, xx_o_i, rc_i, rx_i, ry_i, ff_i, yy_i, xx_i // ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i b_final.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 2), (7, 6), (8, 10), (9, 3), (10, 7), (11, 11), ]); // ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, ff_o_i, yy_o_i, xx_o_i, ff_i, yy_i, xx_i b.tags(0..=(if oc / ff0 / ff1 / ff2 < 32 { 5 } else { 0 }), Parallel); if yy0 > 1 && yy0 < 32 { b.tag(17, Vectorize); } let (ff_local, xx_local, yy_local) = (ff0 * ff1, xx0 * xx1, yy0 * yy1); let b_local = f.buf("b_local", F32, Temp, x![ff_local, xx_local, yy_local]) .set_loc(Local).set_zero_init(true); b_local.alloc_at(b, 5); b.before(b_final, 6); b.store_at(b_local, x![i0 % ff_local, i1 % xx_local, i2 % yy_local]); b_final.store(buf_b); if pad_buf != a { pad_buf.alloc_at_func(); } f.compile_arg("-mprefer-vector-width=512"); let lib = Rc::new(if let Some(x) = buf_add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap()); lib_cache.push(([ic, oc, size, kern, stride, pad, add, relu], lib.clone())); lib }; static mut ID: u32 = 0; let id = unsafe { (ID, ID += 1).0 }; let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None)); let b1 = *b; (move |i, add| { if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); } }, b1) } // naive版本,能跑但很慢 // fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) // -> (impl Fn(M, Option<M>), M) { // println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad); // // let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu); // let f = Func::new(&name); // let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW // let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW // let bias = f.buf("BIAS", F32, In, x![oc,]); // let osize = (size - kern 
+ 2 * pad) / stride + 1; // let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW // let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad], // x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })); // a_pad.set_inline(true); // // let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0))); // let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32)); // b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5))); // let (b_final, add) = if add != 0 { // add-relu // let add = f.buf("ADD", F32, In, x![oc, osize, osize]); // (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add)) // } else { // (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None) // }; // let b_final = f.comp("B_final", x![oc, osize, osize], b_final); // b_init.before(b, 3).before(b_final, 3); // b_init.store(buf_b); // b.store_at(buf_b, x![i0, i1, i2]); // b_final.store(buf_b); // // let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap(); // // static mut ID: u32 = 0; // let id = unsafe { (ID, ID += 1).0 }; // let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None)); // let b1 = *b; // (move |i, add| { // if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); } // }, b1) // } fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) { let f = Func::new("maxpool"); let a = f.buf("A", F32, In, x![chan, size, size]); let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad], x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })); a_pad.set_inline(true); let osize = (size - kern + 2 * pad) / stride + 1; let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]); let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的 let b = f.comp("B", x![chan, osize, osize, kern, kern], x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2)))); b_init.before(b, 3); b_init.store(buf_b); b.store_at(buf_b, x![i0, i1, i2]); b.tag(0, Parallel); let lib = f.codegen(&[a, buf_b]).unwrap(); let b = buf_b.array(ArrayInit::None); let b1 = *b; (move |i| { (lib.f)([i, *b].as_ptr()) }, b1) } fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) { let f = Func::new("avgpool"); let a = f.buf("A", F32, In, x![chan, size, size]); let buf_b = f.buf("B", F32, Out, x![chan,]); let b_init = f.comp("B_init", x![chan,], x!(0)); let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0))); let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size)))); b_init.before(b, 1).before(b_final, 1); b_init.store(buf_b); b.store_at(buf_b, x![i0,]); b_final.store(buf_b); let lib = f.codegen(&[a, buf_b]).unwrap(); let b = buf_b.array(ArrayInit::None); let b1 = *b; (move |i| { (lib.f)([i, *b].as_ptr()) }, b1) } fn gemv(m: u32, n: u32) -> (impl Fn(M), M) { let f = Func::new("gemv"); let a = f.buf("A", F32, In, x![n,]); let w = f.buf("W", F32, In, x![m, n]); let c = f.buf("C", F32, In, x![m,]); let buf_b = f.buf("B", F32, Out, x![m,]); let b_init = f.comp("B_init", x![m,], x!(c(i0))); let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0))); b_init.store(buf_b); b.store_at(buf_b, x![i0,]); b_init.before(b, 1); b.tag(0, 
Parallel); let lib = f.codegen(&[a, w, c, buf_b]).unwrap(); let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None)); let b1 = *b; (move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1) } fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) { let expansion = if bottleneck { 4 } else { 1 }; let downsample = stride != 1 || inplanes != planes * expansion; if bottleneck { let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1); let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1); let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1); let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None }; (Box::new(move |i| { if let Some((f4, _)) = &f4 { f4(i, None); } f1(i, None); f2(b1, None); f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i })); }), b3) } else { let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1); let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1); let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None }; (Box::new(move |i| { if let Some((f3, _)) = &f3 { f3(i, None); } f1(i, None); f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i })); }), b2) } } fn layer(inplanes: u32, planes: u32, blocks: u32, size: u32, stride: u32, bottleneck: bool) -
pl Fn(M), M) { let expansion = if bottleneck { 4 } else { 1 }; let mut layers = Vec::with_capacity(blocks as _); layers.push(block(inplanes, planes, size, stride, bottleneck)); for _ in 1..blocks { layers.push(block(planes * expansion, planes, size / stride, 1, bottleneck)); } let b = layers.last().unwrap().1; (move |mut i| for (f, b) in &layers { f((i, i = *b).0); }, b) } fn main() { parallel_init(0); let args = env::args().collect::<Vec<_>>(); assert_eq!(args.len(), 3, "usage: cargo run --bin resnet <layer> <repeat>"); let repeat = args[2].parse::<u32>().unwrap(); let (blocks, bottleneck) = match args[1].as_str() { "18" => (&[2, 2, 2, 2], false), "34" => (&[3, 4, 6, 3], false), "50" => (&[3, 4, 6, 3], true), "101" => (&[3, 4, 23, 3], true), "152" => (&[3, 8, 36, 3], true), x => panic!("expect 1st argument to be [18, 34, 50, 101, 152], found {}", x), }; let expansion = if bottleneck { 4 } else { 1 }; let input = Func::new("_").buf("input", F32, In, x![3, 224, 224]).array(read!("input",)); let (f1, b1) = conv(3, 64, 224, 7, 2, 3, 0, 1); let (f2, b2) = maxpool(64, 112, 3, 2, 1); let (f3, b3) = layer(64, 64, blocks[0], 56, 1, bottleneck); let (f4, b4) = layer(64 * expansion, 128, blocks[1], 56, 2, bottleneck); let (f5, b5) = layer(128 * expansion, 256, blocks[2], 28, 2, bottleneck); let (f6, b6) = layer(256 * expansion, 512, blocks[3], 14, 2, bottleneck); let (f7, b7) = avgpool(512 * expansion, 7); let (f8, b8) = gemv(1000, 512 * expansion); for _ in 0..4 { let beg = Instant::now(); for _ in 0..repeat { f1(*input, None); f2(b1); f3(b2); f4(b3); f5(b4); f6(b5); f7(b6); f8(b7); } println!("{}s", Instant::now().duration_since(beg).as_secs_f32() / repeat as f32); } fn softmax(x: &mut [f32]) { let mut m = f32::NEG_INFINITY; for x in x.iter() { m = m.max(*x); } let mut s = 0.0; for x in x.iter_mut() { s += (*x = (*x - m).exp(), *x).1; } for x in x.iter_mut() { *x /= s; } } let result = b8.transmute::<f32, _>(1000); softmax(result.flat()); let mut result = result.flat().iter().copied().enumerate().collect::<Vec<_>>(); result.sort_unstable_by(|&(_, x1), &(_, x2)| if x1 > x2 { Less } else if x1 < x2 { Greater } else { Equal }); for (i, x) in &result[0..5] { println!("class = {}, prob = {}", i, x) } }
> (im
identifier_name
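Every conv/pool builder in resnet.rs derives its output spatial size from the same integer arithmetic, osize = (size - kern + 2*pad) / stride + 1, and the TILE_MAP entries are keyed on those shapes. A quick Python restatement with two worked checks from the network stem (conv_out_size is an illustrative name, not part of the crate):

def conv_out_size(size, kern, stride, pad):
    # Integer division, matching the Rust expression (size - kern + 2 * pad) / stride + 1.
    return (size - kern + 2 * pad) // stride + 1

assert conv_out_size(224, 7, 2, 3) == 112   # 7x7 stride-2 stem conv: 224 -> 112
assert conv_out_size(112, 3, 2, 1) == 56    # following 3x3 stride-2 maxpool: 112 -> 56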
resnet.rs
use plant::*; use std::{rc::Rc, time::Instant, env, cmp::Ordering::*}; macro_rules! read { ($s: expr, $($arg:tt)*) => { ArrayInit::Data(&std::fs::read(&format!(concat!("resnet_data/", $s), $($arg)*)).unwrap()) }; } type M = Slice<u8, usize>; static TILE_MAP: [([u32; 6], [u32; 12]); 26] = [ // resnet18, 34 ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]), ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]), ([64, 128, 56, 1, 2, 0], [4, 8, 2, 1, 1, 1, 14, 1, 2, 1, 1, 1]), ([64, 128, 56, 3, 2, 1], [16, 1, 8, 1, 1, 14, 14, 2, 2, 16, 1, 3]), ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]), ([128, 256, 28, 1, 2, 0], [8, 4, 8, 1, 1, 14, 14, 2, 1, 1, 1, 1]), ([128, 256, 28, 3, 2, 1], [8, 2, 16, 1, 1, 1, 14, 1, 1, 2, 1, 1]), ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]), ([256, 512, 14, 1, 2, 0], [16, 1, 8, 1, 7, 1, 7, 1, 1, 128, 1, 1]), ([256, 512, 14, 3, 2, 1], [8, 2, 32, 1, 1, 14, 7, 2, 1, 4, 1, 1]), ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]), // resent50, 101, 152,有5个shape前面出现过了 // ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]), ([64, 64, 56, 1, 1, 0], [4, 2, 1, 2, 1, 2, 8, 1, 1, 1, 1, 1]), // ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]), ([64, 256, 56, 1, 1, 0], [8, 1, 2, 1, 2, 2, 8, 1, 1, 1, 1, 1]), ([256, 64, 56, 1, 1, 0], [8, 1, 2, 1, 2, 1, 8, 1, 1, 1, 1, 1]), ([256, 128, 56, 1, 2, 0], [16, 2, 2, 1, 1, 1, 14, 1, 4, 1, 1, 1]), // ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]), ([128, 512, 28, 1, 1, 0], [4, 1, 8, 1, 1, 1, 14, 2, 1, 8, 1, 1]), ([256, 512, 56, 1, 2, 0], [16, 2, 8, 1, 1, 2, 14, 1, 2, 1, 1, 1]), ([512, 128, 28, 1, 1, 0], [1, 8, 8, 1, 1, 2, 14, 2, 1, 2, 1, 1]), ([512, 256, 28, 1, 2, 0], [8, 2, 2, 1, 1, 1, 14, 1, 2, 2, 1, 1]), // ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]), ([256, 1024, 14, 1, 1, 0], [8, 1, 64, 2, 1, 7, 14, 1, 1, 128, 1, 1]), ([512, 1024, 28, 1, 2, 0], [16, 1, 32, 2, 1, 1, 14, 2, 1, 2, 1, 1]), ([1024, 256, 14, 1, 1, 0], [8, 1, 2, 2, 1, 1, 14, 1, 1, 1024, 1, 1]), ([1024, 512, 14, 1, 2, 0], [8, 2, 1, 1, 1, 2, 7, 2, 1, 128, 1, 1]), // ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]), ([512, 2048, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 1, 1, 1]), ([1024, 2048, 14, 1, 2, 0], [4, 16, 1, 1, 1, 7, 7, 2, 1, 8, 1, 1]), ([2048, 512, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 2048, 1, 1]), ]; fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) -> (impl Fn(M, Option<M>), M) { let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu); let f = Func::new(&name); let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW let bias = f.buf("BIAS", F32, In, x![oc,]); let osize = (size - kern + 2 * pad) / stride + 1; let buf_add = if add != 0 { Some(f.buf("ADD", F32, In, x![oc, osize, osize])) } else { None }; let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW static mut LIB_CACHE: Vec<([u32; 8], Rc<Lib>)> = Vec::new(); let lib_cache = unsafe { &mut LIB_CACHE }; let lib = if let Some((_, x)) = lib_cache.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad, add, relu]) { println!("{} reused", name); x.clone() } else { println!("{} compiling", name); let [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0] = TILE_MAP.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad]).unwrap().1; let pad_buf = if pad == 0 { a } else 
{ let pad_size = (osize - 1) * stride + kern; // <= size + 2 * pad,因为osize中/ stride不一定是整除 let pad_buf = f.buf("pad_buf", F32, Temp, x![ic, pad_size, pad_size]).set_loc(Local); f.comp("cache_pad", x![ic, pad_size, pad_size], x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })) .tags(0..=(if ic < 32 { 1 } else { 0 }), Parallel).store(pad_buf); pad_buf }; let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32)); b.set_expr(x!(pad_buf(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5))); let mut b_final = x!(b(i0, i1, i2, 0, 0, 0) + bias(i0)); if let Some(x) = buf_add { // add-relu b_final = x!(max::<f32>(0, b_final + x(i0, i1, i2))) } else if relu != 0 { b_final = x!(max::<f32>(0, b_final)); } let b_final = f.comp("B_final", x![oc, osize, osize], b_final); for b in &[b, b_final] { b.split(0, ff0).split(0, ff1).split(0, ff2) .split(4, xx0).split(4, xx1).split(4, xx2) .split(8, yy0).split(8, yy1).split(8, yy2); } b.split(12, rc0).split(14, rx0).split(16, ry0); // ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i, rc_o, rc_i, rx_o, rx_i, ry_o, ry_i b.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 12), (7, 14), (8, 16), (9, 2), (10, 6), (11, 10), (12, 13), (13, 15), (14, 17), (15, 3), (16, 7), (17, 11), ]); // ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, rc_o, rx_o, ry_o, ff_o_i, yy_o_i, xx_o_i, rc_i, rx_i, ry_i, ff_i, yy_i, xx_i // ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i b_final.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 2), (7, 6), (8, 10), (9, 3), (10, 7), (11, 11), ]); // ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, ff_o_i, yy_o_i, xx_o_i, ff_i, yy_i, xx_i b.tags(0..=(if oc / ff0 / ff1 / ff2 < 32 { 5 } else { 0 }), Parallel); if yy0 > 1 && yy0 < 32 { b.tag(17, Vectorize); } let (ff_local, xx_local, yy_local) = (ff0 * ff1, xx0 * xx1, yy0 * yy1); let b_local = f.buf("b_local", F32, Temp, x![ff_local, xx_local, yy_local]) .set_loc(Local).set_zero_init(true); b_local.alloc_at(b, 5); b.before(b_final, 6); b.store_at(b_local, x![i0 % ff_local, i1 % xx_local, i2 % yy_local]); b_final.store(buf_b); if pad_buf != a { pad_buf.alloc_at_func(); } f.compile_arg("-mprefer-vector-width=512"); let lib = Rc::new(if let Some(x) = buf_add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap()); lib_cache.push(([ic, oc, size, kern, stride, pad, add, relu], lib.clone())); lib }; static mut ID: u32 = 0; let id = unsafe { (ID, ID += 1).0 }; let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None)); let b1 = *b; (move |i, add| { if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); } }, b1) } // naive版本,能跑但很慢 // fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) // -> (impl Fn(M, Option<M>), M) { // println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad); // // let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu); // let f = Func::new(&name); // let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW // let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW // let bias = f.buf("BIAS", F32, In, x![oc,]); // let osize = (size - kern 
+ 2 * pad) / stride + 1; // let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW // let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad], // x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })); // a_pad.set_inline(true); // // let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0))); // let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32)); // b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5))); // let (b_final, add) = if add != 0 { // add-relu // let add = f.buf("ADD", F32, In, x![oc, osize, osize]); // (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add)) // } else { // (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None) // }; // let b_final = f.comp("B_final", x![oc, osize, osize], b_final); // b_init.before(b, 3).before(b_final, 3); // b_init.store(buf_b); // b.store_at(buf_b, x![i0, i1, i2]); // b_final.store(buf_b); // // let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap(); // // static mut ID: u32 = 0; // let id = unsafe { (ID, ID += 1).0 }; // let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None)); // let b1 = *b; // (move |i, add| { // if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); } // }, b1) // } fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) { let f = Func::new("maxpool"); let a = f.buf("A", F32, In, x![chan, size, size]); let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad], x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })); a_pad.set_inline(true); let osize = (size - kern + 2 * pad) / stride + 1; let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]); let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的 let b = f.comp("B", x![chan, osize, osize, kern, kern], x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2)))); b_init.before(b, 3); b_init.store(buf_b); b.store_at(buf_b, x![i0, i1, i2]); b.tag(0, Parallel); let lib = f.codegen(&[a, buf_b]).unwrap(); let b = buf_b.array(ArrayInit::None); let b1 = *b; (move |i| { (lib.f)([i, *b].as_ptr()) }, b1) } fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) {
let b_init = f.comp("B_init", x![chan,], x!(0)); let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0))); let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size)))); b_init.before(b, 1).before(b_final, 1); b_init.store(buf_b); b.store_at(buf_b, x![i0,]); b_final.store(buf_b); let lib = f.codegen(&[a, buf_b]).unwrap(); let b = buf_b.array(ArrayInit::None); let b1 = *b; (move |i| { (lib.f)([i, *b].as_ptr()) }, b1) } fn gemv(m: u32, n: u32) -> (impl Fn(M), M) { let f = Func::new("gemv"); let a = f.buf("A", F32, In, x![n,]); let w = f.buf("W", F32, In, x![m, n]); let c = f.buf("C", F32, In, x![m,]); let buf_b = f.buf("B", F32, Out, x![m,]); let b_init = f.comp("B_init", x![m,], x!(c(i0))); let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0))); b_init.store(buf_b); b.store_at(buf_b, x![i0,]); b_init.before(b, 1); b.tag(0, Parallel); let lib = f.codegen(&[a, w, c, buf_b]).unwrap(); let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None)); let b1 = *b; (move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1) } fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) { let expansion = if bottleneck { 4 } else { 1 }; let downsample = stride != 1 || inplanes != planes * expansion; if bottleneck { let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1); let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1); let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1); let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None }; (Box::new(move |i| { if let Some((f4, _)) = &f4 { f4(i, None); } f1(i, None); f2(b1, None); f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i })); }), b3) } else { let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1); let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1); let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None }; (Box::new(move |i| { if let Some((f3, _)) = &f3 { f3(i, None); } f1(i, None); f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i })); }), b2) } } fn layer(inplanes: u32, planes: u32, blocks: u32, size: u32, stride: u32, bottleneck: bool) -> (impl Fn(M), M) { let expansion = if bottleneck { 4 } else { 1 }; let mut layers = Vec::with_capacity(blocks as _); layers.push(block(inplanes, planes, size, stride, bottleneck)); for _ in 1..blocks { layers.push(block(planes * expansion, planes, size / stride, 1, bottleneck)); } let b = layers.last().unwrap().1; (move |mut i| for (f, b) in &layers { f((i, i = *b).0); }, b) } fn main() { parallel_init(0); let args = env::args().collect::<Vec<_>>(); assert_eq!(args.len(), 3, "usage: cargo run --bin resnet <layer> <repeat>"); let repeat = args[2].parse::<u32>().unwrap(); let (blocks, bottleneck) = match args[1].as_str() { "18" => (&[2, 2, 2, 2], false), "34" => (&[3, 4, 6, 3], false), "50" => (&[3, 4, 6, 3], true), "101" => (&[3, 4, 23, 3], true), "152" => (&[3, 8, 36, 3], true), x => panic!("expect 1st argument to be [18, 34, 50, 101, 152], found {}", x), }; let expansion = if bottleneck { 4 } else { 1 }; let input = Func::new("_").buf("input", F32, In, x![3, 224, 224]).array(read!("input",)); let (f1, b1) = conv(3, 64, 224, 7, 2, 3, 0, 1); let (f2, b2) = maxpool(64, 112, 3, 2, 1); let (f3, b3) = layer(64, 64, blocks[0], 56, 1, bottleneck); let (f4, b4) = layer(64 * expansion, 128, blocks[1], 56, 2, bottleneck); let (f5, b5) = 
layer(128 * expansion, 256, blocks[2], 28, 2, bottleneck); let (f6, b6) = layer(256 * expansion, 512, blocks[3], 14, 2, bottleneck); let (f7, b7) = avgpool(512 * expansion, 7); let (f8, b8) = gemv(1000, 512 * expansion); for _ in 0..4 { let beg = Instant::now(); for _ in 0..repeat { f1(*input, None); f2(b1); f3(b2); f4(b3); f5(b4); f6(b5); f7(b6); f8(b7); } println!("{}s", Instant::now().duration_since(beg).as_secs_f32() / repeat as f32); } fn softmax(x: &mut [f32]) { let mut m = f32::NEG_INFINITY; for x in x.iter() { m = m.max(*x); } let mut s = 0.0; for x in x.iter_mut() { s += (*x = (*x - m).exp(), *x).1; } for x in x.iter_mut() { *x /= s; } } let result = b8.transmute::<f32, _>(1000); softmax(result.flat()); let mut result = result.flat().iter().copied().enumerate().collect::<Vec<_>>(); result.sort_unstable_by(|&(_, x1), &(_, x2)| if x1 > x2 { Less } else if x1 < x2 { Greater } else { Equal }); for (i, x) in &result[0..5] { println!("class = {}, prob = {}", i, x) } }
let f = Func::new("avgpool"); let a = f.buf("A", F32, In, x![chan, size, size]); let buf_b = f.buf("B", F32, Out, x![chan,]);
random_line_split
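A quick sanity check of the shape arithmetic used by the conv schedule above: the output size is osize = (size - kern + 2*pad) / stride + 1 with flooring integer division, and the local padded buffer only needs (osize - 1) * stride + kern elements per spatial axis, which is at most size + 2*pad precisely because the division may floor. A minimal Python sketch (illustrative only), using the first-layer shape from TILE_MAP (3x224x224 input, 7x7 kernel, stride 2, pad 3) as the worked example:

def conv_out_size(size: int, kern: int, stride: int, pad: int) -> int:
    # Same flooring division as the Rust code: (size - kern + 2*pad) / stride + 1
    return (size - kern + 2 * pad) // stride + 1

def padded_buf_size(size: int, kern: int, stride: int, pad: int) -> int:
    # Only the region actually read while producing the output needs to be cached/padded.
    osize = conv_out_size(size, kern, stride, pad)
    return (osize - 1) * stride + kern

size, kern, stride, pad = 224, 7, 2, 3            # first ResNet conv layer
assert conv_out_size(size, kern, stride, pad) == 112
assert padded_buf_size(size, kern, stride, pad) <= size + 2 * pad   # 229 <= 230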
resnet.rs
use plant::*; use std::{rc::Rc, time::Instant, env, cmp::Ordering::*}; macro_rules! read { ($s: expr, $($arg:tt)*) => { ArrayInit::Data(&std::fs::read(&format!(concat!("resnet_data/", $s), $($arg)*)).unwrap()) }; } type M = Slice<u8, usize>; static TILE_MAP: [([u32; 6], [u32; 12]); 26] = [ // resnet18, 34 ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]), ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]), ([64, 128, 56, 1, 2, 0], [4, 8, 2, 1, 1, 1, 14, 1, 2, 1, 1, 1]), ([64, 128, 56, 3, 2, 1], [16, 1, 8, 1, 1, 14, 14, 2, 2, 16, 1, 3]), ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]), ([128, 256, 28, 1, 2, 0], [8, 4, 8, 1, 1, 14, 14, 2, 1, 1, 1, 1]), ([128, 256, 28, 3, 2, 1], [8, 2, 16, 1, 1, 1, 14, 1, 1, 2, 1, 1]), ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]), ([256, 512, 14, 1, 2, 0], [16, 1, 8, 1, 7, 1, 7, 1, 1, 128, 1, 1]), ([256, 512, 14, 3, 2, 1], [8, 2, 32, 1, 1, 14, 7, 2, 1, 4, 1, 1]), ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]), // resent50, 101, 152,有5个shape前面出现过了 // ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]), ([64, 64, 56, 1, 1, 0], [4, 2, 1, 2, 1, 2, 8, 1, 1, 1, 1, 1]), // ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]), ([64, 256, 56, 1, 1, 0], [8, 1, 2, 1, 2, 2, 8, 1, 1, 1, 1, 1]), ([256, 64, 56, 1, 1, 0], [8, 1, 2, 1, 2, 1, 8, 1, 1, 1, 1, 1]), ([256, 128, 56, 1, 2, 0], [16, 2, 2, 1, 1, 1, 14, 1, 4, 1, 1, 1]), // ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]), ([128, 512, 28, 1, 1, 0], [4, 1, 8, 1, 1, 1, 14, 2, 1, 8, 1, 1]), ([256, 512, 56, 1, 2, 0], [16, 2, 8, 1, 1, 2, 14, 1, 2, 1, 1, 1]), ([512, 128, 28, 1, 1, 0], [1, 8, 8, 1, 1, 2, 14, 2, 1, 2, 1, 1]), ([512, 256, 28, 1, 2, 0], [8, 2, 2, 1, 1, 1, 14, 1, 2, 2, 1, 1]), // ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]), ([256, 1024, 14, 1, 1, 0], [8, 1, 64, 2, 1, 7, 14, 1, 1, 128, 1, 1]), ([512, 1024, 28, 1, 2, 0], [16, 1, 32, 2, 1, 1, 14, 2, 1, 2, 1, 1]), ([1024, 256, 14, 1, 1, 0], [8, 1, 2, 2, 1, 1, 14, 1, 1, 1024, 1, 1]), ([1024, 512, 14, 1, 2, 0], [8, 2, 1, 1, 1, 2, 7, 2, 1, 128, 1, 1]), // ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]), ([512, 2048, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 1, 1, 1]), ([1024, 2048, 14, 1, 2, 0], [4, 16, 1, 1, 1, 7, 7, 2, 1, 8, 1, 1]), ([2048, 512, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 2048, 1, 1]), ]; fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) -> (impl Fn(M, Option<M>), M) { let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu); let f = Func::new(&name); let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW let bias = f.buf("BIAS", F32, In, x![oc,]); let osize = (size - kern + 2 * pad) / stride + 1; let buf_add = if add != 0 { Some(f.buf("ADD", F32, In, x![oc, osize, osize])) } else { None }; let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW static mut LIB_CACHE: Vec<([u32; 8], Rc<Lib>)> = Vec::new(); let lib_cache = unsafe { &mut LIB_CACHE }; let lib = if let Some((_, x)) = lib_cache.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad, add, relu]) { println!("{} reused", name); x.clone() } else { println!("{} compiling", name); let [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0] = TILE_MAP.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad]).unwrap().1; let pad_buf = if pad == 0 { a } else 
{ let pad_size = (osize - 1) * stride + kern; // <= size + 2 * pad,因为osize中/ stride不一定是整除 let pad_buf = f.buf("pad_buf", F32, Temp, x![ic, pad_size, pad_size]).set_loc(Local); f.comp("cache_pad", x![ic, pad_size, pad_size], x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })) .tags(0..=(if ic < 32 { 1 } else { 0 }), Parallel).store(pad_buf); pad_buf }; let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32)); b.set_expr(x!(pad_buf(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5))); let mut b_final = x!(b(i0, i1, i2, 0, 0, 0) + bias(i0)); if let Some(x) = buf_add { // add-relu b_final = x!(max::<f32>(0, b_final + x(i0, i1, i2))) } else if relu != 0 { b_final = x!(max::<f32>(0, b_final)); } let b_final = f.comp("B_final", x![oc, osize, osize], b_final); for b in &[b, b_final] { b.split(0, ff0).split(0, ff1).split(0, ff2) .split(4, xx0).split(4, xx1).split(4, xx2) .split(8, yy0).split(8, yy1).split(8, yy2); } b.split(12, rc0).split(14, rx0).split(16, ry0); // ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i, rc_o, rc_i, rx_o, rx_i, ry_o, ry_i b.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 12), (7, 14), (8, 16), (9, 2), (10, 6), (11, 10), (12, 13), (13, 15), (14, 17), (15, 3), (16, 7), (17, 11), ]); // ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, rc_o, rx_o, ry_o, ff_o_i, yy_o_i, xx_o_i, rc_i, rx_i, ry_i, ff_i, yy_i, xx_i // ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i b_final.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 2), (7, 6), (8, 10), (9, 3), (10, 7), (11, 11), ]); // ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, ff_o_i, yy_o_i, xx_o_i, ff_i, yy_i, xx_i b.tags(0..=(if oc / ff0 / ff1 / ff2 < 32 { 5 } else { 0 }), Parallel); if yy0 > 1 && yy0 < 32 { b.tag(17, Vectorize); } let (ff_local, xx_local, yy_local) = (ff0 * ff1, xx0 * xx1, yy0 * yy1); let b_local = f.buf("b_local", F32, Temp, x![ff_local, xx_local, yy_local]) .set_loc(Local).set_zero_init(true); b_local.alloc_at(b, 5); b.before(b_final, 6); b.store_at(b_local, x![i0 % ff_local, i1 % xx_local, i2 % yy_local]); b_final.store(buf_b); if pad_buf != a { pad_buf.alloc_at_func(); } f.compile_arg("-mprefer-vector-width=512"); let lib = Rc::new(if let Some(x) = buf_add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap()); lib_cache.push(([ic, oc, size, kern, stride, pad, add, relu], lib.clone())); lib }; static mut ID: u32 = 0; let id = unsafe { (ID, ID += 1).0 }; let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None)); let b1 = *b; (move |i, add| { if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); } }, b1) } // naive版本,能跑但很慢 // fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) // -> (impl Fn(M, Option<M>), M) { // println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad); // // let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu); // let f = Func::new(&name); // let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW // let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW // let bias = f.buf("BIAS", F32, In, x![oc,]); // let osize = (size - kern 
+ 2 * pad) / stride + 1; // let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW // let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad], // x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })); // a_pad.set_inline(true); // // let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0))); // let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32)); // b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5))); // let (b_final, add) = if add != 0 { // add-relu // let add = f.buf("ADD", F32, In, x![oc, osize, osize]); // (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add)) // } else { // (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None) // }; // let b_final = f.comp("B_final", x![oc, osize, osize], b_final); // b_init.before(b, 3).before(b_final, 3); // b_init.store(buf_b); // b.store_at(buf_b, x![i0, i1, i2]); // b_final.store(buf_b); // // let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap(); // // static mut ID: u32 = 0; // let id = unsafe { (ID, ID += 1).0 }; // let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None)); // let b1 = *b; // (move |i, add| { // if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); } // }, b1) // } fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) { let f = Func::new("maxpool"); let a = f.buf("A", F32, In, x![chan, size, size]); let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad], x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })); a_pad.set_inline(true); let osize = (size - kern + 2 * pad) / stride + 1; let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]); let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的 let b = f.comp("B", x![chan, osize, osize, kern, kern], x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2)))); b_init.before(b, 3); b_init.store(buf_b); b.store_at(buf_b, x![i0, i1, i2]); b.tag(0, Parallel); let lib = f.codegen(&[a, buf_b]).unwrap(); let b = buf_b.array(ArrayInit::None); let b1 = *b; (move |i| { (lib.f)([i, *b].as_ptr()) }, b1) } fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) { let f = Func::new("avgpool"); let a = f.buf("A", F32, In, x![chan, size, size]); let buf_b = f.buf("B", F32, Out, x![chan,]); let b_init = f.comp("B_init", x![chan,], x!(0)); let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0))); let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size)))); b_init.before(b, 1).before(b_final, 1); b_init.store(buf_b); b.store_at(buf_b, x![i0,]); b_final.store(buf_b); let lib = f.codegen(&[a, buf_b]).unwrap(); let b = buf_b.array(ArrayInit::None); let b1 = *b; (move |i| { (lib.f)([i, *b].as_ptr()) }, b1) } fn gemv(m: u32, n: u32) -> (impl Fn(M), M) { let f = Func::new("gemv"); let a = f.buf("A", F32, In, x![n,]); let w = f.buf("W", F32, In, x![m, n]); let c = f.buf("C", F32, In, x![m,]); let buf_b = f.buf("B", F32, Out, x![m,]); let b_init = f.comp("B_init", x![m,], x!(c(i0))); let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0))); b_init.store(buf_b); b.store_at(buf_b, x![i0,]); b_init.before(b, 1); b.tag(0, 
Parallel); let lib = f.codegen(&[a, w, c, buf_b]).unwrap(); let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None)); let b1 = *b; (move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1) } fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) { let expansion = if bottleneck { 4 } else { 1 }; let downsample = stride != 1 || inplanes != planes * expansion; if bottleneck {
let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1); let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1); let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1); let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None }; (Box::new(move |i| { if let Some((f4, _)) = &f4 { f4(i, None); } f1(i, None); f2(b1, None); f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i })); }), b3) } else { let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1); let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1); let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None }; (Box::new(move |i| { if let Some((f3, _)) = &f3 { f3(i, None); } f1(i, None); f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i })); }), b2) } } fn layer(inplanes: u32, planes: u32, blocks: u32, size: u32, stride: u32, bottleneck: bool) -> (impl Fn(M), M) { let expansion = if bottleneck { 4 } else { 1 }; let mut layers = Vec::with_capacity(blocks as _); layers.push(block(inplanes, planes, size, stride, bottleneck)); for _ in 1..blocks { layers.push(block(planes * expansion, planes, size / stride, 1, bottleneck)); } let b = layers.last().unwrap().1; (move |mut i| for (f, b) in &layers { f((i, i = *b).0); }, b) } fn main() { parallel_init(0); let args = env::args().collect::<Vec<_>>(); assert_eq!(args.len(), 3, "usage: cargo run --bin resnet <layer> <repeat>"); let repeat = args[2].parse::<u32>().unwrap(); let (blocks, bottleneck) = match args[1].as_str() { "18" => (&[2, 2, 2, 2], false), "34" => (&[3, 4, 6, 3], false), "50" => (&[3, 4, 6, 3], true), "101" => (&[3, 4, 23, 3], true), "152" => (&[3, 8, 36, 3], true), x => panic!("expect 1st argument to be [18, 34, 50, 101, 152], found {}", x), }; let expansion = if bottleneck { 4 } else { 1 }; let input = Func::new("_").buf("input", F32, In, x![3, 224, 224]).array(read!("input",)); let (f1, b1) = conv(3, 64, 224, 7, 2, 3, 0, 1); let (f2, b2) = maxpool(64, 112, 3, 2, 1); let (f3, b3) = layer(64, 64, blocks[0], 56, 1, bottleneck); let (f4, b4) = layer(64 * expansion, 128, blocks[1], 56, 2, bottleneck); let (f5, b5) = layer(128 * expansion, 256, blocks[2], 28, 2, bottleneck); let (f6, b6) = layer(256 * expansion, 512, blocks[3], 14, 2, bottleneck); let (f7, b7) = avgpool(512 * expansion, 7); let (f8, b8) = gemv(1000, 512 * expansion); for _ in 0..4 { let beg = Instant::now(); for _ in 0..repeat { f1(*input, None); f2(b1); f3(b2); f4(b3); f5(b4); f6(b5); f7(b6); f8(b7); } println!("{}s", Instant::now().duration_since(beg).as_secs_f32() / repeat as f32); } fn softmax(x: &mut [f32]) { let mut m = f32::NEG_INFINITY; for x in x.iter() { m = m.max(*x); } let mut s = 0.0; for x in x.iter_mut() { s += (*x = (*x - m).exp(), *x).1; } for x in x.iter_mut() { *x /= s; } } let result = b8.transmute::<f32, _>(1000); softmax(result.flat()); let mut result = result.flat().iter().copied().enumerate().collect::<Vec<_>>(); result.sort_unstable_by(|&(_, x1), &(_, x2)| if x1 > x2 { Less } else if x1 < x2 { Greater } else { Equal }); for (i, x) in &result[0..5] { println!("class = {}, prob = {}", i, x) } }
conditional_block
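The block() wiring in the row above decides whether the skip path needs a 1x1 projection convolution: downsample is true when the stride shrinks the spatial size or when the channel count changes (inplanes != planes * expansion, with expansion 4 for bottleneck blocks and 1 otherwise), and the last conv of the block then adds either the projected input or the raw input before the ReLU. A small hedged Python sketch of just that decision; the function name and the example shapes are illustrative, not part of the Rust code:

def needs_downsample(inplanes: int, planes: int, stride: int, bottleneck: bool) -> bool:
    # Mirror of the Rust condition: stride != 1 || inplanes != planes * expansion
    expansion = 4 if bottleneck else 1
    return stride != 1 or inplanes != planes * expansion

# First block of the second stage in ResNet-50: 256 -> 128*4 channels at stride 2 -> projection needed.
assert needs_downsample(256, 128, 2, bottleneck=True)
# Second block of the first stage in ResNet-18: 64 -> 64 at stride 1 -> identity skip is enough.
assert not needs_downsample(64, 64, 1, bottleneck=False)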
resnet.rs
use plant::*; use std::{rc::Rc, time::Instant, env, cmp::Ordering::*}; macro_rules! read { ($s: expr, $($arg:tt)*) => { ArrayInit::Data(&std::fs::read(&format!(concat!("resnet_data/", $s), $($arg)*)).unwrap()) }; } type M = Slice<u8, usize>; static TILE_MAP: [([u32; 6], [u32; 12]); 26] = [ // resnet18, 34 ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]), ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]), ([64, 128, 56, 1, 2, 0], [4, 8, 2, 1, 1, 1, 14, 1, 2, 1, 1, 1]), ([64, 128, 56, 3, 2, 1], [16, 1, 8, 1, 1, 14, 14, 2, 2, 16, 1, 3]), ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]), ([128, 256, 28, 1, 2, 0], [8, 4, 8, 1, 1, 14, 14, 2, 1, 1, 1, 1]), ([128, 256, 28, 3, 2, 1], [8, 2, 16, 1, 1, 1, 14, 1, 1, 2, 1, 1]), ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]), ([256, 512, 14, 1, 2, 0], [16, 1, 8, 1, 7, 1, 7, 1, 1, 128, 1, 1]), ([256, 512, 14, 3, 2, 1], [8, 2, 32, 1, 1, 14, 7, 2, 1, 4, 1, 1]), ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]), // resent50, 101, 152,有5个shape前面出现过了 // ([3, 64, 224, 7, 2, 3], [16, 1, 4, 1, 1, 1, 16, 1, 7, 3, 1, 7]), ([64, 64, 56, 1, 1, 0], [4, 2, 1, 2, 1, 2, 8, 1, 1, 1, 1, 1]), // ([64, 64, 56, 3, 1, 1], [4, 2, 8, 1, 1, 28, 14, 2, 2, 2, 1, 1]), ([64, 256, 56, 1, 1, 0], [8, 1, 2, 1, 2, 2, 8, 1, 1, 1, 1, 1]), ([256, 64, 56, 1, 1, 0], [8, 1, 2, 1, 2, 1, 8, 1, 1, 1, 1, 1]), ([256, 128, 56, 1, 2, 0], [16, 2, 2, 1, 1, 1, 14, 1, 4, 1, 1, 1]), // ([128, 128, 28, 3, 1, 1], [4, 2, 16, 1, 1, 14, 14, 2, 1, 4, 1, 1]), ([128, 512, 28, 1, 1, 0], [4, 1, 8, 1, 1, 1, 14, 2, 1, 8, 1, 1]), ([256, 512, 56, 1, 2, 0], [16, 2, 8, 1, 1, 2, 14, 1, 2, 1, 1, 1]), ([512, 128, 28, 1, 1, 0], [1, 8, 8, 1, 1, 2, 14, 2, 1, 2, 1, 1]), ([512, 256, 28, 1, 2, 0], [8, 2, 2, 1, 1, 1, 14, 1, 2, 2, 1, 1]), // ([256, 256, 14, 3, 1, 1], [8, 1, 16, 2, 1, 1, 14, 1, 1, 16, 1, 1]), ([256, 1024, 14, 1, 1, 0], [8, 1, 64, 2, 1, 7, 14, 1, 1, 128, 1, 1]), ([512, 1024, 28, 1, 2, 0], [16, 1, 32, 2, 1, 1, 14, 2, 1, 2, 1, 1]), ([1024, 256, 14, 1, 1, 0], [8, 1, 2, 2, 1, 1, 14, 1, 1, 1024, 1, 1]), ([1024, 512, 14, 1, 2, 0], [8, 2, 1, 1, 1, 2, 7, 2, 1, 128, 1, 1]), // ([512, 512, 7, 3, 1, 1], [2, 4, 64, 1, 7, 1, 7, 1, 1, 1, 3, 1]), ([512, 2048, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 1, 1, 1]), ([1024, 2048, 14, 1, 2, 0], [4, 16, 1, 1, 1, 7, 7, 2, 1, 8, 1, 1]), ([2048, 512, 7, 1, 1, 0], [4, 1, 4, 7, 1, 1, 7, 1, 1, 2048, 1, 1]), ]; fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) -> (impl Fn(M, Option<M>), M) { let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu); let f = Func::new(&name); let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW let bias = f.buf("BIAS", F32, In, x![oc,]); let osize = (size - kern + 2 * pad) / stride + 1; let buf_add = if add != 0 { Some(f.buf("ADD", F32, In, x![oc, osize, osize])) } else { None }; let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW static mut LIB_CACHE: Vec<([u32; 8], Rc<Lib>)> = Vec::new(); let lib_cache = unsafe { &mut LIB_CACHE }; let lib = if let Some((_, x)) = lib_cache.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad, add, relu]) { println!("{} reused", name); x.clone() } else { println!("{} compiling", name); let [ff0, ff1, ff2, xx0, xx1, xx2, yy0, yy1, yy2, rc0, rx0, ry0] = TILE_MAP.iter().find(|(k, _)| k == &[ic, oc, size, kern, stride, pad]).unwrap().1; let pad_buf = if pad == 0 { a } else 
{ let pad_size = (osize - 1) * stride + kern; // <= size + 2 * pad,因为osize中/ stride不一定是整除 let pad_buf = f.buf("pad_buf", F32, Temp, x![ic, pad_size, pad_size]).set_loc(Local); f.comp("cache_pad", x![ic, pad_size, pad_size], x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })) .tags(0..=(if ic < 32 { 1 } else { 0 }), Parallel).store(pad_buf); pad_buf }; let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32)); b.set_expr(x!(pad_buf(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5))); let mut b_final = x!(b(i0, i1, i2, 0, 0, 0) + bias(i0)); if let Some(x) = buf_add { // add-relu b_final = x!(max::<f32>(0, b_final + x(i0, i1, i2))) } else if relu != 0 { b_final = x!(max::<f32>(0, b_final)); } let b_final = f.comp("B_final", x![oc, osize, osize], b_final); for b in &[b, b_final] { b.split(0, ff0).split(0, ff1).split(0, ff2) .split(4, xx0).split(4, xx1).split(4, xx2) .split(8, yy0).split(8, yy1).split(8, yy2); } b.split(12, rc0).split(14, rx0).split(16, ry0); // ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i, rc_o, rc_i, rx_o, rx_i, ry_o, ry_i b.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 12), (7, 14), (8, 16), (9, 2), (10, 6), (11, 10), (12, 13), (13, 15), (14, 17), (15, 3), (16, 7), (17, 11), ]); // ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, rc_o, rx_o, ry_o, ff_o_i, yy_o_i, xx_o_i, rc_i, rx_i, ry_i, ff_i, yy_i, xx_i // ff_o_o_o, ff_o_o_i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i b_final.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 2), (7, 6), (8, 10), (9, 3), (10, 7), (11, 11), ]); // ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, ff_o_i, yy_o_i, xx_o_i, ff_i, yy_i, xx_i b.tags(0..=(if oc / ff0 / ff1 / ff2 < 32 { 5 } else { 0 }), Parallel); if yy0 > 1 && yy0 < 32 { b.tag(17, Vectorize); } let (ff_local, xx_local, yy_local) = (ff0 * ff1, xx0 * xx1, yy0 * yy1); let b_local = f.buf("b_local", F32, Temp, x![ff_local, xx_local, yy_local]) .set_loc(Local).set_zero_init(true); b_local.alloc_at(b, 5); b.before(b_final, 6); b.store_at(b_local, x![i0 % ff_local, i1 % xx_local, i2 % yy_local]); b_final.store(buf_b); if pad_buf != a { pad_buf.alloc_at_func(); } f.compile_arg("-mprefer-vector-width=512"); let lib = Rc::new(if let Some(x) = buf_add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap()); lib_cache.push(([ic, oc, size, kern, stride, pad, add, relu], lib.clone())); lib }; static mut ID: u32 = 0; let id = unsafe { (ID, ID += 1).0 }; let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None)); let b1 = *b; (move |i, add| { if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); } }, b1) } // naive版本,能跑但很慢 // fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32) // -> (impl Fn(M, Option<M>), M) { // println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad); // // let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu); // let f = Func::new(&name); // let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW // let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW // let bias = f.buf("BIAS", F32, In, x![oc,]); // let osize = (size - kern 
+ 2 * pad) / stride + 1; // let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW // let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad], // x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })); // a_pad.set_inline(true); // // let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0))); // let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32)); // b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5))); // let (b_final, add) = if add != 0 { // add-relu // let add = f.buf("ADD", F32, In, x![oc, osize, osize]); // (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add)) // } else { // (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None) // }; // let b_final = f.comp("B_final", x![oc, osize, osize], b_final); // b_init.before(b, 3).before(b_final, 3); // b_init.store(buf_b); // b.store_at(buf_b, x![i0, i1, i2]); // b_final.store(buf_b); // // let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap(); // // static mut ID: u32 = 0; // let id = unsafe { (ID, ID += 1).0 }; // let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None)); // let b1 = *b; // (move |i, add| { // if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); } // }, b1) // } fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) { let f = Func::new("maxpool"); let a = f.buf("A", F32, In, x![chan, size, size]); let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad], x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 })); a_pad.set_inline(true); let osize = (size - kern + 2 * pad) / stride + 1; let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]); let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的 let b = f.comp("B", x![chan, osize, osize, kern, kern], x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2)))); b_init.before(b, 3); b_init.store(buf_b); b.store_at(buf_b, x![i0, i1, i2]); b.tag(0, Parallel); let lib = f.codegen(&[a, buf_b]).unwrap(); let b = buf_b.array(ArrayInit::None); let b1 = *b; (move |i| { (lib.f)([i, *b].as_ptr()) }, b1) } fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) { let f = Func::new("avgpool"); let a = f.buf("A", F32, In, x![chan, size, size]); let buf_b = f.buf("B", F32, Out, x![chan,]); let b_init = f.comp("B_init", x![chan,], x!(0)); let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0))); let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size)))); b_init.before(b, 1).before(b_final, 1); b_init.store(buf_b); b.store_at(buf_b, x![i0,]); b_final.store(buf_b); let lib = f.codegen(&[a, buf_b]).unwrap(); let b = buf_b.array(ArrayInit::None); let b1 = *b; (move |i| { (lib.f)([i, *b].as_ptr()) }, b1) } fn gemv(m: u32, n: u32) -> (impl Fn(M), M) { let f = Func::new("gemv"); let a = f.buf("A", F32, In, x![n,]); let w = f.buf("W", F32, In, x![m, n]); let c = f.buf("C", F32, In, x![m,]); let buf_b = f.buf("B", F32, Out, x![m,]); let b_init = f.comp("B_init", x![m,], x!(c(i0))); let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0))); b_init.store(buf_b); b.store_at(buf_b, x![i0,]); b_init.before(b, 1); b.tag(0, 
Parallel); let lib = f.codegen(&[a, w, c, buf_b]).unwrap(); let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None)); let b1 = *b; (move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1) } fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) { let expansion = if bottleneck { 4 } else { 1 }; let downsample = stride != 1 || inpl
ol) -> (impl Fn(M), M) { let expansion = if bottleneck { 4 } else { 1 }; let mut layers = Vec::with_capacity(blocks as _); layers.push(block(inplanes, planes, size, stride, bottleneck)); for _ in 1..blocks { layers.push(block(planes * expansion, planes, size / stride, 1, bottleneck)); } let b = layers.last().unwrap().1; (move |mut i| for (f, b) in &layers { f((i, i = *b).0); }, b) } fn main() { parallel_init(0); let args = env::args().collect::<Vec<_>>(); assert_eq!(args.len(), 3, "usage: cargo run --bin resnet <layer> <repeat>"); let repeat = args[2].parse::<u32>().unwrap(); let (blocks, bottleneck) = match args[1].as_str() { "18" => (&[2, 2, 2, 2], false), "34" => (&[3, 4, 6, 3], false), "50" => (&[3, 4, 6, 3], true), "101" => (&[3, 4, 23, 3], true), "152" => (&[3, 8, 36, 3], true), x => panic!("expect 1st argument to be [18, 34, 50, 101, 152], found {}", x), }; let expansion = if bottleneck { 4 } else { 1 }; let input = Func::new("_").buf("input", F32, In, x![3, 224, 224]).array(read!("input",)); let (f1, b1) = conv(3, 64, 224, 7, 2, 3, 0, 1); let (f2, b2) = maxpool(64, 112, 3, 2, 1); let (f3, b3) = layer(64, 64, blocks[0], 56, 1, bottleneck); let (f4, b4) = layer(64 * expansion, 128, blocks[1], 56, 2, bottleneck); let (f5, b5) = layer(128 * expansion, 256, blocks[2], 28, 2, bottleneck); let (f6, b6) = layer(256 * expansion, 512, blocks[3], 14, 2, bottleneck); let (f7, b7) = avgpool(512 * expansion, 7); let (f8, b8) = gemv(1000, 512 * expansion); for _ in 0..4 { let beg = Instant::now(); for _ in 0..repeat { f1(*input, None); f2(b1); f3(b2); f4(b3); f5(b4); f6(b5); f7(b6); f8(b7); } println!("{}s", Instant::now().duration_since(beg).as_secs_f32() / repeat as f32); } fn softmax(x: &mut [f32]) { let mut m = f32::NEG_INFINITY; for x in x.iter() { m = m.max(*x); } let mut s = 0.0; for x in x.iter_mut() { s += (*x = (*x - m).exp(), *x).1; } for x in x.iter_mut() { *x /= s; } } let result = b8.transmute::<f32, _>(1000); softmax(result.flat()); let mut result = result.flat().iter().copied().enumerate().collect::<Vec<_>>(); result.sort_unstable_by(|&(_, x1), &(_, x2)| if x1 > x2 { Less } else if x1 < x2 { Greater } else { Equal }); for (i, x) in &result[0..5] { println!("class = {}, prob = {}", i, x) } }
anes != planes * expansion; if bottleneck { let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1); let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1); let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1); let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None }; (Box::new(move |i| { if let Some((f4, _)) = &f4 { f4(i, None); } f1(i, None); f2(b1, None); f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i })); }), b3) } else { let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1); let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1); let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None }; (Box::new(move |i| { if let Some((f3, _)) = &f3 { f3(i, None); } f1(i, None); f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i })); }), b2) } } fn layer(inplanes: u32, planes: u32, blocks: u32, size: u32, stride: u32, bottleneck: bo
identifier_body
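The softmax routine inside main() subtracts the maximum logit before exponentiating, which keeps exp() from overflowing without changing the result, since softmax is invariant to adding a constant to every input. A minimal Python equivalent of that routine, included only to make the stability argument concrete:

import math

def softmax(xs):
    # Subtracting max(xs) is a mathematical no-op but prevents exp() overflow on large logits.
    m = max(xs)
    exps = [math.exp(x - m) for x in xs]
    s = sum(exps)
    return [e / s for e in exps]

probs = softmax([1000.0, 1001.0, 1002.0])   # naive exp(1000.0) would overflow a 64-bit float
assert abs(sum(probs) - 1.0) < 1e-9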
ClassifierAdapter.py
import subprocess import time from random import random, randint, randrange import uuid from bertopic import BERTopic import numpy as np from BuisnessLayer.AnalysisManager.DataObjects import AnalyzedTweet, Claim import pandas as pd import nltk # nltk.download('vader_lexicon') from nltk.sentiment.vader import SentimentIntensityAnalyzer import text2emotion as te from BuisnessLayer.AnalysisManager.DataObjects import * def get_emotion_by_id(id): if id == 1: return 'Anger' elif id == 2: return 'Disgust' elif id == 3: return 'Sad' elif id == 4: return 'Happy' elif id == 5: return 'Surprise' else: return 'Fear' author_columns = ['name', 'domain', 'author_guid', 'author_screen_name', 'author_full_name', 'author_osn_id', 'description', 'created_at', 'statuses_count', 'followers_count', 'favourites_count', 'friends_count', 'listed_count', 'language', 'profile_background_color', 'profile_background_tile', 'profile_banner_url', 'profile_image_url', 'profile_link_color', 'profile_sidebar_fill_color', 'profile_text_color', 'default_profile', 'contributors_enabled', 'default_profile_image', 'geo_enabled', 'protected', 'location', 'notifications', 'time_zone', 'url', 'utc_offset', 'verified', 'is_suspended_or_not_exists', 'default_post_format', 'likes_count', 'allow_questions', 'allow_anonymous_questions', 'image_size', 'media_path', 'author_type', 'bad_actors_collector_insertion_date', 'xml_importer_insertion_date', 'vico_dump_insertion_date', 'missing_data_complementor_insertion_date', 'bad_actors_markup_insertion_date', 'mark_missing_bad_actor_retweeters_insertion_date', 'author_sub_type', 'timeline_overlap_insertion_date', 'original_tweet_importer_insertion_date'] post_columns = ['post_id', 'author', 'guid', 'title', 'url', 'date', 'content', 'description', 'is_detailed', 'is_LB', 'is_valid', 'domain', 'author_guid', 'media_path', 'post_osn_guid', 'post_type', 'post_format', 'reblog_key', 'tags', 'is_created_via_bookmarklet', 'is_created_via_mobile', 'source_url', 'source_title', 'is_liked', 'post_state', 'post_osn_id', 'retweet_count', 'favorite_count', 'created_at', 'xml_importer_insertion_date', 'timeline_importer_insertion_date', 'original_tweet_importer_insertion_date'] claims_columns = ['claim_id', 'title', 'description', 'url', 'verdict_date', 'keywords', 'domain', 'verdict', 'category', 'sub_category'] connection_columns = ['claim_id', 'post_id'] # subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'D:\aviad fake v3\fake-news-framework_Py3',shell=True) # ours, should write also stub class ClassifierAdapter: def __init__(self): self.sid = SentimentIntensityAnalyzer() self.i=0 def get_sentiment(self,text) -> int: snt = self.sid.polarity_scores(text) return round(snt['pos']*3-snt['neg']*3) def
(self,text): emo = te.get_emotion(text) return max(emo, key=emo.get) # The output we received, def _trends_to_csv(self, trends_dict, path="C:/fake-news-framework_Py3/data/input/tryout/"): topics = [] tweets = [] authors = [] topic_tweet_connection = [] for trend in trends_dict.keys(): for topic in trends_dict[trend].claims: topics.append({'claim_id':topic.id,'title': topic.name}) # check what is the input for tweet in topic.tweets: topic_tweet_connection.append({'claim_id': topic.id, 'post_id': tweet.id}) tweets.append({'post_id':tweet.id,'author':tweet.author_name,'content':tweet.content,'retweet_count':tweet.retweet_count, 'favorite_count':tweet.favorite_count}) authors.append({'name':tweet.author_name}) pd.DataFrame(topics, columns=claims_columns).to_csv(path + "claims.csv",index=False) pd.DataFrame(tweets, columns=post_columns).to_csv(path + "posts.csv",index=False) pd.DataFrame(authors, columns=author_columns).to_csv(path + "authors.csv",index=False) pd.DataFrame(topic_tweet_connection, columns=connection_columns).to_csv(path + "claim_tweet_connection.csv",index=False) self.i+=1 def _classify_topic(self): subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'C:/fake-news-framework_Py3',shell=True) results = pd.read_csv("C:/fake-news-framework_Py3/data/output/D/labeled_predictions.csv")[['author_guid','pred']] return results def analyze_trends(self, trends_dict, callback): # trends_dict is type of dict {<trend name> : <Trend>} processed_data = {} if len(trends_dict)==0: return self._trends_to_csv(trends_dict) results = self._classify_topic() print("got classifier results\nparsing the results and running sentiment and emotion") for trend in trends_dict.keys(): print("start trend {}".format(trend)) if trend not in processed_data: processed_data[trend] = list() for topic in trends_dict[trend].claims: tweets = list() for tweet in topic.tweets: rand = randrange(100) if rand < 50: prediction = "fake" else: prediction = "true" # sentiment = randint(-3, 3) sentiment = self.get_sentiment(tweet.content) # rand = randrange(6) emotion = self.get_emotion(tweet.content) analyzed_tweet = AnalyzedTweet(tweet.id, tweet.author_name, tweet.content,tweet.location,tweet.date, tweet.trend_id,tweet.favorite_count,tweet.retweet_count, emotion, sentiment, prediction) tweets.append(analyzed_tweet) print(f"add tweet {tweet} to the topic {topic}") print(f"save the topic {topic}, with the list of tweets: {tweets}") processed_data[trend].append(Claim(topic.name, tweets,topic.id)) time.sleep(1) results['pred'] = results['pred'].apply(lambda x:"True" if x else "Fake") return callback(processed_data, trends_dict,results) def analyze_snopes(self, data, callback): # data is type of dict {<claim name> : list <tweets>} # print(data) # processed_data = {} # for key in data.keys(): # if key not in processed_data: # processed_data[key]={} # for tweet in data[key].keys(): # processed_data[key][tweet]={} # rand = randrange(100) # if rand < 50: # processed_data[key][tweet]['prediction'] = "wow it's fake" # else: # processed_data[key][tweet]['prediction'] = "100% true" # sentiment = randint(-3, 3) # processed_data[key][tweet]['sentiment'] = sentiment # rand = randrange(6) # processed_data[key][tweet]['emotional'] = get_emotion_by_id(rand) processed_data = {} for claim in data.keys(): # if claim not in processed_data: # processed_data[claim]= list() tweets = list() for tweet in data[claim]: rand = randrange(100) if rand < 50: prediction = "fake" else: prediction = "true" sentiment = 
randint(-3, 3) rand = randrange(6) emotion = get_emotion_by_id(rand) analyzed_tweet = AnalyzedTweet(tweet['id'], tweet['author'], tweet['content'], emotion, sentiment, prediction) tweets.append(analyzed_tweet) if claim in processed_data.keys(): processed_data[claim].append(Claim(claim, tweets)) else: processed_data[claim] = Claim(claim, tweets) time.sleep(1) return callback(processed_data) def get_claims_from_trend(self, trends_tweets): claims = {'claim1': {}, 'claim2': {}} for status in trends_tweets: rand = randrange(10) # print(status.id) # print(status.text) # print(status.author.name) if rand < 5: claims["claim1"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content} else: # print(status) claims["claim2"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content} return claims def _get_claim_from_trend(self, trends_tweets): print("topic model") df = pd.DataFrame([tweet.__dict__ for tweet in trends_tweets]) df = df[['id', 'content','author_name']] if len(df) < 15: print("less then 15 tweets, creating 1 topic") from collections import Counter claim_text = ' '.join([txt[0] for txt in Counter(" ".join(df['content'].str.replace("RT", '').values).split(' ')).most_common( 10)]) return [Claim(claim_text,trends_tweets,0)] print("build bertopic") bt = BERTopic() print("fit bertopic") topics = bt.fit_transform(df['content'].str.replace("RT", '').values) print("done fitting") df['topic_id'] = topics[0] topic_info = bt.get_topics() topics_text = {} for key in topic_info.keys(): lst = topic_info[key] topics_text[key] = ' '.join([x[0] for x in lst]) # df['topic_text'] = df['topic_id'].apply(lambda x:topics_text[x]) claims = [] print("attaching tweet object for topics") for t in topic_info.keys(): fitered = df[df['topic_id'] == t] tweets = list(filter(lambda t:t.id in fitered['id'].values,trends_tweets)) claims.append(Claim(topics_text[t], tweets,0)) return claims
get_emotion
identifier_name
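get_sentiment() in the row above compresses VADER's output into a small integer scale: polarity_scores() returns 'pos', 'neg', 'neu' and 'compound' fields, and the code rounds pos*3 - neg*3, which lands in roughly [-3, 3]. A hedged sketch of the same mapping with the analyzer stubbed out so it runs without downloading the NLTK lexicon; the score dicts below are made up for illustration, real ones come from SentimentIntensityAnalyzer().polarity_scores(text):

def sentiment_bucket(scores: dict) -> int:
    # Same formula as ClassifierAdapter.get_sentiment: round(pos*3 - neg*3) -> int in [-3, 3].
    return round(scores['pos'] * 3 - scores['neg'] * 3)

assert sentiment_bucket({'neg': 0.0, 'neu': 0.4, 'pos': 0.6, 'compound': 0.8}) == 2
assert sentiment_bucket({'neg': 0.7, 'neu': 0.3, 'pos': 0.0, 'compound': -0.8}) == -2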
ClassifierAdapter.py
import subprocess import time from random import random, randint, randrange import uuid from bertopic import BERTopic import numpy as np from BuisnessLayer.AnalysisManager.DataObjects import AnalyzedTweet, Claim import pandas as pd import nltk # nltk.download('vader_lexicon') from nltk.sentiment.vader import SentimentIntensityAnalyzer import text2emotion as te from BuisnessLayer.AnalysisManager.DataObjects import * def get_emotion_by_id(id): if id == 1: return 'Anger' elif id == 2: return 'Disgust' elif id == 3: return 'Sad' elif id == 4: return 'Happy' elif id == 5: return 'Surprise' else: return 'Fear' author_columns = ['name', 'domain', 'author_guid', 'author_screen_name', 'author_full_name', 'author_osn_id', 'description', 'created_at', 'statuses_count', 'followers_count', 'favourites_count', 'friends_count', 'listed_count', 'language', 'profile_background_color', 'profile_background_tile', 'profile_banner_url', 'profile_image_url', 'profile_link_color', 'profile_sidebar_fill_color', 'profile_text_color', 'default_profile', 'contributors_enabled', 'default_profile_image', 'geo_enabled', 'protected', 'location', 'notifications', 'time_zone', 'url', 'utc_offset', 'verified', 'is_suspended_or_not_exists', 'default_post_format', 'likes_count', 'allow_questions', 'allow_anonymous_questions', 'image_size', 'media_path', 'author_type', 'bad_actors_collector_insertion_date', 'xml_importer_insertion_date', 'vico_dump_insertion_date', 'missing_data_complementor_insertion_date', 'bad_actors_markup_insertion_date', 'mark_missing_bad_actor_retweeters_insertion_date', 'author_sub_type', 'timeline_overlap_insertion_date', 'original_tweet_importer_insertion_date'] post_columns = ['post_id', 'author', 'guid', 'title', 'url', 'date', 'content', 'description', 'is_detailed', 'is_LB', 'is_valid', 'domain', 'author_guid', 'media_path', 'post_osn_guid', 'post_type', 'post_format', 'reblog_key', 'tags', 'is_created_via_bookmarklet', 'is_created_via_mobile', 'source_url', 'source_title', 'is_liked', 'post_state', 'post_osn_id', 'retweet_count', 'favorite_count', 'created_at', 'xml_importer_insertion_date', 'timeline_importer_insertion_date', 'original_tweet_importer_insertion_date'] claims_columns = ['claim_id', 'title', 'description', 'url', 'verdict_date', 'keywords', 'domain', 'verdict', 'category', 'sub_category'] connection_columns = ['claim_id', 'post_id'] # subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'D:\aviad fake v3\fake-news-framework_Py3',shell=True) # ours, should write also stub class ClassifierAdapter: def __init__(self): self.sid = SentimentIntensityAnalyzer() self.i=0 def get_sentiment(self,text) -> int: snt = self.sid.polarity_scores(text) return round(snt['pos']*3-snt['neg']*3) def get_emotion(self,text):
def _trends_to_csv(self, trends_dict, path="C:/fake-news-framework_Py3/data/input/tryout/"): topics = [] tweets = [] authors = [] topic_tweet_connection = [] for trend in trends_dict.keys(): for topic in trends_dict[trend].claims: topics.append({'claim_id':topic.id,'title': topic.name}) # check what is the input for tweet in topic.tweets: topic_tweet_connection.append({'claim_id': topic.id, 'post_id': tweet.id}) tweets.append({'post_id':tweet.id,'author':tweet.author_name,'content':tweet.content,'retweet_count':tweet.retweet_count, 'favorite_count':tweet.favorite_count}) authors.append({'name':tweet.author_name}) pd.DataFrame(topics, columns=claims_columns).to_csv(path + "claims.csv",index=False) pd.DataFrame(tweets, columns=post_columns).to_csv(path + "posts.csv",index=False) pd.DataFrame(authors, columns=author_columns).to_csv(path + "authors.csv",index=False) pd.DataFrame(topic_tweet_connection, columns=connection_columns).to_csv(path + "claim_tweet_connection.csv",index=False) self.i+=1 def _classify_topic(self): subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'C:/fake-news-framework_Py3',shell=True) results = pd.read_csv("C:/fake-news-framework_Py3/data/output/D/labeled_predictions.csv")[['author_guid','pred']] return results def analyze_trends(self, trends_dict, callback): # trends_dict is type of dict {<trend name> : <Trend>} processed_data = {} if len(trends_dict)==0: return self._trends_to_csv(trends_dict) results = self._classify_topic() print("got classifier results\nparsing the results and running sentiment and emotion") for trend in trends_dict.keys(): print("start trend {}".format(trend)) if trend not in processed_data: processed_data[trend] = list() for topic in trends_dict[trend].claims: tweets = list() for tweet in topic.tweets: rand = randrange(100) if rand < 50: prediction = "fake" else: prediction = "true" # sentiment = randint(-3, 3) sentiment = self.get_sentiment(tweet.content) # rand = randrange(6) emotion = self.get_emotion(tweet.content) analyzed_tweet = AnalyzedTweet(tweet.id, tweet.author_name, tweet.content,tweet.location,tweet.date, tweet.trend_id,tweet.favorite_count,tweet.retweet_count, emotion, sentiment, prediction) tweets.append(analyzed_tweet) print(f"add tweet {tweet} to the topic {topic}") print(f"save the topic {topic}, with the list of tweets: {tweets}") processed_data[trend].append(Claim(topic.name, tweets,topic.id)) time.sleep(1) results['pred'] = results['pred'].apply(lambda x:"True" if x else "Fake") return callback(processed_data, trends_dict,results) def analyze_snopes(self, data, callback): # data is type of dict {<claim name> : list <tweets>} # print(data) # processed_data = {} # for key in data.keys(): # if key not in processed_data: # processed_data[key]={} # for tweet in data[key].keys(): # processed_data[key][tweet]={} # rand = randrange(100) # if rand < 50: # processed_data[key][tweet]['prediction'] = "wow it's fake" # else: # processed_data[key][tweet]['prediction'] = "100% true" # sentiment = randint(-3, 3) # processed_data[key][tweet]['sentiment'] = sentiment # rand = randrange(6) # processed_data[key][tweet]['emotional'] = get_emotion_by_id(rand) processed_data = {} for claim in data.keys(): # if claim not in processed_data: # processed_data[claim]= list() tweets = list() for tweet in data[claim]: rand = randrange(100) if rand < 50: prediction = "fake" else: prediction = "true" sentiment = randint(-3, 3) rand = randrange(6) emotion = get_emotion_by_id(rand) analyzed_tweet = 
AnalyzedTweet(tweet['id'], tweet['author'], tweet['content'], emotion, sentiment, prediction) tweets.append(analyzed_tweet) if claim in processed_data.keys(): processed_data[claim].append(Claim(claim, tweets)) else: processed_data[claim] = Claim(claim, tweets) time.sleep(1) return callback(processed_data) def get_claims_from_trend(self, trends_tweets): claims = {'claim1': {}, 'claim2': {}} for status in trends_tweets: rand = randrange(10) # print(status.id) # print(status.text) # print(status.author.name) if rand < 5: claims["claim1"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content} else: # print(status) claims["claim2"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content} return claims def _get_claim_from_trend(self, trends_tweets): print("topic model") df = pd.DataFrame([tweet.__dict__ for tweet in trends_tweets]) df = df[['id', 'content','author_name']] if len(df) < 15: print("less then 15 tweets, creating 1 topic") from collections import Counter claim_text = ' '.join([txt[0] for txt in Counter(" ".join(df['content'].str.replace("RT", '').values).split(' ')).most_common( 10)]) return [Claim(claim_text,trends_tweets,0)] print("build bertopic") bt = BERTopic() print("fit bertopic") topics = bt.fit_transform(df['content'].str.replace("RT", '').values) print("done fitting") df['topic_id'] = topics[0] topic_info = bt.get_topics() topics_text = {} for key in topic_info.keys(): lst = topic_info[key] topics_text[key] = ' '.join([x[0] for x in lst]) # df['topic_text'] = df['topic_id'].apply(lambda x:topics_text[x]) claims = [] print("attaching tweet object for topics") for t in topic_info.keys(): fitered = df[df['topic_id'] == t] tweets = list(filter(lambda t:t.id in fitered['id'].values,trends_tweets)) claims.append(Claim(topics_text[t], tweets,0)) return claims
emo = te.get_emotion(text) return max(emo, key=emo.get) # The output we received,
identifier_body
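get_emotion() keeps only the dominant label from text2emotion: te.get_emotion(text) returns a score per emotion, and max(emo, key=emo.get) picks the key with the highest score. A small sketch of that selection with a made-up score dict; the keys shown are the ones text2emotion typically reports, but treat them as an assumption here:

def dominant_emotion(scores: dict) -> str:
    # Take the key with the largest score - same idiom as get_emotion() above.
    return max(scores, key=scores.get)

# Hypothetical score dict standing in for te.get_emotion(text):
emo = {'Happy': 0.6, 'Angry': 0.1, 'Surprise': 0.1, 'Sad': 0.1, 'Fear': 0.1}
assert dominant_emotion(emo) == 'Happy'
# On ties, max() keeps the first key in iteration order; the original code inherits that behaviour.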
ClassifierAdapter.py
import subprocess import time from random import random, randint, randrange import uuid from bertopic import BERTopic import numpy as np from BuisnessLayer.AnalysisManager.DataObjects import AnalyzedTweet, Claim import pandas as pd import nltk # nltk.download('vader_lexicon') from nltk.sentiment.vader import SentimentIntensityAnalyzer import text2emotion as te from BuisnessLayer.AnalysisManager.DataObjects import * def get_emotion_by_id(id): if id == 1: return 'Anger' elif id == 2: return 'Disgust' elif id == 3: return 'Sad' elif id == 4: return 'Happy' elif id == 5: return 'Surprise' else: return 'Fear' author_columns = ['name', 'domain', 'author_guid', 'author_screen_name', 'author_full_name', 'author_osn_id', 'description', 'created_at', 'statuses_count', 'followers_count', 'favourites_count', 'friends_count', 'listed_count', 'language', 'profile_background_color', 'profile_background_tile', 'profile_banner_url', 'profile_image_url', 'profile_link_color', 'profile_sidebar_fill_color', 'profile_text_color', 'default_profile', 'contributors_enabled', 'default_profile_image', 'geo_enabled', 'protected', 'location', 'notifications', 'time_zone', 'url', 'utc_offset', 'verified', 'is_suspended_or_not_exists', 'default_post_format', 'likes_count', 'allow_questions', 'allow_anonymous_questions', 'image_size', 'media_path', 'author_type', 'bad_actors_collector_insertion_date', 'xml_importer_insertion_date', 'vico_dump_insertion_date', 'missing_data_complementor_insertion_date', 'bad_actors_markup_insertion_date', 'mark_missing_bad_actor_retweeters_insertion_date', 'author_sub_type', 'timeline_overlap_insertion_date', 'original_tweet_importer_insertion_date'] post_columns = ['post_id', 'author', 'guid', 'title', 'url', 'date', 'content', 'description', 'is_detailed', 'is_LB', 'is_valid', 'domain', 'author_guid', 'media_path', 'post_osn_guid', 'post_type', 'post_format', 'reblog_key', 'tags', 'is_created_via_bookmarklet', 'is_created_via_mobile', 'source_url', 'source_title', 'is_liked', 'post_state', 'post_osn_id', 'retweet_count', 'favorite_count', 'created_at', 'xml_importer_insertion_date', 'timeline_importer_insertion_date', 'original_tweet_importer_insertion_date'] claims_columns = ['claim_id', 'title', 'description', 'url', 'verdict_date', 'keywords', 'domain', 'verdict', 'category', 'sub_category'] connection_columns = ['claim_id', 'post_id'] # subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'D:\aviad fake v3\fake-news-framework_Py3',shell=True) # ours, should write also stub class ClassifierAdapter: def __init__(self): self.sid = SentimentIntensityAnalyzer() self.i=0 def get_sentiment(self,text) -> int: snt = self.sid.polarity_scores(text) return round(snt['pos']*3-snt['neg']*3) def get_emotion(self,text): emo = te.get_emotion(text) return max(emo, key=emo.get) # The output we received, def _trends_to_csv(self, trends_dict, path="C:/fake-news-framework_Py3/data/input/tryout/"): topics = [] tweets = [] authors = [] topic_tweet_connection = [] for trend in trends_dict.keys(): for topic in trends_dict[trend].claims: topics.append({'claim_id':topic.id,'title': topic.name}) # check what is the input for tweet in topic.tweets: topic_tweet_connection.append({'claim_id': topic.id, 'post_id': tweet.id}) tweets.append({'post_id':tweet.id,'author':tweet.author_name,'content':tweet.content,'retweet_count':tweet.retweet_count, 'favorite_count':tweet.favorite_count}) authors.append({'name':tweet.author_name}) pd.DataFrame(topics, 
columns=claims_columns).to_csv(path + "claims.csv",index=False) pd.DataFrame(tweets, columns=post_columns).to_csv(path + "posts.csv",index=False) pd.DataFrame(authors, columns=author_columns).to_csv(path + "authors.csv",index=False) pd.DataFrame(topic_tweet_connection, columns=connection_columns).to_csv(path + "claim_tweet_connection.csv",index=False) self.i+=1 def _classify_topic(self): subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'C:/fake-news-framework_Py3',shell=True) results = pd.read_csv("C:/fake-news-framework_Py3/data/output/D/labeled_predictions.csv")[['author_guid','pred']] return results def analyze_trends(self, trends_dict, callback): # trends_dict is type of dict {<trend name> : <Trend>} processed_data = {} if len(trends_dict)==0: return self._trends_to_csv(trends_dict) results = self._classify_topic() print("got classifier results\nparsing the results and running sentiment and emotion") for trend in trends_dict.keys():
time.sleep(1) results['pred'] = results['pred'].apply(lambda x:"True" if x else "Fake") return callback(processed_data, trends_dict,results) def analyze_snopes(self, data, callback): # data is type of dict {<claim name> : list <tweets>} # print(data) # processed_data = {} # for key in data.keys(): # if key not in processed_data: # processed_data[key]={} # for tweet in data[key].keys(): # processed_data[key][tweet]={} # rand = randrange(100) # if rand < 50: # processed_data[key][tweet]['prediction'] = "wow it's fake" # else: # processed_data[key][tweet]['prediction'] = "100% true" # sentiment = randint(-3, 3) # processed_data[key][tweet]['sentiment'] = sentiment # rand = randrange(6) # processed_data[key][tweet]['emotional'] = get_emotion_by_id(rand) processed_data = {} for claim in data.keys(): # if claim not in processed_data: # processed_data[claim]= list() tweets = list() for tweet in data[claim]: rand = randrange(100) if rand < 50: prediction = "fake" else: prediction = "true" sentiment = randint(-3, 3) rand = randrange(6) emotion = get_emotion_by_id(rand) analyzed_tweet = AnalyzedTweet(tweet['id'], tweet['author'], tweet['content'], emotion, sentiment, prediction) tweets.append(analyzed_tweet) if claim in processed_data.keys(): processed_data[claim].append(Claim(claim, tweets)) else: processed_data[claim] = Claim(claim, tweets) time.sleep(1) return callback(processed_data) def get_claims_from_trend(self, trends_tweets): claims = {'claim1': {}, 'claim2': {}} for status in trends_tweets: rand = randrange(10) # print(status.id) # print(status.text) # print(status.author.name) if rand < 5: claims["claim1"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content} else: # print(status) claims["claim2"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content} return claims def _get_claim_from_trend(self, trends_tweets): print("topic model") df = pd.DataFrame([tweet.__dict__ for tweet in trends_tweets]) df = df[['id', 'content','author_name']] if len(df) < 15: print("less then 15 tweets, creating 1 topic") from collections import Counter claim_text = ' '.join([txt[0] for txt in Counter(" ".join(df['content'].str.replace("RT", '').values).split(' ')).most_common( 10)]) return [Claim(claim_text,trends_tweets,0)] print("build bertopic") bt = BERTopic() print("fit bertopic") topics = bt.fit_transform(df['content'].str.replace("RT", '').values) print("done fitting") df['topic_id'] = topics[0] topic_info = bt.get_topics() topics_text = {} for key in topic_info.keys(): lst = topic_info[key] topics_text[key] = ' '.join([x[0] for x in lst]) # df['topic_text'] = df['topic_id'].apply(lambda x:topics_text[x]) claims = [] print("attaching tweet object for topics") for t in topic_info.keys(): fitered = df[df['topic_id'] == t] tweets = list(filter(lambda t:t.id in fitered['id'].values,trends_tweets)) claims.append(Claim(topics_text[t], tweets,0)) return claims
print("start trend {}".format(trend)) if trend not in processed_data: processed_data[trend] = list() for topic in trends_dict[trend].claims: tweets = list() for tweet in topic.tweets: rand = randrange(100) if rand < 50: prediction = "fake" else: prediction = "true" # sentiment = randint(-3, 3) sentiment = self.get_sentiment(tweet.content) # rand = randrange(6) emotion = self.get_emotion(tweet.content) analyzed_tweet = AnalyzedTweet(tweet.id, tweet.author_name, tweet.content,tweet.location,tweet.date, tweet.trend_id,tweet.favorite_count,tweet.retweet_count, emotion, sentiment, prediction) tweets.append(analyzed_tweet) print(f"add tweet {tweet} to the topic {topic}") print(f"save the topic {topic}, with the list of tweets: {tweets}") processed_data[trend].append(Claim(topic.name, tweets,topic.id))
conditional_block
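In the ClassifierAdapter.py sample above, _get_claim_from_trend fits a BERTopic model on the tweet texts (with "RT" stripped) and turns each topic's top words into a claim title. Below is a minimal, standalone sketch of that flow, assuming only the bertopic package and a plain list of tweet strings; the function and variable names here are illustrative, not taken from the source.

from bertopic import BERTopic

def claims_from_texts(texts):
    # Strip retweet markers before fitting, as the source does
    cleaned = [t.replace("RT", "") for t in texts]
    model = BERTopic()
    topic_ids, _ = model.fit_transform(cleaned)
    # Build a short claim title per topic from its top words
    titles = {tid: " ".join(w for w, _ in words) for tid, words in model.get_topics().items()}
    # Group the original texts by the topic each was assigned to
    grouped = {}
    for text, tid in zip(texts, topic_ids):
        grouped.setdefault(tid, []).append(text)
    return titles, grouped

The sample additionally falls back to a single word-frequency "topic" when fewer than 15 tweets are available, which avoids fitting BERTopic on a very small corpus.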
ClassifierAdapter.py
import subprocess import time from random import random, randint, randrange import uuid from bertopic import BERTopic import numpy as np from BuisnessLayer.AnalysisManager.DataObjects import AnalyzedTweet, Claim import pandas as pd import nltk # nltk.download('vader_lexicon') from nltk.sentiment.vader import SentimentIntensityAnalyzer import text2emotion as te from BuisnessLayer.AnalysisManager.DataObjects import * def get_emotion_by_id(id): if id == 1: return 'Anger' elif id == 2: return 'Disgust' elif id == 3: return 'Sad' elif id == 4: return 'Happy' elif id == 5: return 'Surprise' else: return 'Fear' author_columns = ['name', 'domain', 'author_guid', 'author_screen_name', 'author_full_name', 'author_osn_id', 'description', 'created_at', 'statuses_count', 'followers_count', 'favourites_count', 'friends_count', 'listed_count', 'language', 'profile_background_color', 'profile_background_tile', 'profile_banner_url', 'profile_image_url', 'profile_link_color', 'profile_sidebar_fill_color', 'profile_text_color', 'default_profile', 'contributors_enabled', 'default_profile_image', 'geo_enabled', 'protected', 'location', 'notifications', 'time_zone', 'url', 'utc_offset', 'verified', 'is_suspended_or_not_exists', 'default_post_format', 'likes_count', 'allow_questions', 'allow_anonymous_questions', 'image_size', 'media_path', 'author_type', 'bad_actors_collector_insertion_date', 'xml_importer_insertion_date', 'vico_dump_insertion_date', 'missing_data_complementor_insertion_date', 'bad_actors_markup_insertion_date', 'mark_missing_bad_actor_retweeters_insertion_date', 'author_sub_type', 'timeline_overlap_insertion_date', 'original_tweet_importer_insertion_date'] post_columns = ['post_id', 'author', 'guid', 'title', 'url', 'date', 'content', 'description', 'is_detailed', 'is_LB', 'is_valid', 'domain', 'author_guid', 'media_path', 'post_osn_guid', 'post_type', 'post_format', 'reblog_key', 'tags', 'is_created_via_bookmarklet', 'is_created_via_mobile', 'source_url', 'source_title', 'is_liked', 'post_state', 'post_osn_id', 'retweet_count', 'favorite_count', 'created_at', 'xml_importer_insertion_date', 'timeline_importer_insertion_date', 'original_tweet_importer_insertion_date'] claims_columns = ['claim_id', 'title', 'description', 'url', 'verdict_date', 'keywords', 'domain', 'verdict', 'category', 'sub_category'] connection_columns = ['claim_id', 'post_id'] # subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'D:\aviad fake v3\fake-news-framework_Py3',shell=True) # ours, should write also stub class ClassifierAdapter: def __init__(self): self.sid = SentimentIntensityAnalyzer() self.i=0 def get_sentiment(self,text) -> int: snt = self.sid.polarity_scores(text) return round(snt['pos']*3-snt['neg']*3) def get_emotion(self,text): emo = te.get_emotion(text) return max(emo, key=emo.get) # The output we received, def _trends_to_csv(self, trends_dict, path="C:/fake-news-framework_Py3/data/input/tryout/"): topics = [] tweets = [] authors = [] topic_tweet_connection = [] for trend in trends_dict.keys(): for topic in trends_dict[trend].claims: topics.append({'claim_id':topic.id,'title': topic.name}) # check what is the input for tweet in topic.tweets: topic_tweet_connection.append({'claim_id': topic.id, 'post_id': tweet.id}) tweets.append({'post_id':tweet.id,'author':tweet.author_name,'content':tweet.content,'retweet_count':tweet.retweet_count, 'favorite_count':tweet.favorite_count}) authors.append({'name':tweet.author_name}) pd.DataFrame(topics, 
columns=claims_columns).to_csv(path + "claims.csv",index=False) pd.DataFrame(tweets, columns=post_columns).to_csv(path + "posts.csv",index=False) pd.DataFrame(authors, columns=author_columns).to_csv(path + "authors.csv",index=False) pd.DataFrame(topic_tweet_connection, columns=connection_columns).to_csv(path + "claim_tweet_connection.csv",index=False) self.i+=1 def _classify_topic(self): subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'C:/fake-news-framework_Py3',shell=True) results = pd.read_csv("C:/fake-news-framework_Py3/data/output/D/labeled_predictions.csv")[['author_guid','pred']] return results def analyze_trends(self, trends_dict, callback): # trends_dict is type of dict {<trend name> : <Trend>} processed_data = {} if len(trends_dict)==0: return self._trends_to_csv(trends_dict) results = self._classify_topic() print("got classifier results\nparsing the results and running sentiment and emotion") for trend in trends_dict.keys(): print("start trend {}".format(trend)) if trend not in processed_data: processed_data[trend] = list() for topic in trends_dict[trend].claims: tweets = list() for tweet in topic.tweets: rand = randrange(100) if rand < 50: prediction = "fake" else: prediction = "true" # sentiment = randint(-3, 3) sentiment = self.get_sentiment(tweet.content) # rand = randrange(6) emotion = self.get_emotion(tweet.content) analyzed_tweet = AnalyzedTweet(tweet.id, tweet.author_name, tweet.content,tweet.location,tweet.date, tweet.trend_id,tweet.favorite_count,tweet.retweet_count, emotion, sentiment, prediction) tweets.append(analyzed_tweet) print(f"add tweet {tweet} to the topic {topic}") print(f"save the topic {topic}, with the list of tweets: {tweets}") processed_data[trend].append(Claim(topic.name, tweets,topic.id))
results['pred'] = results['pred'].apply(lambda x:"True" if x else "Fake") return callback(processed_data, trends_dict,results) def analyze_snopes(self, data, callback): # data is type of dict {<claim name> : list <tweets>} # print(data) # processed_data = {} # for key in data.keys(): # if key not in processed_data: # processed_data[key]={} # for tweet in data[key].keys(): # processed_data[key][tweet]={} # rand = randrange(100) # if rand < 50: # processed_data[key][tweet]['prediction'] = "wow it's fake" # else: # processed_data[key][tweet]['prediction'] = "100% true" # sentiment = randint(-3, 3) # processed_data[key][tweet]['sentiment'] = sentiment # rand = randrange(6) # processed_data[key][tweet]['emotional'] = get_emotion_by_id(rand) processed_data = {} for claim in data.keys(): # if claim not in processed_data: # processed_data[claim]= list() tweets = list() for tweet in data[claim]: rand = randrange(100) if rand < 50: prediction = "fake" else: prediction = "true" sentiment = randint(-3, 3) rand = randrange(6) emotion = get_emotion_by_id(rand) analyzed_tweet = AnalyzedTweet(tweet['id'], tweet['author'], tweet['content'], emotion, sentiment, prediction) tweets.append(analyzed_tweet) if claim in processed_data.keys(): processed_data[claim].append(Claim(claim, tweets)) else: processed_data[claim] = Claim(claim, tweets) time.sleep(1) return callback(processed_data) def get_claims_from_trend(self, trends_tweets): claims = {'claim1': {}, 'claim2': {}} for status in trends_tweets: rand = randrange(10) # print(status.id) # print(status.text) # print(status.author.name) if rand < 5: claims["claim1"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content} else: # print(status) claims["claim2"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content} return claims def _get_claim_from_trend(self, trends_tweets): print("topic model") df = pd.DataFrame([tweet.__dict__ for tweet in trends_tweets]) df = df[['id', 'content','author_name']] if len(df) < 15: print("less then 15 tweets, creating 1 topic") from collections import Counter claim_text = ' '.join([txt[0] for txt in Counter(" ".join(df['content'].str.replace("RT", '').values).split(' ')).most_common( 10)]) return [Claim(claim_text,trends_tweets,0)] print("build bertopic") bt = BERTopic() print("fit bertopic") topics = bt.fit_transform(df['content'].str.replace("RT", '').values) print("done fitting") df['topic_id'] = topics[0] topic_info = bt.get_topics() topics_text = {} for key in topic_info.keys(): lst = topic_info[key] topics_text[key] = ' '.join([x[0] for x in lst]) # df['topic_text'] = df['topic_id'].apply(lambda x:topics_text[x]) claims = [] print("attaching tweet object for topics") for t in topic_info.keys(): fitered = df[df['topic_id'] == t] tweets = list(filter(lambda t:t.id in fitered['id'].values,trends_tweets)) claims.append(Claim(topics_text[t], tweets,0)) return claims
time.sleep(1)
random_line_split
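In the ClassifierAdapter.py sample above, get_sentiment rescales VADER's positive/negative scores into a small integer range and get_emotion keeps the arg-max of text2emotion's scores. A compact standalone sketch of those two helpers, assuming nltk's vader_lexicon has been downloaded:

from nltk.sentiment.vader import SentimentIntensityAnalyzer
import text2emotion as te

sid = SentimentIntensityAnalyzer()

def sentiment_score(text):
    # VADER returns 'pos', 'neg', 'neu', 'compound'; scale pos minus neg into roughly [-3, 3]
    s = sid.polarity_scores(text)
    return round(s['pos'] * 3 - s['neg'] * 3)

def dominant_emotion(text):
    # text2emotion returns a dict of emotion scores; keep the strongest one
    emo = te.get_emotion(text)
    return max(emo, key=emo.get)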
MapScreen.js
import React, { Component } from 'react'; import { FlatList, SafeAreaView, Dimensions, StyleSheet, View, Image, TextInput, Modal } from 'react-native'; import MapView from 'react-native-maps'; import MapViewDirections from 'react-native-maps-directions'; import { Marker } from "react-native-maps"; import { Text } from 'react-native'; import { TouchableOpacity } from 'react-native-gesture-handler'; import Buttons from '../styles/Buttons.js' import Colors from '../styles/Colors.js'; import { MaterialIcons } from '@expo/vector-icons'; import axios from 'axios' const {height, width} = Dimensions.get('window'); const LATITUDE = 34.06279; const LONGITUDE = -118.44390; const LATITUDE_DELTA = 0.0922; const LONGITUDE_DELTA = .05; const GOOGLE_MAPS_APIKEY = 'AIzaSyBx4y6okTMakFLVwR5PKVN9kyqpbJFykrE'; export default class MapScreen extends Component { constructor(props) { super(props); this.state = { startValue: 'Start', initialCoords:[ {latitude:34.073026, longitude:-118.465619}, {latitude:34.067223, longitude:-118.410851} ], coordinates: [ { latitude: 34.06279, longitude: -118.44390, }, { latitude: 34.06241, longitude: -118.44375, }, ], clocation: { latitude: 34.06637, longitude:-118.44524, }, dur: null, dis: null, saveWalk:{ startingLocation: null, destinationLocation: null, forZoom: { distance: Number, duration: Number, coordinates: [] } }, modalVisible: false, currentPath: "Current Path", premadePath: false, description: "", name: "", }; this.mapView = null; } // Start and Stop route button functionality onStartWalk = () =>{ this.setState({ saveWalk:{ startingLocation:this.state.coordinates[1], destinationLocation: this.state.coordinates[0]}, }) if(this.state.startValue=='Start'){ this.setState({ startValue:'Stop' }); this.mapView.fitToCoordinates(this.state.forZoom.coordinates,{ edgePadding: { right: (width / 10), bottom: (height / 20), left: (width / 10), top: (height / 20), } } ); } else{ this.setState({ startValue:'Start' }); this.mapView.fitToCoordinates(this.state.initialCoords,{ edgePadding: { right: width, bottom: height, left: width, top: height } } ); } } // Saves user's walks to database saveWalk = (visible) => { var params = JSON.stringify({ 'name': this.state.name, 'description': this.state.description, 'coordinates': this.state.coordinates, 'profile': [global.session_id], }); axios .post("http://127.0.0.1:8000/walks/", params, {"headers": { 'content-Type': 'application/json' }}) .then(this.setModalVisible(visible)) .catch(error => console.log(error) ); console.log(params) } setModalVisible = (visible) => { this.setState({modalVisible:visible}); } // When map is pressed, route is created from current location onMapPress = (e) => { this.setState({ coordinates: [ e.nativeEvent.coordinate, this.state.clocation ], currentPath: "Current Path", premadePath: false, }); } onRoutePress = (long, lat ) => { this.setState({ coordinates: [ {latitude: lat, longitude: long}, this.state.clocation ], }); } // Getting walks data for explore portion componentDidMount() { this.currentLocation(); this.intervalID = setInterval(this.currentLocation.bind(this), 1000); axios .get("http://127.0.0.1:8000/walks") .then(response => this.setState({walks: response.data.results})) .catch(error => console.log(error) ); } componentWillUnmount() { clearInterval(this.intervalID); } currentLocation = () => { navigator.geolocation.getCurrentPosition( position => { const latitude = position.coords.latitude; const longitude = position.coords.longitude; this.setState({ clocation: {latitude, longitude} }); }, { 
enableHighAccuracy: true, timeout:20000, maximumAge: 1000} ); }; setPremadePath = (item) => { this.setState({ coordinates: item.coordinates, currentPath: item.name, premadePath: true, }) console.log(item.coordinates) }
() { const { modalVisible } = this.state; let button; button= <TouchableOpacity style={Buttons.brownbuttonSmall} onPress={() => this.setModalVisible(!modalVisible)}> <Text style={{color:'white', alignSelf: "center"}}>Save</Text> </TouchableOpacity> return ( <View style={styles.container}> {/* Map and current route display */} <View style={{width: '100%', height: '65%', padding: '2%', alignSelf: 'center'}}> <MapView initialRegion={{ latitude: LATITUDE, longitude: LONGITUDE, latitudeDelta: LATITUDE_DELTA, longitudeDelta: LONGITUDE_DELTA, }} style={StyleSheet.absoluteFill} ref={c => this.mapView = c} onPress={this.onMapPress} zoomEnabled={true} > {this.state.coordinates.map((coordinate, index) => <Marker key={`coordinate_${index}`} coordinate={coordinate} /> )} {(this.state.coordinates.length >= 2) && ( <MapViewDirections origin={this.state.coordinates[0]} destination={this.state.coordinates[this.state.coordinates.length-1]} waypoints={this.state.coordinates} mode="WALKING" apikey={GOOGLE_MAPS_APIKEY} strokeWidth={3} strokeColor="blue" optimizeWaypoints={true} onStart={(params) => { }} onReady={result => { this.setState({ dis: result.distance, dur: result.duration, forZoom: result }) }} onError={(errorMessage) => { }} /> )} <Marker coordinate={{latitude: this.state.clocation.latitude, longitude: this.state.clocation.longitude}}> <MaterialIcons name="my-location" size={24} color={Colors.brown} /> </Marker> </MapView> <Text style={{marginTop: '10%', alignSelf: 'center', fontStyle: "italic", color: '#675a5a', backgroundColor: 'white'}}> Click to where you want to go on the map </Text> {/* Current Path window on map */} <View style={{ flex: 'stretch', backgroundColor: '#fffae3', padding: 3, borderRadius: 10, borderWidth: 2, borderColor: '#675a5a'}}> <Text style={styles.title}>{this.state.currentPath}</Text> <View style={{flexDirection: "row"}}> <View style={{width: '50%'}}> <Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Distance: {((this.state.dis)/1.609).toFixed(2)} miles</Text> <Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Time: {((this.state.dur)/1).toFixed(2)} min.</Text> </View> <View style={{flex: 1, width: '50%', alignItems: 'flex-end', marginRight: '2%'}}> {button} </View> </View> </View> </View> <TouchableOpacity style={{padding: '2%', margin: '1%', backgroundColor: Colors.brown, borderRadius: 13, width: 400}} onPress={ this.onStartWalk } > <Text style={{color: "white", fontSize: 15, alignSelf: "center"}}>{this.state.startValue}</Text> </TouchableOpacity> {/* Window for saving a route */} <Modal style ={{marginTop: "50%"}} animationType="slide" transparent={true} visible={modalVisible} onRequestClose={() => { this.setModalVisible(!modalVisible); }} > <View style={{marginTop: '50%'}}> <View style={styles.saveView}> <Text style={[styles.modalText, {marginTop: '5%'}]}>Title</Text> <TextInput style={styles.input} placeholder="Enter a title" onChangeText={(name) => this.setState({name})} /> <Text style={styles.modalText}>Description</Text> <TextInput style={styles.input} placeholder="Enter a description" returnKeyType="done" onChangeText={(description) => this.setState({description})} /> <TouchableOpacity style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]} onPress={() => this.saveWalk(!modalVisible)} > <Text style={{alignSelf: 'center', color: 'white'}}>Save</Text> </TouchableOpacity> <TouchableOpacity style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]} onPress={() => this.setModalVisible(!modalVisible)} > 
<Text style={{alignSelf: 'center', color: 'white'}}>Cancel</Text> </TouchableOpacity> </View> </View> </Modal> {/* Explore other routes portion */} <View style={{width: '100%', height: '65%', alignSelf: 'center', flex: 1, backgroundColor: '#F4ECC6', padding: 3, borderWidth: 1, borderColor: '#675a5a'}}> <SafeAreaView style={{marginBottom: '6%'}}> <Text style={styles.title}> Explore </Text> <FlatList data={this.state.walks} renderItem={({item}) => ( <TouchableOpacity style={styles.item} onPress={() => this.setPremadePath(item)}> <Text style={styles.pathTitle}>{item.name}</Text> <Text style={styles.detailsTwo}>{item.description}</Text> </TouchableOpacity> )} keyExtractor={item => (item.id).toString()} /> </SafeAreaView> </View> </View> ); } } const styles = StyleSheet.create({ container: { flex: 1, backgroundColor: Colors.yellow, alignItems: 'center', justifyContent: 'flex-start', }, title :{ fontSize: 17, color: '#675A5A', fontWeight: "500", alignSelf: 'center', }, item: { backgroundColor: '#fffae3', padding: '2%', marginVertical: '2%', justifyContent: 'flex-start', }, pathTitle: { fontSize: 15, fontWeight: '500', color: '#675A5A', alignSelf: 'flex-start', marginBottom: '1%' }, detailsOne: { fontSize: 12 }, detailsTwo: { fontSize: 12, fontStyle: 'italic', marginTop: '1%', }, saveView: { backgroundColor: Colors.lightblue, width: "80%", height: '68%', alignSelf: "center", borderRadius: 10, borderWidth: 2, borderColor: Colors.brown, }, modalText: { alignSelf: 'center', fontSize: 15, padding: 1, color: Colors.brown, fontWeight: "500" }, input: { width: "80%", borderWidth: 1, height: '15%', marginVertical: '3%', padding: '2%', fontSize: 15, backgroundColor: '#D7EBF4', borderColor: '#675a5a', borderBottomWidth: 3, alignSelf: 'center' } });
render
identifier_name
MapScreen.js
import React, { Component } from 'react'; import { FlatList, SafeAreaView, Dimensions, StyleSheet, View, Image, TextInput, Modal } from 'react-native'; import MapView from 'react-native-maps'; import MapViewDirections from 'react-native-maps-directions'; import { Marker } from "react-native-maps"; import { Text } from 'react-native'; import { TouchableOpacity } from 'react-native-gesture-handler'; import Buttons from '../styles/Buttons.js' import Colors from '../styles/Colors.js'; import { MaterialIcons } from '@expo/vector-icons'; import axios from 'axios' const {height, width} = Dimensions.get('window'); const LATITUDE = 34.06279; const LONGITUDE = -118.44390; const LATITUDE_DELTA = 0.0922; const LONGITUDE_DELTA = .05; const GOOGLE_MAPS_APIKEY = 'AIzaSyBx4y6okTMakFLVwR5PKVN9kyqpbJFykrE'; export default class MapScreen extends Component { constructor(props) { super(props); this.state = { startValue: 'Start', initialCoords:[ {latitude:34.073026, longitude:-118.465619}, {latitude:34.067223, longitude:-118.410851} ], coordinates: [ { latitude: 34.06279, longitude: -118.44390, }, { latitude: 34.06241, longitude: -118.44375, }, ], clocation: { latitude: 34.06637, longitude:-118.44524, }, dur: null, dis: null, saveWalk:{ startingLocation: null, destinationLocation: null, forZoom: { distance: Number, duration: Number, coordinates: [] } }, modalVisible: false, currentPath: "Current Path", premadePath: false, description: "", name: "", }; this.mapView = null; } // Start and Stop route button functionality onStartWalk = () =>{ this.setState({ saveWalk:{ startingLocation:this.state.coordinates[1], destinationLocation: this.state.coordinates[0]}, }) if(this.state.startValue=='Start'){ this.setState({ startValue:'Stop' }); this.mapView.fitToCoordinates(this.state.forZoom.coordinates,{ edgePadding: { right: (width / 10), bottom: (height / 20), left: (width / 10), top: (height / 20), } } ); } else{ this.setState({ startValue:'Start' }); this.mapView.fitToCoordinates(this.state.initialCoords,{ edgePadding: { right: width, bottom: height, left: width, top: height } } ); } } // Saves user's walks to database saveWalk = (visible) => { var params = JSON.stringify({ 'name': this.state.name, 'description': this.state.description, 'coordinates': this.state.coordinates, 'profile': [global.session_id], }); axios .post("http://127.0.0.1:8000/walks/", params, {"headers": { 'content-Type': 'application/json' }}) .then(this.setModalVisible(visible)) .catch(error => console.log(error) ); console.log(params) } setModalVisible = (visible) => { this.setState({modalVisible:visible}); } // When map is pressed, route is created from current location onMapPress = (e) => { this.setState({ coordinates: [ e.nativeEvent.coordinate, this.state.clocation ], currentPath: "Current Path", premadePath: false, }); } onRoutePress = (long, lat ) => { this.setState({ coordinates: [ {latitude: lat, longitude: long}, this.state.clocation ], }); } // Getting walks data for explore portion componentDidMount() { this.currentLocation(); this.intervalID = setInterval(this.currentLocation.bind(this), 1000); axios .get("http://127.0.0.1:8000/walks") .then(response => this.setState({walks: response.data.results})) .catch(error => console.log(error) ); } componentWillUnmount() { clearInterval(this.intervalID); } currentLocation = () => { navigator.geolocation.getCurrentPosition( position => { const latitude = position.coords.latitude; const longitude = position.coords.longitude; this.setState({ clocation: {latitude, longitude} }); }, { 
enableHighAccuracy: true, timeout:20000, maximumAge: 1000} ); }; setPremadePath = (item) => { this.setState({ coordinates: item.coordinates, currentPath: item.name, premadePath: true, }) console.log(item.coordinates) } render() { const { modalVisible } = this.state; let button; button= <TouchableOpacity style={Buttons.brownbuttonSmall} onPress={() => this.setModalVisible(!modalVisible)}> <Text style={{color:'white', alignSelf: "center"}}>Save</Text> </TouchableOpacity> return ( <View style={styles.container}> {/* Map and current route display */} <View style={{width: '100%', height: '65%', padding: '2%', alignSelf: 'center'}}> <MapView initialRegion={{ latitude: LATITUDE, longitude: LONGITUDE, latitudeDelta: LATITUDE_DELTA, longitudeDelta: LONGITUDE_DELTA, }} style={StyleSheet.absoluteFill} ref={c => this.mapView = c} onPress={this.onMapPress} zoomEnabled={true} > {this.state.coordinates.map((coordinate, index) => <Marker key={`coordinate_${index}`} coordinate={coordinate} /> )} {(this.state.coordinates.length >= 2) && ( <MapViewDirections origin={this.state.coordinates[0]} destination={this.state.coordinates[this.state.coordinates.length-1]} waypoints={this.state.coordinates} mode="WALKING" apikey={GOOGLE_MAPS_APIKEY} strokeWidth={3} strokeColor="blue" optimizeWaypoints={true} onStart={(params) => { }} onReady={result => { this.setState({ dis: result.distance, dur: result.duration, forZoom: result }) }} onError={(errorMessage) => { }} /> )} <Marker coordinate={{latitude: this.state.clocation.latitude, longitude: this.state.clocation.longitude}}> <MaterialIcons name="my-location" size={24} color={Colors.brown} /> </Marker> </MapView> <Text style={{marginTop: '10%', alignSelf: 'center', fontStyle: "italic", color: '#675a5a', backgroundColor: 'white'}}> Click to where you want to go on the map </Text> {/* Current Path window on map */} <View style={{ flex: 'stretch', backgroundColor: '#fffae3', padding: 3, borderRadius: 10, borderWidth: 2, borderColor: '#675a5a'}}> <Text style={styles.title}>{this.state.currentPath}</Text> <View style={{flexDirection: "row"}}> <View style={{width: '50%'}}> <Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Distance: {((this.state.dis)/1.609).toFixed(2)} miles</Text> <Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Time: {((this.state.dur)/1).toFixed(2)} min.</Text> </View> <View style={{flex: 1, width: '50%', alignItems: 'flex-end', marginRight: '2%'}}> {button} </View> </View> </View> </View> <TouchableOpacity style={{padding: '2%', margin: '1%', backgroundColor: Colors.brown, borderRadius: 13, width: 400}} onPress={ this.onStartWalk } > <Text style={{color: "white", fontSize: 15, alignSelf: "center"}}>{this.state.startValue}</Text> </TouchableOpacity> {/* Window for saving a route */} <Modal style ={{marginTop: "50%"}} animationType="slide" transparent={true} visible={modalVisible} onRequestClose={() => { this.setModalVisible(!modalVisible); }} > <View style={{marginTop: '50%'}}> <View style={styles.saveView}> <Text style={[styles.modalText, {marginTop: '5%'}]}>Title</Text> <TextInput style={styles.input} placeholder="Enter a title" onChangeText={(name) => this.setState({name})} /> <Text style={styles.modalText}>Description</Text> <TextInput style={styles.input} placeholder="Enter a description" returnKeyType="done" onChangeText={(description) => this.setState({description})} /> <TouchableOpacity style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]} onPress={() => this.saveWalk(!modalVisible)} > <Text 
style={{alignSelf: 'center', color: 'white'}}>Save</Text> </TouchableOpacity> <TouchableOpacity style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]} onPress={() => this.setModalVisible(!modalVisible)} > <Text style={{alignSelf: 'center', color: 'white'}}>Cancel</Text> </TouchableOpacity> </View> </View> </Modal> {/* Explore other routes portion */} <View style={{width: '100%', height: '65%', alignSelf: 'center', flex: 1, backgroundColor: '#F4ECC6', padding: 3, borderWidth: 1, borderColor: '#675a5a'}}> <SafeAreaView style={{marginBottom: '6%'}}> <Text style={styles.title}> Explore </Text> <FlatList
<Text style={styles.pathTitle}>{item.name}</Text> <Text style={styles.detailsTwo}>{item.description}</Text> </TouchableOpacity> )} keyExtractor={item => (item.id).toString()} /> </SafeAreaView> </View> </View> ); } } const styles = StyleSheet.create({ container: { flex: 1, backgroundColor: Colors.yellow, alignItems: 'center', justifyContent: 'flex-start', }, title :{ fontSize: 17, color: '#675A5A', fontWeight: "500", alignSelf: 'center', }, item: { backgroundColor: '#fffae3', padding: '2%', marginVertical: '2%', justifyContent: 'flex-start', }, pathTitle: { fontSize: 15, fontWeight: '500', color: '#675A5A', alignSelf: 'flex-start', marginBottom: '1%' }, detailsOne: { fontSize: 12 }, detailsTwo: { fontSize: 12, fontStyle: 'italic', marginTop: '1%', }, saveView: { backgroundColor: Colors.lightblue, width: "80%", height: '68%', alignSelf: "center", borderRadius: 10, borderWidth: 2, borderColor: Colors.brown, }, modalText: { alignSelf: 'center', fontSize: 15, padding: 1, color: Colors.brown, fontWeight: "500" }, input: { width: "80%", borderWidth: 1, height: '15%', marginVertical: '3%', padding: '2%', fontSize: 15, backgroundColor: '#D7EBF4', borderColor: '#675a5a', borderBottomWidth: 3, alignSelf: 'center' } });
data={this.state.walks} renderItem={({item}) => ( <TouchableOpacity style={styles.item} onPress={() => this.setPremadePath(item)}>
random_line_split
MapScreen.js
import React, { Component } from 'react'; import { FlatList, SafeAreaView, Dimensions, StyleSheet, View, Image, TextInput, Modal } from 'react-native'; import MapView from 'react-native-maps'; import MapViewDirections from 'react-native-maps-directions'; import { Marker } from "react-native-maps"; import { Text } from 'react-native'; import { TouchableOpacity } from 'react-native-gesture-handler'; import Buttons from '../styles/Buttons.js' import Colors from '../styles/Colors.js'; import { MaterialIcons } from '@expo/vector-icons'; import axios from 'axios' const {height, width} = Dimensions.get('window'); const LATITUDE = 34.06279; const LONGITUDE = -118.44390; const LATITUDE_DELTA = 0.0922; const LONGITUDE_DELTA = .05; const GOOGLE_MAPS_APIKEY = 'AIzaSyBx4y6okTMakFLVwR5PKVN9kyqpbJFykrE'; export default class MapScreen extends Component { constructor(props) { super(props); this.state = { startValue: 'Start', initialCoords:[ {latitude:34.073026, longitude:-118.465619}, {latitude:34.067223, longitude:-118.410851} ], coordinates: [ { latitude: 34.06279, longitude: -118.44390, }, { latitude: 34.06241, longitude: -118.44375, }, ], clocation: { latitude: 34.06637, longitude:-118.44524, }, dur: null, dis: null, saveWalk:{ startingLocation: null, destinationLocation: null, forZoom: { distance: Number, duration: Number, coordinates: [] } }, modalVisible: false, currentPath: "Current Path", premadePath: false, description: "", name: "", }; this.mapView = null; } // Start and Stop route button functionality onStartWalk = () =>{ this.setState({ saveWalk:{ startingLocation:this.state.coordinates[1], destinationLocation: this.state.coordinates[0]}, }) if(this.state.startValue=='Start')
else{ this.setState({ startValue:'Start' }); this.mapView.fitToCoordinates(this.state.initialCoords,{ edgePadding: { right: width, bottom: height, left: width, top: height } } ); } } // Saves user's walks to database saveWalk = (visible) => { var params = JSON.stringify({ 'name': this.state.name, 'description': this.state.description, 'coordinates': this.state.coordinates, 'profile': [global.session_id], }); axios .post("http://127.0.0.1:8000/walks/", params, {"headers": { 'content-Type': 'application/json' }}) .then(this.setModalVisible(visible)) .catch(error => console.log(error) ); console.log(params) } setModalVisible = (visible) => { this.setState({modalVisible:visible}); } // When map is pressed, route is created from current location onMapPress = (e) => { this.setState({ coordinates: [ e.nativeEvent.coordinate, this.state.clocation ], currentPath: "Current Path", premadePath: false, }); } onRoutePress = (long, lat ) => { this.setState({ coordinates: [ {latitude: lat, longitude: long}, this.state.clocation ], }); } // Getting walks data for explore portion componentDidMount() { this.currentLocation(); this.intervalID = setInterval(this.currentLocation.bind(this), 1000); axios .get("http://127.0.0.1:8000/walks") .then(response => this.setState({walks: response.data.results})) .catch(error => console.log(error) ); } componentWillUnmount() { clearInterval(this.intervalID); } currentLocation = () => { navigator.geolocation.getCurrentPosition( position => { const latitude = position.coords.latitude; const longitude = position.coords.longitude; this.setState({ clocation: {latitude, longitude} }); }, { enableHighAccuracy: true, timeout:20000, maximumAge: 1000} ); }; setPremadePath = (item) => { this.setState({ coordinates: item.coordinates, currentPath: item.name, premadePath: true, }) console.log(item.coordinates) } render() { const { modalVisible } = this.state; let button; button= <TouchableOpacity style={Buttons.brownbuttonSmall} onPress={() => this.setModalVisible(!modalVisible)}> <Text style={{color:'white', alignSelf: "center"}}>Save</Text> </TouchableOpacity> return ( <View style={styles.container}> {/* Map and current route display */} <View style={{width: '100%', height: '65%', padding: '2%', alignSelf: 'center'}}> <MapView initialRegion={{ latitude: LATITUDE, longitude: LONGITUDE, latitudeDelta: LATITUDE_DELTA, longitudeDelta: LONGITUDE_DELTA, }} style={StyleSheet.absoluteFill} ref={c => this.mapView = c} onPress={this.onMapPress} zoomEnabled={true} > {this.state.coordinates.map((coordinate, index) => <Marker key={`coordinate_${index}`} coordinate={coordinate} /> )} {(this.state.coordinates.length >= 2) && ( <MapViewDirections origin={this.state.coordinates[0]} destination={this.state.coordinates[this.state.coordinates.length-1]} waypoints={this.state.coordinates} mode="WALKING" apikey={GOOGLE_MAPS_APIKEY} strokeWidth={3} strokeColor="blue" optimizeWaypoints={true} onStart={(params) => { }} onReady={result => { this.setState({ dis: result.distance, dur: result.duration, forZoom: result }) }} onError={(errorMessage) => { }} /> )} <Marker coordinate={{latitude: this.state.clocation.latitude, longitude: this.state.clocation.longitude}}> <MaterialIcons name="my-location" size={24} color={Colors.brown} /> </Marker> </MapView> <Text style={{marginTop: '10%', alignSelf: 'center', fontStyle: "italic", color: '#675a5a', backgroundColor: 'white'}}> Click to where you want to go on the map </Text> {/* Current Path window on map */} <View style={{ flex: 'stretch', backgroundColor: '#fffae3', 
padding: 3, borderRadius: 10, borderWidth: 2, borderColor: '#675a5a'}}> <Text style={styles.title}>{this.state.currentPath}</Text> <View style={{flexDirection: "row"}}> <View style={{width: '50%'}}> <Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Distance: {((this.state.dis)/1.609).toFixed(2)} miles</Text> <Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Time: {((this.state.dur)/1).toFixed(2)} min.</Text> </View> <View style={{flex: 1, width: '50%', alignItems: 'flex-end', marginRight: '2%'}}> {button} </View> </View> </View> </View> <TouchableOpacity style={{padding: '2%', margin: '1%', backgroundColor: Colors.brown, borderRadius: 13, width: 400}} onPress={ this.onStartWalk } > <Text style={{color: "white", fontSize: 15, alignSelf: "center"}}>{this.state.startValue}</Text> </TouchableOpacity> {/* Window for saving a route */} <Modal style ={{marginTop: "50%"}} animationType="slide" transparent={true} visible={modalVisible} onRequestClose={() => { this.setModalVisible(!modalVisible); }} > <View style={{marginTop: '50%'}}> <View style={styles.saveView}> <Text style={[styles.modalText, {marginTop: '5%'}]}>Title</Text> <TextInput style={styles.input} placeholder="Enter a title" onChangeText={(name) => this.setState({name})} /> <Text style={styles.modalText}>Description</Text> <TextInput style={styles.input} placeholder="Enter a description" returnKeyType="done" onChangeText={(description) => this.setState({description})} /> <TouchableOpacity style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]} onPress={() => this.saveWalk(!modalVisible)} > <Text style={{alignSelf: 'center', color: 'white'}}>Save</Text> </TouchableOpacity> <TouchableOpacity style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]} onPress={() => this.setModalVisible(!modalVisible)} > <Text style={{alignSelf: 'center', color: 'white'}}>Cancel</Text> </TouchableOpacity> </View> </View> </Modal> {/* Explore other routes portion */} <View style={{width: '100%', height: '65%', alignSelf: 'center', flex: 1, backgroundColor: '#F4ECC6', padding: 3, borderWidth: 1, borderColor: '#675a5a'}}> <SafeAreaView style={{marginBottom: '6%'}}> <Text style={styles.title}> Explore </Text> <FlatList data={this.state.walks} renderItem={({item}) => ( <TouchableOpacity style={styles.item} onPress={() => this.setPremadePath(item)}> <Text style={styles.pathTitle}>{item.name}</Text> <Text style={styles.detailsTwo}>{item.description}</Text> </TouchableOpacity> )} keyExtractor={item => (item.id).toString()} /> </SafeAreaView> </View> </View> ); } } const styles = StyleSheet.create({ container: { flex: 1, backgroundColor: Colors.yellow, alignItems: 'center', justifyContent: 'flex-start', }, title :{ fontSize: 17, color: '#675A5A', fontWeight: "500", alignSelf: 'center', }, item: { backgroundColor: '#fffae3', padding: '2%', marginVertical: '2%', justifyContent: 'flex-start', }, pathTitle: { fontSize: 15, fontWeight: '500', color: '#675A5A', alignSelf: 'flex-start', marginBottom: '1%' }, detailsOne: { fontSize: 12 }, detailsTwo: { fontSize: 12, fontStyle: 'italic', marginTop: '1%', }, saveView: { backgroundColor: Colors.lightblue, width: "80%", height: '68%', alignSelf: "center", borderRadius: 10, borderWidth: 2, borderColor: Colors.brown, }, modalText: { alignSelf: 'center', fontSize: 15, padding: 1, color: Colors.brown, fontWeight: "500" }, input: { width: "80%", borderWidth: 1, height: '15%', marginVertical: '3%', padding: '2%', fontSize: 15, backgroundColor: '#D7EBF4', 
borderColor: '#675a5a', borderBottomWidth: 3, alignSelf: 'center' } });
{ this.setState({ startValue:'Stop' }); this.mapView.fitToCoordinates(this.state.forZoom.coordinates,{ edgePadding: { right: (width / 10), bottom: (height / 20), left: (width / 10), top: (height / 20), } } ); }
conditional_block
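The MapScreen.js samples above show distance as kilometres divided by 1.609 and the duration straight from MapViewDirections, which reports kilometres and minutes. A tiny Python sketch of that conversion, noting that 1.609344 is the exact kilometres-per-mile factor while the screen rounds it to 1.609:

KM_PER_MILE = 1.609344

def km_to_miles(km):
    # MapViewDirections result.distance is in km; convert for display in miles
    return km / KM_PER_MILE

print(f"{km_to_miles(5.0):.2f} miles")  # 3.11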
yolov5_trt12.py
""" An example that uses TensorRT's Python api to make inferences. """ import ctypes import os import random import sys import threading import time import cv2 import numpy as np import tensorrt as trt import torch import torchvision from trt_lite2 import TrtLite INPUT_W = 256 INPUT_H = 256 CONF_THRESH = 0.1 IOU_THRESHOLD = 0.4 labels = ['one', 'five', 'first', 'ok', 'heart single', 'yearh', 'three', 'four', 'six', 'i love you', 'gun', 'thumb up', 'nine', 'pink'] BATCH_SIZE = 1 ENGINE_PATH_21 = "./engine/resnet50_21.engine" ENGINE_PATH_GESTURE = "./engine/resnet50-gesture.engine" def plot_one_box(x, img, color=None, label=None, line_thickness=None): """ description: Plots one bounding box on image img, this function comes from YoLov5 project. param: x: a box likes [x1,y1,x2,y2] img: a opencv image object color: color to draw rectangle, such as (0,255,0) label: str line_thickness: int return: no return """ tl = ( line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 ) # line/font thickness color = color or [random.randint(0, 255) for _ in range(3)] c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) if label: tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled cv2.putText( img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA, ) def draw_bd_handpose(img_,hand_,x,y): thick = 2 colors = [(0,215,255),(255,115,55),(5,255,55),(25,15,255),(225,15,55)] # cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['1']['x']+x), int(hand_['1']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['1']['x']+x), int(hand_['1']['y']+y)),(int(hand_['2']['x']+x), int(hand_['2']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['2']['x']+x), int(hand_['2']['y']+y)),(int(hand_['3']['x']+x), int(hand_['3']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['3']['x']+x), int(hand_['3']['y']+y)),(int(hand_['4']['x']+x), int(hand_['4']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['5']['x']+x), int(hand_['5']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['5']['x']+x), int(hand_['5']['y']+y)),(int(hand_['6']['x']+x), int(hand_['6']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['6']['x']+x), int(hand_['6']['y']+y)),(int(hand_['7']['x']+x), int(hand_['7']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['7']['x']+x), int(hand_['7']['y']+y)),(int(hand_['8']['x']+x), int(hand_['8']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['9']['x']+x), int(hand_['9']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['9']['x']+x), int(hand_['9']['y']+y)),(int(hand_['10']['x']+x), int(hand_['10']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['10']['x']+x), int(hand_['10']['y']+y)),(int(hand_['11']['x']+x), int(hand_['11']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['11']['x']+x), int(hand_['11']['y']+y)),(int(hand_['12']['x']+x), int(hand_['12']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['13']['x']+x), int(hand_['13']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['13']['x']+x), int(hand_['13']['y']+y)),(int(hand_['14']['x']+x), int(hand_['14']['y']+y)), colors[3], thick) cv2.line(img_, 
(int(hand_['14']['x']+x), int(hand_['14']['y']+y)),(int(hand_['15']['x']+x), int(hand_['15']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['15']['x']+x), int(hand_['15']['y']+y)),(int(hand_['16']['x']+x), int(hand_['16']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['17']['x']+x), int(hand_['17']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['17']['x']+x), int(hand_['17']['y']+y)),(int(hand_['18']['x']+x), int(hand_['18']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['18']['x']+x), int(hand_['18']['y']+y)),(int(hand_['19']['x']+x), int(hand_['19']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['19']['x']+x), int(hand_['19']['y']+y)),(int(hand_['20']['x']+x), int(hand_['20']['y']+y)), colors[4], thick) def drawhand(img,outputs,img_width,img_height): print(outputs) pts_hand = {} for i in range(int(outputs.shape[0] / 2)): x = (outputs[i * 2 + 0] * float(img_width)) y = (outputs[i * 2 + 1] * float(img_height)) pts_hand[str(i)] = {} pts_hand[str(i)] = { "x": x, "y": y, } draw_bd_handpose(img, pts_hand, 0, 0) # 绘制关键点连线 # ------------- 绘制关键点 for i in range(int(outputs.shape[0] / 2)): x = (outputs[i * 2 + 0] * float(img_width)) y = (outputs[i * 2 + 1] * float(img_height)) cv2.circle(img, (int(x), int(y)), 3, (255, 50, 60), -1) cv2.circle(img, (int(x), int(y)), 1, (255, 150, 180), -1) class YoLov5TRT(object): """ description: A YOLOv5 class that warps TensorRT ops, preprocess and postprocess ops. """ def __init__(self, engine_file_path): TRT_LOGGER = trt.Logger(trt.Logger.INFO) trt_yolo = TrtLite(engine_file_path=engine_file_path) trt_yolo.print_info() self.buffers = trt_yolo.allocate_io_buffers(1, True) self.trt_yolo = trt_yolo # 识别人手的21个关键点 self.trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21) self.trt_lite21.print_info() # 识别手势 self.trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE) self.trt_lite_gesture.print_info() def doInference(self,image_path): threading.Thread.__init__(self) # Do image preprocess input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_path) self.buffers[0] = torch.from_numpy(input_image.ravel()).cuda() bindings = [t.data_ptr() for t in self.buffers] self.trt_yolo.execute(bindings, BATCH_SIZE) host_outputs = self.buffers[1].clone().cpu().detach().numpy() torch.cuda.synchronize() print(host_outputs.shape) output = host_outputs.ravel() # Do postprocess result_boxes, result_scores, result_classid = self.post_process( output, origin_h, origin_w ) print(output.shape,len(result_boxes)) # Draw rectangles and labels on the original image for i in range(len(result_boxes)): box = result_boxes[i] print("box>>>",box) # 截出手的部位 image_hand = image_raw[int(box[1]):int(box[3]),int(box[0]):int(box[2])] # 推理手的21个特征点 hand_data = self.preprocess_hand(image_hand) output21 = self.doInference_resnet(self.trt_lite21,hand_data.ravel()) # 推理手势 output_gesture = self.doInference_resnet(self.trt_lite_gesture, hand_data.ravel()) print("gesture:",output_gesture) index = np.argmax(output_gesture) label = labels[index] hand_width = int(box[2])-int(box[0]) hand_height = int(box[3])-int(box[1]) drawhand(image_hand,output21,hand_width,hand_height) print("w,h:",hand_width,hand_height) cv2.imwrite("hand_11.jpg", image_hand) plot_one_box( box, image_raw, label="{}:{:.2f}".format( label, result_scores[i] ), ) parent, filename = os.path.split(input_image_path) save_name = os.path.join(parent, "output_" + filename) #  Save image cv2.imwrite(save_name, image_raw) print("save img success") def 
doInference_resnet(self,trt_engine, data): i2shape = 1 io_info = trt_eng
shape) print(io_info) d_buffers = trt_engine.allocate_io_buffers(i2shape, True) print(io_info[1][2]) d_buffers[0] = data.cuda() bindings = [t.data_ptr() for t in d_buffers] # 进行推理 trt_engine.execute(bindings, i2shape) # output_data_trt = d_buffers[1].clone().cpu().detach().numpy() torch.cuda.synchronize() host_out = output_data_trt.ravel() return host_out def preprocess_hand(self,img): img_width = img.shape[1] img_height = img.shape[0] print(img.shape) # 输入图片预处理 img_ = cv2.resize(img, (224,224), interpolation=cv2.INTER_CUBIC) img_ = img_.astype(np.float32) img_ = (img_ - 128.) / 256. img_ = img_.transpose(2, 0, 1) img_ = torch.from_numpy(img_) img_ = img_.unsqueeze_(0) return img_ def preprocess_image(self, input_image_path): """ description: Read an image from image path, convert it to RGB, resize and pad it to target size, normalize to [0,1], transform to NCHW format. param: input_image_path: str, image path return: image: the processed image image_raw: the original image h: original height w: original width """ image_raw = cv2.imread(input_image_path) h, w, c = image_raw.shape image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB) # Calculate widht and height and paddings r_w = INPUT_W / w r_h = INPUT_H / h if r_h > r_w: tw = INPUT_W th = int(r_w * h) tx1 = tx2 = 0 ty1 = int((INPUT_H - th) / 2) ty2 = INPUT_H - th - ty1 else: tw = int(r_h * w) th = INPUT_H tx1 = int((INPUT_W - tw) / 2) tx2 = INPUT_W - tw - tx1 ty1 = ty2 = 0 # Resize the image with long side while maintaining ratio image = cv2.resize(image, (tw, th)) # Pad the short side with (128,128,128) image = cv2.copyMakeBorder( image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, (128, 128, 128) ) image = image.astype(np.float32) # Normalize to [0,1] image /= 255.0 # HWC to CHW format: image = np.transpose(image, [2, 0, 1]) # CHW to NCHW format image = np.expand_dims(image, axis=0) # Convert the image to row-major order, also known as "C order": image = np.ascontiguousarray(image) return image, image_raw, h, w def xywh2xyxy(self, origin_h, origin_w, x): """ description: Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right param: origin_h: height of original image origin_w: width of original image x: A boxes tensor, each row is a box [center_x, center_y, w, h] return: y: A boxes tensor, each row is a box [x1, y1, x2, y2] """ y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) r_w = INPUT_W / origin_w r_h = INPUT_H / origin_h if r_h > r_w: y[:, 0] = x[:, 0] - x[:, 2] / 2 y[:, 2] = x[:, 0] + x[:, 2] / 2 y[:, 1] = x[:, 1] - x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2 y[:, 3] = x[:, 1] + x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2 y /= r_w else: y[:, 0] = x[:, 0] - x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2 y[:, 2] = x[:, 0] + x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2 y[:, 1] = x[:, 1] - x[:, 3] / 2 y[:, 3] = x[:, 1] + x[:, 3] / 2 y /= r_h return y def post_process(self, output, origin_h, origin_w): """ description: postprocess the prediction param: output: A tensor likes [num_boxes,cx,cy,w,h,conf,cls_id, cx,cy,w,h,conf,cls_id, ...] 
origin_h: height of original image origin_w: width of original image return: result_boxes: finally boxes, a boxes tensor, each row is a box [x1, y1, x2, y2] result_scores: finally scores, a tensor, each element is the score correspoing to box result_classid: finally classid, a tensor, each element is the classid correspoing to box """ # Get the num of boxes detected num = int(output[0]) # Reshape to a two dimentional ndarray pred = np.reshape(output[1:], (-1, 6))[:num, :] # to a torch Tensor pred = torch.Tensor(pred).cuda() # Get the boxes boxes = pred[:, :4] # Get the scores scores = pred[:, 4] # Get the classid classid = pred[:, 5] # Choose those boxes that score > CONF_THRESH si = scores > CONF_THRESH boxes = boxes[si, :] scores = scores[si] classid = classid[si] # Trandform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2] boxes = self.xywh2xyxy(origin_h, origin_w, boxes) # Do nms indices = torchvision.ops.nms(boxes, scores, iou_threshold=IOU_THRESHOLD).cpu() result_boxes = boxes[indices, :].cpu() result_scores = scores[indices].cpu() result_classid = classid[indices].cpu() return result_boxes, result_scores, result_classid class myThread(threading.Thread): def __init__(self, func, args): threading.Thread.__init__(self) self.func = func self.args = args def run(self): self.func(*self.args) if __name__ == "__main__": # load custom plugins PLUGIN_LIBRARY = "./libmyplugins.so" ctypes.CDLL(PLUGIN_LIBRARY) engine_file_path = "./yolov5s-hand-docker-comm.engine" # load coco labels categories = ["hand"] # a YoLov5TRT instance yolov5_wrapper = YoLov5TRT(engine_file_path) input_image_paths = ["images/2.jpg"] for input_image_path in input_image_paths: # create a new thread to do inference thread1 = myThread(yolov5_wrapper.doInference, ["./"+input_image_path]) thread1.start() thread1.join() # destroy the instance # yolov5_wrapper.destroy()
ine.get_io_info(i2
identifier_name
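The yolov5_trt12.py samples above letterbox each frame to 256x256 in preprocess_image (resize keeping aspect ratio, pad the short side with grey, scale to [0,1], HWC to NCHW) before handing it to the engine. Below is a numpy/OpenCV-only sketch of that letterbox step under those assumptions; it is detached from TensorRT and the helper name is illustrative:

import cv2
import numpy as np

def letterbox(img_bgr, size=256):
    h, w = img_bgr.shape[:2]
    r = min(size / w, size / h)
    tw, th = int(r * w), int(r * h)
    resized = cv2.resize(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB), (tw, th))
    top = (size - th) // 2
    left = (size - tw) // 2
    padded = cv2.copyMakeBorder(resized, top, size - th - top, left, size - tw - left,
                                cv2.BORDER_CONSTANT, value=(128, 128, 128))
    # [0,1] float32, HWC -> NCHW, contiguous for the engine input buffer
    x = np.ascontiguousarray(padded.astype(np.float32).transpose(2, 0, 1)[None] / 255.0)
    return x, r, (left, top)

Returning the scale r and the padding offsets makes it straightforward to map detections back to the original image, which is what xywh2xyxy in the sample undoes on the output side.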
yolov5_trt12.py
""" An example that uses TensorRT's Python api to make inferences. """ import ctypes import os import random import sys import threading import time import cv2 import numpy as np import tensorrt as trt import torch import torchvision from trt_lite2 import TrtLite INPUT_W = 256 INPUT_H = 256 CONF_THRESH = 0.1 IOU_THRESHOLD = 0.4 labels = ['one', 'five', 'first', 'ok', 'heart single', 'yearh', 'three', 'four', 'six', 'i love you', 'gun', 'thumb up', 'nine', 'pink'] BATCH_SIZE = 1 ENGINE_PATH_21 = "./engine/resnet50_21.engine" ENGINE_PATH_GESTURE = "./engine/resnet50-gesture.engine" def plot_one_box(x, img, color=None, label=None, line_thickness=None): """ description: Plots one bounding box on image img, this function comes from YoLov5 project. param: x: a box likes [x1,y1,x2,y2] img: a opencv image object color: color to draw rectangle, such as (0,255,0) label: str line_thickness: int return: no return """ tl = ( line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 ) # line/font thickness color = color or [random.randint(0, 255) for _ in range(3)] c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) if label: tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled cv2.putText( img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA, ) def draw_bd_handpose(img_,hand_,x,y): thick = 2 colors = [(0,215,255),(255,115,55),(5,255,55),(25,15,255),(225,15,55)] # cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['1']['x']+x), int(hand_['1']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['1']['x']+x), int(hand_['1']['y']+y)),(int(hand_['2']['x']+x), int(hand_['2']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['2']['x']+x), int(hand_['2']['y']+y)),(int(hand_['3']['x']+x), int(hand_['3']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['3']['x']+x), int(hand_['3']['y']+y)),(int(hand_['4']['x']+x), int(hand_['4']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['5']['x']+x), int(hand_['5']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['5']['x']+x), int(hand_['5']['y']+y)),(int(hand_['6']['x']+x), int(hand_['6']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['6']['x']+x), int(hand_['6']['y']+y)),(int(hand_['7']['x']+x), int(hand_['7']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['7']['x']+x), int(hand_['7']['y']+y)),(int(hand_['8']['x']+x), int(hand_['8']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['9']['x']+x), int(hand_['9']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['9']['x']+x), int(hand_['9']['y']+y)),(int(hand_['10']['x']+x), int(hand_['10']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['10']['x']+x), int(hand_['10']['y']+y)),(int(hand_['11']['x']+x), int(hand_['11']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['11']['x']+x), int(hand_['11']['y']+y)),(int(hand_['12']['x']+x), int(hand_['12']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['13']['x']+x), int(hand_['13']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['13']['x']+x), int(hand_['13']['y']+y)),(int(hand_['14']['x']+x), int(hand_['14']['y']+y)), colors[3], thick) cv2.line(img_, 
(int(hand_['14']['x']+x), int(hand_['14']['y']+y)),(int(hand_['15']['x']+x), int(hand_['15']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['15']['x']+x), int(hand_['15']['y']+y)),(int(hand_['16']['x']+x), int(hand_['16']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['17']['x']+x), int(hand_['17']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['17']['x']+x), int(hand_['17']['y']+y)),(int(hand_['18']['x']+x), int(hand_['18']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['18']['x']+x), int(hand_['18']['y']+y)),(int(hand_['19']['x']+x), int(hand_['19']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['19']['x']+x), int(hand_['19']['y']+y)),(int(hand_['20']['x']+x), int(hand_['20']['y']+y)), colors[4], thick) def drawhand(img,outputs,img_width,img_height): print(outputs) pts_hand = {} for i in range(int(outputs.shape[0] / 2)): x = (outputs[i * 2 + 0] * float(img_width)) y = (outputs[i * 2 + 1] * float(img_height)) pts_hand[str(i)] = {} pts_hand[str(i)] = { "x": x, "y": y, } draw_bd_handpose(img, pts_hand, 0, 0) # 绘制关键点连线 # ------------- 绘制关键点 for i in range(int(outputs.shape[0] / 2)): x = (outputs[i * 2 + 0] * float(img_width)) y = (outputs[i * 2 + 1] * float(img_height)) cv2.circle(img, (int(x), int(y)), 3, (255, 50, 60), -1) cv2.circle(img, (int(x), int(y)), 1, (255, 150, 180), -1) class YoLov5TRT(object): """ description: A YOLOv5 class that warps TensorRT ops, preprocess and postprocess ops. """ def __init__(self, engine_file_path): TRT_LOGGER = trt.Logger(trt.Logger.INFO) trt_yolo = TrtLite(engine_file_path=engine_file_path) trt_yolo.print_info() self.buffers = trt_yolo.allocate_io_buffers(1, True) self.trt_yolo = trt_yolo # 识别人手的21个关键点 self.trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21) self.trt_lite21.print_info() # 识别手势 self.trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE) self.trt_lite_gesture.print_info() def doInference(self,image_path): threading.Thread.__init__(self) # Do imag
= trt_engine.get_io_info(i2shape) print(io_info) d_buffers = trt_engine.allocate_io_buffers(i2shape, True) print(io_info[1][2]) d_buffers[0] = data.cuda() bindings = [t.data_ptr() for t in d_buffers] # 进行推理 trt_engine.execute(bindings, i2shape) # output_data_trt = d_buffers[1].clone().cpu().detach().numpy() torch.cuda.synchronize() host_out = output_data_trt.ravel() return host_out def preprocess_hand(self,img): img_width = img.shape[1] img_height = img.shape[0] print(img.shape) # 输入图片预处理 img_ = cv2.resize(img, (224,224), interpolation=cv2.INTER_CUBIC) img_ = img_.astype(np.float32) img_ = (img_ - 128.) / 256. img_ = img_.transpose(2, 0, 1) img_ = torch.from_numpy(img_) img_ = img_.unsqueeze_(0) return img_ def preprocess_image(self, input_image_path): """ description: Read an image from image path, convert it to RGB, resize and pad it to target size, normalize to [0,1], transform to NCHW format. param: input_image_path: str, image path return: image: the processed image image_raw: the original image h: original height w: original width """ image_raw = cv2.imread(input_image_path) h, w, c = image_raw.shape image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB) # Calculate widht and height and paddings r_w = INPUT_W / w r_h = INPUT_H / h if r_h > r_w: tw = INPUT_W th = int(r_w * h) tx1 = tx2 = 0 ty1 = int((INPUT_H - th) / 2) ty2 = INPUT_H - th - ty1 else: tw = int(r_h * w) th = INPUT_H tx1 = int((INPUT_W - tw) / 2) tx2 = INPUT_W - tw - tx1 ty1 = ty2 = 0 # Resize the image with long side while maintaining ratio image = cv2.resize(image, (tw, th)) # Pad the short side with (128,128,128) image = cv2.copyMakeBorder( image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, (128, 128, 128) ) image = image.astype(np.float32) # Normalize to [0,1] image /= 255.0 # HWC to CHW format: image = np.transpose(image, [2, 0, 1]) # CHW to NCHW format image = np.expand_dims(image, axis=0) # Convert the image to row-major order, also known as "C order": image = np.ascontiguousarray(image) return image, image_raw, h, w def xywh2xyxy(self, origin_h, origin_w, x): """ description: Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right param: origin_h: height of original image origin_w: width of original image x: A boxes tensor, each row is a box [center_x, center_y, w, h] return: y: A boxes tensor, each row is a box [x1, y1, x2, y2] """ y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) r_w = INPUT_W / origin_w r_h = INPUT_H / origin_h if r_h > r_w: y[:, 0] = x[:, 0] - x[:, 2] / 2 y[:, 2] = x[:, 0] + x[:, 2] / 2 y[:, 1] = x[:, 1] - x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2 y[:, 3] = x[:, 1] + x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2 y /= r_w else: y[:, 0] = x[:, 0] - x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2 y[:, 2] = x[:, 0] + x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2 y[:, 1] = x[:, 1] - x[:, 3] / 2 y[:, 3] = x[:, 1] + x[:, 3] / 2 y /= r_h return y def post_process(self, output, origin_h, origin_w): """ description: postprocess the prediction param: output: A tensor likes [num_boxes,cx,cy,w,h,conf,cls_id, cx,cy,w,h,conf,cls_id, ...] 
origin_h: height of original image origin_w: width of original image return: result_boxes: final boxes, a boxes tensor, each row is a box [x1, y1, x2, y2] result_scores: final scores, a tensor, each element is the score corresponding to a box result_classid: final classid, a tensor, each element is the classid corresponding to a box """ # Get the num of boxes detected num = int(output[0]) # Reshape to a two-dimensional ndarray pred = np.reshape(output[1:], (-1, 6))[:num, :] # to a torch Tensor pred = torch.Tensor(pred).cuda() # Get the boxes boxes = pred[:, :4] # Get the scores scores = pred[:, 4] # Get the classid classid = pred[:, 5] # Choose those boxes that score > CONF_THRESH si = scores > CONF_THRESH boxes = boxes[si, :] scores = scores[si] classid = classid[si] # Transform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2] boxes = self.xywh2xyxy(origin_h, origin_w, boxes) # Do nms indices = torchvision.ops.nms(boxes, scores, iou_threshold=IOU_THRESHOLD).cpu() result_boxes = boxes[indices, :].cpu() result_scores = scores[indices].cpu() result_classid = classid[indices].cpu() return result_boxes, result_scores, result_classid class myThread(threading.Thread): def __init__(self, func, args): threading.Thread.__init__(self) self.func = func self.args = args def run(self): self.func(*self.args) if __name__ == "__main__": # load custom plugins PLUGIN_LIBRARY = "./libmyplugins.so" ctypes.CDLL(PLUGIN_LIBRARY) engine_file_path = "./yolov5s-hand-docker-comm.engine" # load class labels categories = ["hand"] # a YoLov5TRT instance yolov5_wrapper = YoLov5TRT(engine_file_path) input_image_paths = ["images/2.jpg"] for input_image_path in input_image_paths: # create a new thread to do inference thread1 = myThread(yolov5_wrapper.doInference, ["./"+input_image_path]) thread1.start() thread1.join() # destroy the instance # yolov5_wrapper.destroy()
e preprocess input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_path) self.buffers[0] = torch.from_numpy(input_image.ravel()).cuda() bindings = [t.data_ptr() for t in self.buffers] self.trt_yolo.execute(bindings, BATCH_SIZE) host_outputs = self.buffers[1].clone().cpu().detach().numpy() torch.cuda.synchronize() print(host_outputs.shape) output = host_outputs.ravel() # Do postprocess result_boxes, result_scores, result_classid = self.post_process( output, origin_h, origin_w ) print(output.shape,len(result_boxes)) # Draw rectangles and labels on the original image for i in range(len(result_boxes)): box = result_boxes[i] print("box>>>",box) # crop the hand region image_hand = image_raw[int(box[1]):int(box[3]),int(box[0]):int(box[2])] # infer the 21 hand keypoints hand_data = self.preprocess_hand(image_hand) output21 = self.doInference_resnet(self.trt_lite21,hand_data.ravel()) # infer the hand gesture output_gesture = self.doInference_resnet(self.trt_lite_gesture, hand_data.ravel()) print("gesture:",output_gesture) index = np.argmax(output_gesture) label = labels[index] hand_width = int(box[2])-int(box[0]) hand_height = int(box[3])-int(box[1]) drawhand(image_hand,output21,hand_width,hand_height) print("w,h:",hand_width,hand_height) cv2.imwrite("hand_11.jpg", image_hand) plot_one_box( box, image_raw, label="{}:{:.2f}".format( label, result_scores[i] ), ) parent, filename = os.path.split(input_image_path) save_name = os.path.join(parent, "output_" + filename) #  Save image cv2.imwrite(save_name, image_raw) print("save img success") def doInference_resnet(self,trt_engine, data): i2shape = 1 io_info
identifier_body
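The preprocess_image method above letterboxes each frame: it scales the image so the longer side fills the 256x256 network input and pads the remainder with gray (128, 128, 128) before the HWC-to-NCHW transpose. The sketch below is a minimal, self-contained restatement of that resize-and-pad arithmetic for illustration only; the helper name letterbox is an assumption and not part of the original script.

import cv2
import numpy as np

INPUT_W, INPUT_H = 256, 256  # network input size used by the script above

def letterbox(image_rgb):
    # Scale so the longer side fits, pad the shorter side with gray.
    h, w = image_rgb.shape[:2]
    r_w, r_h = INPUT_W / w, INPUT_H / h
    if r_h > r_w:  # width is the limiting side: pad top/bottom
        tw, th = INPUT_W, int(r_w * h)
        tx1 = tx2 = 0
        ty1 = (INPUT_H - th) // 2
        ty2 = INPUT_H - th - ty1
    else:          # height is the limiting side: pad left/right
        tw, th = int(r_h * w), INPUT_H
        tx1 = (INPUT_W - tw) // 2
        tx2 = INPUT_W - tw - tx1
        ty1 = ty2 = 0
    resized = cv2.resize(image_rgb, (tw, th))
    padded = cv2.copyMakeBorder(resized, ty1, ty2, tx1, tx2,
                                cv2.BORDER_CONSTANT, value=(128, 128, 128))
    # Normalize to [0, 1], HWC -> CHW -> NCHW, contiguous for the TensorRT binding.
    chw = padded.astype(np.float32).transpose(2, 0, 1) / 255.0
    return np.ascontiguousarray(np.expand_dims(chw, axis=0))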
yolov5_trt12.py
""" An example that uses TensorRT's Python api to make inferences. """ import ctypes import os import random import sys import threading import time import cv2 import numpy as np import tensorrt as trt import torch import torchvision from trt_lite2 import TrtLite INPUT_W = 256 INPUT_H = 256 CONF_THRESH = 0.1 IOU_THRESHOLD = 0.4 labels = ['one', 'five', 'first', 'ok', 'heart single', 'yearh', 'three', 'four', 'six', 'i love you', 'gun', 'thumb up', 'nine', 'pink'] BATCH_SIZE = 1 ENGINE_PATH_21 = "./engine/resnet50_21.engine" ENGINE_PATH_GESTURE = "./engine/resnet50-gesture.engine" def plot_one_box(x, img, color=None, label=None, line_thickness=None): """ description: Plots one bounding box on image img, this function comes from YoLov5 project. param: x: a box likes [x1,y1,x2,y2] img: a opencv image object color: color to draw rectangle, such as (0,255,0) label: str line_thickness: int return: no return """ tl = ( line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 ) # line/font thickness color = color or [random.randint(0, 255) for _ in range(3)] c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) if label: tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled cv2.putText( img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA, ) def draw_bd_handpose(img_,hand_,x,y): thick = 2 colors = [(0,215,255),(255,115,55),(5,255,55),(25,15,255),(225,15,55)] # cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['1']['x']+x), int(hand_['1']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['1']['x']+x), int(hand_['1']['y']+y)),(int(hand_['2']['x']+x), int(hand_['2']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['2']['x']+x), int(hand_['2']['y']+y)),(int(hand_['3']['x']+x), int(hand_['3']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['3']['x']+x), int(hand_['3']['y']+y)),(int(hand_['4']['x']+x), int(hand_['4']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['5']['x']+x), int(hand_['5']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['5']['x']+x), int(hand_['5']['y']+y)),(int(hand_['6']['x']+x), int(hand_['6']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['6']['x']+x), int(hand_['6']['y']+y)),(int(hand_['7']['x']+x), int(hand_['7']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['7']['x']+x), int(hand_['7']['y']+y)),(int(hand_['8']['x']+x), int(hand_['8']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['9']['x']+x), int(hand_['9']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['9']['x']+x), int(hand_['9']['y']+y)),(int(hand_['10']['x']+x), int(hand_['10']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['10']['x']+x), int(hand_['10']['y']+y)),(int(hand_['11']['x']+x), int(hand_['11']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['11']['x']+x), int(hand_['11']['y']+y)),(int(hand_['12']['x']+x), int(hand_['12']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['13']['x']+x), int(hand_['13']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['13']['x']+x), int(hand_['13']['y']+y)),(int(hand_['14']['x']+x), int(hand_['14']['y']+y)), colors[3], thick) cv2.line(img_, 
(int(hand_['14']['x']+x), int(hand_['14']['y']+y)),(int(hand_['15']['x']+x), int(hand_['15']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['15']['x']+x), int(hand_['15']['y']+y)),(int(hand_['16']['x']+x), int(hand_['16']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['17']['x']+x), int(hand_['17']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['17']['x']+x), int(hand_['17']['y']+y)),(int(hand_['18']['x']+x), int(hand_['18']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['18']['x']+x), int(hand_['18']['y']+y)),(int(hand_['19']['x']+x), int(hand_['19']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['19']['x']+x), int(hand_['19']['y']+y)),(int(hand_['20']['x']+x), int(hand_['20']['y']+y)), colors[4], thick) def drawhand(img,outputs,img_width,img_height): print(outputs) pts_hand = {} for i in range(int(outputs.shape[0] / 2)): x = (outputs[i * 2 + 0] * float(img_width)) y = (outputs[i * 2 + 1] * float(img_height)) pts_hand[str(i)] = {} pts_hand[str(i)] = { "x": x, "y": y, } draw_bd_handpose(img, pts_hand, 0, 0) # 绘制关键点连线 # ------------- 绘制关键点 for i in range(int(outputs.shape[0] / 2)): x = (outputs[i * 2 + 0] * float(img_width)) y = (outputs[i * 2 + 1] * float(img_height)) cv2.circle(img, (int(x), int(y)), 3, (255, 50, 60), -1) cv2.circle(img, (int(x), int(y)), 1, (255, 150, 180), -1) class YoLov5TRT(object): """ description: A YOLOv5 class that warps TensorRT ops, preprocess and postprocess ops. """ def __init__(self, engine_file_path): TRT_LOGGER = trt.Logger(trt.Logger.INFO) trt_yolo = TrtLite(engine_file_path=engine_file_path) trt_yolo.print_info() self.buffers = trt_yolo.allocate_io_buffers(1, True) self.trt_yolo = trt_yolo # 识别人手的21个关键点 self.trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21) self.trt_lite21.print_info() # 识别手势 self.trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE) self.trt_lite_gesture.print_info() def doInference(self,image_path): threading.Thread.__init__(self) # Do image preprocess input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_path) self.buffers[0] = torch.from_numpy(input_image.ravel()).cuda() bindings = [t.data_ptr() for t in self.buffers] self.trt_yolo.execute(bindings, BATCH_SIZE) host_outputs = self.buffers[1].clone().cpu().detach().numpy() torch.cuda.synchronize() print(host_outputs.shape) output = host_outputs.ravel() # Do postprocess result_boxes, result_scores, result_classid = self.post_process( output, origin_h, origin_w ) print(output.shape,len(result_boxes)) # Draw rectangles and labels on the original image for i in range(len(result_boxes)): box = result_boxes[i] print("box>>>",box) # 截出手的部位 image_hand = image_raw[int(box[1]):int(box[3]),int(box[0]):int(box[2])] # 推理手的21个特征点 hand_data = self.preprocess_hand(image_hand) output21 = self.doInference_resnet(self.trt_lite21,hand_data.ravel()) # 推理手势 output_gesture = self.doInference_resnet(self.trt_lite_gesture, hand_data.ravel()) print("gesture:",output_gesture) index = np.argmax(output_gesture) label = labels[index] hand_width = int(box[2])-int(box[0]) hand_height = int(box[3])-int(box[1]) drawhand(image_hand,output21,hand_width,hand_height) print("w,h:",hand_width,hand_height) cv2.imwrite("hand_11.jpg", image_hand) plot_one_box( box, image_raw, label="{}:{:.2f}".format( label, result_scores[i] ), ) parent, filename = os.path.split(input_image_path) save_name = os.path.join(parent, "output_" + filename) #  Save image cv2.imwrite(save_name, image_raw) print("save img success") def 
doInference_resnet(self,trt_engine, data): i2shape = 1 io_info = trt_engine.get_io_info(i2shape) print(io_info) d_buffers = trt_engine.allocate_io_buffers(i2shape, True) print(io_info[1][2]) d_buffers[0] = data.cuda() bindings = [t.data_ptr() for t in d_buffers] # run inference trt_engine.execute(bindings, i2shape) # output_data_trt = d_buffers[1].clone().cpu().detach().numpy() torch.cuda.synchronize() host_out = output_data_trt.ravel() return host_out def preprocess_hand(self,img): img_width = img.shape[1] img_height = img.shape[0] print(img.shape) # preprocess the input image img_ = cv2.resize(img, (224,224), interpolation=cv2.INTER_CUBIC) img_ = img_.astype(np.float32) img_ = (img_ - 128.) / 256. img_ = img_.transpose(2, 0, 1) img_ = torch.from_numpy(img_) img_ = img_.unsqueeze_(0) return img_ def preprocess_image(self, input_image_path): """ description: Read an image from image path, convert it to RGB, resize and pad it to target size, normalize to [0,1], transform to NCHW format. param: input_image_path: str, image path return: image: the processed image
image_raw = cv2.imread(input_image_path) h, w, c = image_raw.shape image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB) # Calculate widht and height and paddings r_w = INPUT_W / w r_h = INPUT_H / h if r_h > r_w: tw = INPUT_W th = int(r_w * h) tx1 = tx2 = 0 ty1 = int((INPUT_H - th) / 2) ty2 = INPUT_H - th - ty1 else: tw = int(r_h * w) th = INPUT_H tx1 = int((INPUT_W - tw) / 2) tx2 = INPUT_W - tw - tx1 ty1 = ty2 = 0 # Resize the image with long side while maintaining ratio image = cv2.resize(image, (tw, th)) # Pad the short side with (128,128,128) image = cv2.copyMakeBorder( image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, (128, 128, 128) ) image = image.astype(np.float32) # Normalize to [0,1] image /= 255.0 # HWC to CHW format: image = np.transpose(image, [2, 0, 1]) # CHW to NCHW format image = np.expand_dims(image, axis=0) # Convert the image to row-major order, also known as "C order": image = np.ascontiguousarray(image) return image, image_raw, h, w def xywh2xyxy(self, origin_h, origin_w, x): """ description: Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right param: origin_h: height of original image origin_w: width of original image x: A boxes tensor, each row is a box [center_x, center_y, w, h] return: y: A boxes tensor, each row is a box [x1, y1, x2, y2] """ y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) r_w = INPUT_W / origin_w r_h = INPUT_H / origin_h if r_h > r_w: y[:, 0] = x[:, 0] - x[:, 2] / 2 y[:, 2] = x[:, 0] + x[:, 2] / 2 y[:, 1] = x[:, 1] - x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2 y[:, 3] = x[:, 1] + x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2 y /= r_w else: y[:, 0] = x[:, 0] - x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2 y[:, 2] = x[:, 0] + x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2 y[:, 1] = x[:, 1] - x[:, 3] / 2 y[:, 3] = x[:, 1] + x[:, 3] / 2 y /= r_h return y def post_process(self, output, origin_h, origin_w): """ description: postprocess the prediction param: output: A tensor likes [num_boxes,cx,cy,w,h,conf,cls_id, cx,cy,w,h,conf,cls_id, ...] 
origin_h: height of original image origin_w: width of original image return: result_boxes: finally boxes, a boxes tensor, each row is a box [x1, y1, x2, y2] result_scores: finally scores, a tensor, each element is the score correspoing to box result_classid: finally classid, a tensor, each element is the classid correspoing to box """ # Get the num of boxes detected num = int(output[0]) # Reshape to a two dimentional ndarray pred = np.reshape(output[1:], (-1, 6))[:num, :] # to a torch Tensor pred = torch.Tensor(pred).cuda() # Get the boxes boxes = pred[:, :4] # Get the scores scores = pred[:, 4] # Get the classid classid = pred[:, 5] # Choose those boxes that score > CONF_THRESH si = scores > CONF_THRESH boxes = boxes[si, :] scores = scores[si] classid = classid[si] # Trandform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2] boxes = self.xywh2xyxy(origin_h, origin_w, boxes) # Do nms indices = torchvision.ops.nms(boxes, scores, iou_threshold=IOU_THRESHOLD).cpu() result_boxes = boxes[indices, :].cpu() result_scores = scores[indices].cpu() result_classid = classid[indices].cpu() return result_boxes, result_scores, result_classid class myThread(threading.Thread): def __init__(self, func, args): threading.Thread.__init__(self) self.func = func self.args = args def run(self): self.func(*self.args) if __name__ == "__main__": # load custom plugins PLUGIN_LIBRARY = "./libmyplugins.so" ctypes.CDLL(PLUGIN_LIBRARY) engine_file_path = "./yolov5s-hand-docker-comm.engine" # load coco labels categories = ["hand"] # a YoLov5TRT instance yolov5_wrapper = YoLov5TRT(engine_file_path) input_image_paths = ["images/2.jpg"] for input_image_path in input_image_paths: # create a new thread to do inference thread1 = myThread(yolov5_wrapper.doInference, ["./"+input_image_path]) thread1.start() thread1.join() # destroy the instance # yolov5_wrapper.destroy()
image_raw: the original image h: original height w: original width """
random_line_split
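Because detection runs on the letterboxed frame, xywh2xyxy above subtracts the padding and divides by the resize ratio to map boxes back onto the original image. Below is a small worked sketch of that mapping for a single box; the function name and the example numbers are illustrative assumptions, not values from the script.

INPUT_W = INPUT_H = 256  # matches the constants in the script above

def xywh_to_xyxy_on_original(cx, cy, w, h, origin_w, origin_h):
    # Undo the letterbox: remove padding on the padded axis, then rescale.
    r_w, r_h = INPUT_W / origin_w, INPUT_H / origin_h
    if r_h > r_w:  # padding was added top and bottom
        pad = (INPUT_H - r_w * origin_h) / 2
        x1, x2 = cx - w / 2, cx + w / 2
        y1, y2 = cy - h / 2 - pad, cy + h / 2 - pad
        scale = r_w
    else:          # padding was added left and right
        pad = (INPUT_W - r_h * origin_w) / 2
        x1, x2 = cx - w / 2 - pad, cx + w / 2 - pad
        y1, y2 = cy - h / 2, cy + h / 2
        scale = r_h
    return [v / scale for v in (x1, y1, x2, y2)]

# e.g. a box centred in the network input, mapped back to a 1280x720 frame
print(xywh_to_xyxy_on_original(128, 128, 64, 64, origin_w=1280, origin_h=720))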
yolov5_trt12.py
""" An example that uses TensorRT's Python api to make inferences. """ import ctypes import os import random import sys import threading import time import cv2 import numpy as np import tensorrt as trt import torch import torchvision from trt_lite2 import TrtLite INPUT_W = 256 INPUT_H = 256 CONF_THRESH = 0.1 IOU_THRESHOLD = 0.4 labels = ['one', 'five', 'first', 'ok', 'heart single', 'yearh', 'three', 'four', 'six', 'i love you', 'gun', 'thumb up', 'nine', 'pink'] BATCH_SIZE = 1 ENGINE_PATH_21 = "./engine/resnet50_21.engine" ENGINE_PATH_GESTURE = "./engine/resnet50-gesture.engine" def plot_one_box(x, img, color=None, label=None, line_thickness=None): """ description: Plots one bounding box on image img, this function comes from YoLov5 project. param: x: a box likes [x1,y1,x2,y2] img: a opencv image object color: color to draw rectangle, such as (0,255,0) label: str line_thickness: int return: no return """ tl = ( line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 ) # line/font thickness color = color or [random.randint(0, 255) for _ in range(3)] c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) if label: tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled cv2.putText( img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA, ) def draw_bd_handpose(img_,hand_,x,y): thick = 2 colors = [(0,215,255),(255,115,55),(5,255,55),(25,15,255),(225,15,55)] # cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['1']['x']+x), int(hand_['1']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['1']['x']+x), int(hand_['1']['y']+y)),(int(hand_['2']['x']+x), int(hand_['2']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['2']['x']+x), int(hand_['2']['y']+y)),(int(hand_['3']['x']+x), int(hand_['3']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['3']['x']+x), int(hand_['3']['y']+y)),(int(hand_['4']['x']+x), int(hand_['4']['y']+y)), colors[0], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['5']['x']+x), int(hand_['5']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['5']['x']+x), int(hand_['5']['y']+y)),(int(hand_['6']['x']+x), int(hand_['6']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['6']['x']+x), int(hand_['6']['y']+y)),(int(hand_['7']['x']+x), int(hand_['7']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['7']['x']+x), int(hand_['7']['y']+y)),(int(hand_['8']['x']+x), int(hand_['8']['y']+y)), colors[1], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['9']['x']+x), int(hand_['9']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['9']['x']+x), int(hand_['9']['y']+y)),(int(hand_['10']['x']+x), int(hand_['10']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['10']['x']+x), int(hand_['10']['y']+y)),(int(hand_['11']['x']+x), int(hand_['11']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['11']['x']+x), int(hand_['11']['y']+y)),(int(hand_['12']['x']+x), int(hand_['12']['y']+y)), colors[2], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['13']['x']+x), int(hand_['13']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['13']['x']+x), int(hand_['13']['y']+y)),(int(hand_['14']['x']+x), int(hand_['14']['y']+y)), colors[3], thick) cv2.line(img_, 
(int(hand_['14']['x']+x), int(hand_['14']['y']+y)),(int(hand_['15']['x']+x), int(hand_['15']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['15']['x']+x), int(hand_['15']['y']+y)),(int(hand_['16']['x']+x), int(hand_['16']['y']+y)), colors[3], thick) cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['17']['x']+x), int(hand_['17']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['17']['x']+x), int(hand_['17']['y']+y)),(int(hand_['18']['x']+x), int(hand_['18']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['18']['x']+x), int(hand_['18']['y']+y)),(int(hand_['19']['x']+x), int(hand_['19']['y']+y)), colors[4], thick) cv2.line(img_, (int(hand_['19']['x']+x), int(hand_['19']['y']+y)),(int(hand_['20']['x']+x), int(hand_['20']['y']+y)), colors[4], thick) def drawhand(img,outputs,img_width,img_height): print(outputs) pts_hand = {} for i in range(int(outputs.shape[0] / 2)):
draw_bd_handpose(img, pts_hand, 0, 0) # 绘制关键点连线 # ------------- 绘制关键点 for i in range(int(outputs.shape[0] / 2)): x = (outputs[i * 2 + 0] * float(img_width)) y = (outputs[i * 2 + 1] * float(img_height)) cv2.circle(img, (int(x), int(y)), 3, (255, 50, 60), -1) cv2.circle(img, (int(x), int(y)), 1, (255, 150, 180), -1) class YoLov5TRT(object): """ description: A YOLOv5 class that warps TensorRT ops, preprocess and postprocess ops. """ def __init__(self, engine_file_path): TRT_LOGGER = trt.Logger(trt.Logger.INFO) trt_yolo = TrtLite(engine_file_path=engine_file_path) trt_yolo.print_info() self.buffers = trt_yolo.allocate_io_buffers(1, True) self.trt_yolo = trt_yolo # 识别人手的21个关键点 self.trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21) self.trt_lite21.print_info() # 识别手势 self.trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE) self.trt_lite_gesture.print_info() def doInference(self,image_path): threading.Thread.__init__(self) # Do image preprocess input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_path) self.buffers[0] = torch.from_numpy(input_image.ravel()).cuda() bindings = [t.data_ptr() for t in self.buffers] self.trt_yolo.execute(bindings, BATCH_SIZE) host_outputs = self.buffers[1].clone().cpu().detach().numpy() torch.cuda.synchronize() print(host_outputs.shape) output = host_outputs.ravel() # Do postprocess result_boxes, result_scores, result_classid = self.post_process( output, origin_h, origin_w ) print(output.shape,len(result_boxes)) # Draw rectangles and labels on the original image for i in range(len(result_boxes)): box = result_boxes[i] print("box>>>",box) # 截出手的部位 image_hand = image_raw[int(box[1]):int(box[3]),int(box[0]):int(box[2])] # 推理手的21个特征点 hand_data = self.preprocess_hand(image_hand) output21 = self.doInference_resnet(self.trt_lite21,hand_data.ravel()) # 推理手势 output_gesture = self.doInference_resnet(self.trt_lite_gesture, hand_data.ravel()) print("gesture:",output_gesture) index = np.argmax(output_gesture) label = labels[index] hand_width = int(box[2])-int(box[0]) hand_height = int(box[3])-int(box[1]) drawhand(image_hand,output21,hand_width,hand_height) print("w,h:",hand_width,hand_height) cv2.imwrite("hand_11.jpg", image_hand) plot_one_box( box, image_raw, label="{}:{:.2f}".format( label, result_scores[i] ), ) parent, filename = os.path.split(input_image_path) save_name = os.path.join(parent, "output_" + filename) #  Save image cv2.imwrite(save_name, image_raw) print("save img success") def doInference_resnet(self,trt_engine, data): i2shape = 1 io_info = trt_engine.get_io_info(i2shape) print(io_info) d_buffers = trt_engine.allocate_io_buffers(i2shape, True) print(io_info[1][2]) d_buffers[0] = data.cuda() bindings = [t.data_ptr() for t in d_buffers] # 进行推理 trt_engine.execute(bindings, i2shape) # output_data_trt = d_buffers[1].clone().cpu().detach().numpy() torch.cuda.synchronize() host_out = output_data_trt.ravel() return host_out def preprocess_hand(self,img): img_width = img.shape[1] img_height = img.shape[0] print(img.shape) # 输入图片预处理 img_ = cv2.resize(img, (224,224), interpolation=cv2.INTER_CUBIC) img_ = img_.astype(np.float32) img_ = (img_ - 128.) / 256. img_ = img_.transpose(2, 0, 1) img_ = torch.from_numpy(img_) img_ = img_.unsqueeze_(0) return img_ def preprocess_image(self, input_image_path): """ description: Read an image from image path, convert it to RGB, resize and pad it to target size, normalize to [0,1], transform to NCHW format. 
param: input_image_path: str, image path return: image: the processed image image_raw: the original image h: original height w: original width """ image_raw = cv2.imread(input_image_path) h, w, c = image_raw.shape image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB) # Calculate widht and height and paddings r_w = INPUT_W / w r_h = INPUT_H / h if r_h > r_w: tw = INPUT_W th = int(r_w * h) tx1 = tx2 = 0 ty1 = int((INPUT_H - th) / 2) ty2 = INPUT_H - th - ty1 else: tw = int(r_h * w) th = INPUT_H tx1 = int((INPUT_W - tw) / 2) tx2 = INPUT_W - tw - tx1 ty1 = ty2 = 0 # Resize the image with long side while maintaining ratio image = cv2.resize(image, (tw, th)) # Pad the short side with (128,128,128) image = cv2.copyMakeBorder( image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, (128, 128, 128) ) image = image.astype(np.float32) # Normalize to [0,1] image /= 255.0 # HWC to CHW format: image = np.transpose(image, [2, 0, 1]) # CHW to NCHW format image = np.expand_dims(image, axis=0) # Convert the image to row-major order, also known as "C order": image = np.ascontiguousarray(image) return image, image_raw, h, w def xywh2xyxy(self, origin_h, origin_w, x): """ description: Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right param: origin_h: height of original image origin_w: width of original image x: A boxes tensor, each row is a box [center_x, center_y, w, h] return: y: A boxes tensor, each row is a box [x1, y1, x2, y2] """ y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) r_w = INPUT_W / origin_w r_h = INPUT_H / origin_h if r_h > r_w: y[:, 0] = x[:, 0] - x[:, 2] / 2 y[:, 2] = x[:, 0] + x[:, 2] / 2 y[:, 1] = x[:, 1] - x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2 y[:, 3] = x[:, 1] + x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2 y /= r_w else: y[:, 0] = x[:, 0] - x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2 y[:, 2] = x[:, 0] + x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2 y[:, 1] = x[:, 1] - x[:, 3] / 2 y[:, 3] = x[:, 1] + x[:, 3] / 2 y /= r_h return y def post_process(self, output, origin_h, origin_w): """ description: postprocess the prediction param: output: A tensor likes [num_boxes,cx,cy,w,h,conf,cls_id, cx,cy,w,h,conf,cls_id, ...] 
origin_h: height of original image origin_w: width of original image return: result_boxes: finally boxes, a boxes tensor, each row is a box [x1, y1, x2, y2] result_scores: finally scores, a tensor, each element is the score correspoing to box result_classid: finally classid, a tensor, each element is the classid correspoing to box """ # Get the num of boxes detected num = int(output[0]) # Reshape to a two dimentional ndarray pred = np.reshape(output[1:], (-1, 6))[:num, :] # to a torch Tensor pred = torch.Tensor(pred).cuda() # Get the boxes boxes = pred[:, :4] # Get the scores scores = pred[:, 4] # Get the classid classid = pred[:, 5] # Choose those boxes that score > CONF_THRESH si = scores > CONF_THRESH boxes = boxes[si, :] scores = scores[si] classid = classid[si] # Trandform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2] boxes = self.xywh2xyxy(origin_h, origin_w, boxes) # Do nms indices = torchvision.ops.nms(boxes, scores, iou_threshold=IOU_THRESHOLD).cpu() result_boxes = boxes[indices, :].cpu() result_scores = scores[indices].cpu() result_classid = classid[indices].cpu() return result_boxes, result_scores, result_classid class myThread(threading.Thread): def __init__(self, func, args): threading.Thread.__init__(self) self.func = func self.args = args def run(self): self.func(*self.args) if __name__ == "__main__": # load custom plugins PLUGIN_LIBRARY = "./libmyplugins.so" ctypes.CDLL(PLUGIN_LIBRARY) engine_file_path = "./yolov5s-hand-docker-comm.engine" # load coco labels categories = ["hand"] # a YoLov5TRT instance yolov5_wrapper = YoLov5TRT(engine_file_path) input_image_paths = ["images/2.jpg"] for input_image_path in input_image_paths: # create a new thread to do inference thread1 = myThread(yolov5_wrapper.doInference, ["./"+input_image_path]) thread1.start() thread1.join() # destroy the instance # yolov5_wrapper.destroy()
x = (outputs[i * 2 + 0] * float(img_width)) y = (outputs[i * 2 + 1] * float(img_height)) pts_hand[str(i)] = {} pts_hand[str(i)] = { "x": x, "y": y, }
conditional_block
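drawhand above treats the keypoint model output as 42 normalized values, alternating x and y for the 21 joints, and scales each pair by the hand crop's width and height before drawing. A minimal decoding sketch, assuming a flat float array of length 42 in [0, 1]:

import numpy as np

def decode_keypoints(outputs, crop_w, crop_h):
    # outputs is laid out as x0, y0, x1, y1, ..., x20, y20 in normalized coords.
    pts = outputs.reshape(-1, 2) * np.array([crop_w, crop_h], dtype=np.float32)
    return {str(i): {"x": float(x), "y": float(y)} for i, (x, y) in enumerate(pts)}

dummy = np.random.rand(42).astype(np.float32)  # stand-in for the model output
print(decode_keypoints(dummy, crop_w=224, crop_h=224)["0"])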
game.js
var Game = function(cid, w, h, callback){ var that = this; var txtColor = "#333"; //default text color var fps = 30; // add event listeners, this will store key pressed on key down in an array and remove on keyup document.addEventListener('keydown', function(e){ var key = e.keyCode; var index = that.keysPressed.indexOf(key); if(index === -1){ that.keysPressed.push(key); } }); document.addEventListener('keyup', function(e){ var key = e.keyCode; var index = that.keysPressed.indexOf(key); if(index > -1){ that.keysPressed.splice(index, 1); } }); /***********************/ /* member variables /***********************/ var ca = this.ca = document.getElementById(cid); var cx = this.cx = ca.getContext('2d'); this.ts = 20; //tile size in pixels this.ca.setAttribute('width', w*this.ts); this.ca.setAttribute('height', h*this.ts); this.tileW = w; //tile width this.tileH = h; //tile height this.w = parseInt(this.ca.getAttribute('width')); //pixel width this.h = parseInt(this.ca.getAttribute('height')); //pixel height this.topStart = 2; //start of game window this.objQ = []; //object queue this.iLoaded = false; //images loaded this.tick = new Date().getTime(); //draw tick this.utick = new Date().getTime(); //update tick this.currLevel = 0; this.levelObj; //current level object this.keysPressed = []; //array to store keys currently being pressed this.timePlayed = 0; /***********************/ /* member methods /***********************/ this.init = function(){ // initialize this.loadResourses(); //we must wait for images to be loaded before loading start screen if(this.iLoaded == false){ var wait = function(){ if(this.iLoaded == false){ setTimeout(wait, 300); }else{ this.loadLevel('start'); callback.apply(this); } }; setTimeout(wait, 300); } }; this.update = function(){ /****************************************/ /* update happens on each loop iteration. /* first run current level update method. /* loop through objects in object queue /* and run each object's update method. /****************************************/ n = new Date().getTime(); var i = n - this.utick; //i is interval since last update, passed to all update methods if(this.levelObj !== undefined){ this.levelObj.update(i); } for(o in this.objQ){ if(this.objQ[o] !== undefined){ this.objQ[o].update(i); } } this.utick = n; //reset update tick }; this.draw = function(){ /****************************************/ /* draw happens at fps rate. /* first run current level draw method. /* loop through objects in object queue /* and run each object's draw method. /****************************************/ this.cx.clearRect(0, 0, this.w, this.h); //clear screen if(this.levelObj !== undefined){ this.levelObj.draw(); } for(o in this.objQ){ if(this.objQ[o] !== undefined){ this.objQ[o].draw(); } } }; this.run = function(){ //run update on every loop iteration that.update(); //run draw at fps rate var thisTick = new Date().getTime(); var i = thisTick - that.tick; if(i >= 1000 / fps){ that.draw(); that.tick = thisTick; } }; this.loadLevel = function(l){ //load a level or load start or over screen this.objQ = []; if(l === 'start'){ this.levelObj = new startScreen(); }else if(l === 'over'){ this.levelObj = new overScreen(); }else{ this.objQ = []; this.currLevel = l; this.levelObj = new Level(this, 'tile'); } }; this.loadResourses = function(){ /****************************************/ /* load resources from file stored in /* resourcesFile variable. 
/* file currently only stores sprite data /****************************************/ var resources = ' {'; resources += ' "spriteSheet": "spritesheet.png",'; resources += ' "sprites":{'; resources += ' "hero":{"sx":0, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "zombie":{"sx":80, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "tile":{"sx":160, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "bullet":{"sx":240, "sy":0, "w":20, "h":20, "dimM":1, "frames":1}'; resources += ' }'; resources += ' }'; var jData = JSON.parse(resources); var spriteData = jData.sprites; var spriteSheet = jData.spriteSheet; sprites.load(spriteSheet, spriteData, function(){ that.iLoaded = true; }); }; //sprites object, loads sprite data and handles draw for sprites var sprites = new function(){ this.data = {}; //date stores x and y coord on spritesheet, w and h of sprite, and number of frames this.load = function(rImage, data, callback){ this.data = data; this.image = new Image(); this.image.onload = callback; this.image.src = rImage; }; this.draw = function(sprite, x, y, frame){ var s = this.data[sprite]; //this sprite frame = !frame ? 0 : frame; //default frame is 0 cx.drawImage(this.image, s.sx + frame * s.w, s.sy, s.w, s.h, x, y, s.w*s.dimM, s.h*s.dimM); }; }; /***********************/ /* private classes /***********************/ var startScreen = function(){ // start screen. // on update check if enter key is pressed // load level 1 if enter is pressed this.update = function(){ if(that.keysPressed.indexOf(13) > -1){ that.loadLevel(1); } }; this.draw = function(){ if(that.cx !== undefined){ that.cx.fillStyle=txtColor; that.cx.font = "48px verdana"; that.cx.textAlign = "center"; that.cx.fillText("Gravity Control", that.w/2, (that.h/2) - 75); that.cx.font = "14px verdana"; that.cx.fillText("The world as we know it has ended, even Gravity is out of wack.", that.w/2, that.h/2); that.cx.fillText("Zombies are everywhere. Your mission is to kill as many of them as you can.", that.w/2, that.h/2 + 30); that.cx.fillText("You have a jetpack with enough fuel for 5 seconds per level. Use it wisely.", that.w/2, that.h/2 + 60); that.cx.font = "10px verdana"; that.cx.fillText("{left and right arrow} move from side to side, {space} fire, {up arrow} use jetpack.", that.w/2, that.h/2 + 100); that.cx.font = "14px verdana"; that.cx.fillText("Press ENTER to start.", that.w/2, that.h/2 + 140); } }; }; var overScreen = function(){ // game over screen. // display scores. // on update check if enter or esc key is pressed // load level 1 if enter is pressed // load start screen if esc is pressed var totalTime = that.timePlayed; that.timePlayed = 0; this.update = function(){ if(that.keysPressed.indexOf(27) > -1){ that.loadLevel('start'); }else if(that.keysPressed.indexOf(13) > -1){ that.loadLevel(1); } }; this.draw = function(){ if(that.cx !== undefined){ var timePerLevel = parseInt(totalTime/1000) / (that.currLevel - 1); var scorePerLevelComplete = 100; var score = (scorePerLevelComplete - timePerLevel) * (that.currLevel - 1); score = isNaN(score) ? 0 : score; var scoreMsg = score > 0 ? ". Well done." : ". Opps, better luck next time." 
that.cx.fillStyle=txtColor; that.cx.font = "48px verdana"; that.cx.fillText("Game Over", that.w/2, that.h/2 - 100); that.cx.font = "16px verdana"; that.cx.fillText("You completed " + (that.currLevel - 1) + " levels in " + (totalTime/1000).toFixed(2) + " seconds.", that.w/2, that.h/2 -50); that.cx.fillText("Your score is " + score + scoreMsg, that.w/2, that.h/2 -20); that.cx.font = "12px verdana"; that.cx.fillText("Press ESC to go back to start screen.", that.w/2, that.h/2 + 50); that.cx.fillText("Press ENTER to go restart at level 1.", that.w/2, that.h/2 + 80); } }; }; var Hero = function(game, x, y, sprite){ /***********************/ /* hero class /***********************/ //start tile position this.x = x; this.y = y; this.frame = 1; // start frame this.s = sprite; // hero sprite //hero movement speed this.dx = 0; this.dy = 0; // tick used to control movement speed // hero will move dx or dy distance every // 50 ms this.xTick = 0; this.yTick = 0; // jetpack this.jetpack = false; this.jetpackTimer = 5000; this.fireTick = 0; // tick to control fire frequency var that = this; var chkCol = function(t, h, e){ // check collision with map // or with enemy if(t == "map"){ var yBelow = Math.ceil(that.y)-1; var xBelow1 = Math.floor(that.x); var xBelow2 = Math.ceil(that.x); // check collision with tiles below // and on either side of hero var tileBelow1 = game.levelObj.getTile(xBelow1, yBelow); var tileBelow2 = game.levelObj.getTile(xBelow2, yBelow); if(tileBelow1 == 1 || tileBelow2 == 1){ return true; } return false; }else{ // check collision with object h and object e var abs = Math.abs; return (abs(h.x - e.x) * 2 < (1)) && (abs(h.y - e.y) * 2 < (1)); } }; this.update = function(i){ // add interval to all ticks this.xTick += i; this.yTick += i; this.fireTick += i; // check if any relevant keys are being pressed var left = game.keysPressed.indexOf(37); var right = game.keysPressed.indexOf(39); var jetpack = game.keysPressed.indexOf(40); var fire = game.keysPressed.indexOf(32); // if down arrow is pressed, set jetpack to true and // update jetpackTimer with interval // if allowed jetpack time is exceeded, set jetpack to false this.jetpack = jetpack > -1 ? true : false; this.jetpackTimer = this.jetpack ? this.jetpackTimer - i : this.jetpackTimer; if(this.jetpackTimer <= 0){ this.jetpack = false; this.jetpackTimer = 0; } // check for right and left arrow key and set dx accordingly this.dx = left > -1 ? -0.25 : right > -1 ? 0.25 : 0; // check for space key and run fire method if pressed if(fire > -1)this.fire(); // if jetpack is true, set dy accordingly if(this.jetpack){ this.dy = 0.25; }else{ // if jetpack is false, check for // collision with tile and set dy to // 0 if true and apply fall to dy if // false. 
if(this.y % 1 == 0){ if(chkCol('map')){ this.dy = 0; this.yTick = 0; }else{ this.dy = -0.25; } } } // apply dx and dy to x and y every 50 ms // then reset x and y Ticks if(this.xTick >= 50){ this.x += this.dx; this.xTick = 0; } if(this.yTick >= 50){ this.y += this.dy; this.yTick = 0; } // change directional frame accordingly var f = this.frame; if(this.dx > 0){ f = 1; }else if(this.dx < 0){ f = 0; } // add 2 to frame if jetpack is true if(this.jetpack && (f == 1 || f == 0)){ f = f + 2; }else if(!this.jetpack && (f > 1)){ f = f - 2; } this.frame = f; // check if hero has gone off screen and end game if so if(this.x*game.ts > game.w || this.x*game.ts < 0 || this.y*game.ts > game.h || this.y*game.ts < game.topStart){ game.loadLevel('over'); } // check for collision with enemy // this also checks if there are no enemies // left and loads the next level if that's // the case var levelOver = true; for(var i in game.objQ){ if(game.objQ[i].constructor.name == 'Enemy'){ levelOver = false; e = game.objQ[i]; if(chkCol('enemy', this, e)){ game.loadLevel('over'); break; } } } if(levelOver)loadLevel(game.currLevel + 1); // if level is complete load next level }; this.draw = function(){ // draws the jetpack timer and the hero sprite game.cx.fillStyle=txtColor; game.cx.font = "12px verdana"; game.cx.fillText("Jetpack time remaining: " + (this.jetpackTimer/1000).toFixed(2) + " seconds", game.w-140, 25); sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame); }; this.fire = function(){ // fires a bullet every 250 ms if the fire // key is pressed if(this.fireTick > 250){ // create a new bullet object at the hero's // position and add it to the object queue var dir = [1,3].indexOf(this.frame) == -1 ? -1 : 1; // direction var bullet = new Bullet(game, this.x, this.y, dir, 'bullet'); game.objQ.push(bullet); this.fireTick = 0; } }; }; var Bullet = function(game, x, y, dir, sprite){ this.s = sprite; this.x = x; this.y = y; this.dir = dir; // direction this.animTick = 0; // move bullet every 25 ms var chkCol = function(b, e){ // check collision between b and e objects // b = bullet, e = enemy var abs = Math.abs; return (abs(b.x - e.x) * 2 < (1)) && (abs(b.y - e.y) * 2 < (1)); }; this.destroy = function(){ // if bullet exists in object queue // remove it var index = game.objQ.indexOf(this); if(index > -1){ game.objQ.splice(index, 1); } }; this.update = function(i){ // update bullet x position every animTick this.animTick += i; if(this.animTick >= 25){ this.x += dir > 0 ? 0.25 : -0.25; this.animTick = 0; } // loop through enemy objects and check for // collision. If collided, destroy bullet // and enemy. for(var i in game.objQ){ if(game.objQ[i].constructor.name == 'Enemy'){ e = game.objQ[i]; if(chkCol(this, e)){ this.destroy(); e.destroy(); break; } } } // destroy bullet if gone off screen if(this.x > game.tileW || this.x < 0 || this.y > game.tileH || this.y < 0){ this.destroy(); } }; this.draw = function(){ // draw bullet sprites.draw(this.s,this.x*game.ts,this.y*game.ts); }; return this; }; function Enemy(game, group, sprite)
; var Level = function(game, s){ /***********************/ /* level class /***********************/ // map array. This is a 2d array of tiles generated from the platform // groups created in this.init(). Format = [0,0,0,0,0,1,1,1,1...] var map = []; var tileGroup = Math.floor(Math.random() * 2); // determines the tiles to use var sprite = s; // tile sprite for platforms this.init = function(){ // initialize level var m = []; // map array to hold tile groups var enemies = []; // array to hold enemies /***********************************/ /* generate the map dynamically /***********************************/ var platforms = Math.ceil(Math.random() * 2) + 4; // first calculate number of platforms var yVals = []; // array to hold the y value of each platform var lastVal = -1; // last platform's y value, this is to prevent platforms from being too close to each other // loop through platforms and generate [x,y] value group for(var n = 0; n <= platforms; n++){ // y value is calculated at random within a range // of 4 and minimum 4 tiles away from last y value var tY = Math.floor(Math.random() * 4) + lastVal + 4; if(tY < 35){ // don't create any platforms past tile 35, this is to sllow space for start platform // generate x start and x end values. x start is contained within // the first half of the map and x end is contained within the second // half of the map. There is provision for blank space on either side // of the platform to prevent the platform from spanning the width of the map var tX1 = Math.floor(Math.random() * (game.tileW / 2)) + 1; var tX2 = Math.floor(Math.random() * ((game.tileW / 2) - 10)) + (game.tileW / 2) + 10; var xVal = [tX1, tX2]; // set the xVal group var thisP = [xVal, tY]; // set the tile group m.push(thisP); // push this group to the map array // generate random number of enemies for this platform // this is based off of the size of the platform var e = Math.floor((tX2-tX1)/3); for(var i = 0; i <= e; i++){ // add enemy to enemy array enemies.push([n,"zombie"]); // format is [platform, sprite] } lastVal = tY; // reset last y value } } m.push([[0,1],37]); // add hero start platform // convert the m array storing platform groups into the // map array which stores tile values of 0 or 1. // format of map = [0,0,0,0,1,1,1,1,1,0...] var cPos = 0; // current position in 2 d array for(g in m){ // loop through each group var group = m[g]; // get x and y start and end values var xs = typeof(group[0]) !== 'number' ? group[0][0] : group[0]; var xe = typeof(group[0]) !== 'number' ? group[0][1] : group[0]; var ys = typeof(group[1]) !== 'number' ? group[1][0] : group[1]; var ye = typeof(group[1]) !== 'number' ? group[1][1] : group[1]; // convert [x,y] start value to 2d array index. // add [0] tiles to map array from cPos to // calculated start value, then add [1] tiles // to map array from start value to end value. for(;cPos < (ys*that.tileW)+xs; cPos++){ map[cPos] = 0; } for(;cPos < (ye*that.tileW)+xe; cPos++){ map[cPos] = 1; } } // after all platforms are created, complete map array // with [0] tiles. 
for(;cPos < that.tileW * that.tileH; cPos++){ map[cPos] = 0; } // loop through enemies array and create them // then add them to object queue for(var i = 0; i < enemies.length; i++){ var group = m[enemies[i][0]]; // get enemy group var sprite = enemies[i][1]; // get enemy sprite var thisEnemy = new Enemy(game, group, sprite); // create new enemy game.objQ.push(thisEnemy); // add to object queue } // create the hero and add to object queue var hero = new Hero(game, 0, 38, 'hero'); game.objQ.push(hero); }; this.update = function(i){ // update total time played game.timePlayed += i; }; this.draw = function(){ // draw each tile for(i in map){ var tx = i % game.tileW; var ty = parseInt(i / game.tileW); sprites.draw(sprite, tx*game.ts,ty*game.ts,(tileGroup*2) + 1); if(map[i] === 1){ sprites.draw(sprite, tx*game.ts,ty*game.ts,(tileGroup*2) + 0); } } // draw current level and time elapsed game.cx.fillStyle="#FFF"; //set background color game.cx.fillRect(0, 0, game.w, game.topStart * game.ts); //fill background game.cx.fillStyle=txtColor; game.cx.font = "12px verdana"; game.cx.fillText("Current Level: " + game.currLevel, 70, 25); game.cx.fillText("Lapsed Time: " + (game.timePlayed/1000).toFixed(2) + " seconds", 230, 25); }; this.getMap = function(){ return map; // return map }; this.getTile = function(x,y){ // return 0 or 1 for tile and [x,y] position var pos = (y * game.tileW) + x; return map[pos]; }; this.init(); // initialize level return this; }; this.init(); //initialize game return this; };
{ // x, y, sx, sy, this.s = sprite; this.frame = 0; // start frame // movement tick this.xtick = 0; this.ytick = 0; // variables to restrict movement to given platform // xs = xStart, xe = xEnd. They correspond to tile // start and tile end positions this.xs = typeof(group[0]) !== 'number' ? group[0][0] : group[0]; this.xe = typeof(group[0]) !== 'number' ? group[0][1]-1 : group[0]-1; this.ys = typeof(group[1]) !== 'number' ? group[1][0] : group[1]; this.ye = typeof(group[1]) !== 'number' ? group[1][1]-1 : group[1]-1; // set start position and speed this.x = Math.floor(Math.random() * (this.xe - this.xs)) + this.xs; this.y = Math.floor(Math.random() * (this.ye - this.ys)) + (this.ys+2); this.dx = ((Math.random() * 100) + 25) * (Math.ceil((Math.random() * 2)) === 1 ? 1 : -1); this.dy = 0; this.dir = this.dx < 0 ? 0 : 2; // direction used to calculate correct frame to display this.animTick = 0; // change sprite frame every 250 ms var that = this; this.update = function(i){ // update tics with interval this.animTick += i; this.xtick += i; this.ytick += i; // do animation every 250 ms if(this.animTick >= 250){ this.frame++; if(this.frame > 1)this.frame = 0; this.animTick = 0; } // enemy movement is a bit different from every other moverment // because enemy movement must be confined to a platform, it's // easier to move in 0.25 increments, so the speed calculation // is the time in ms, to update the enemy position. if(this.xtick >= Math.abs(this.dx)){ this.x += this.dx < 0 ? -0.25 : this.dx == 0 ? 0 : 0.25; this.xtick = 0; if(this.x >= this.xe || this.x <= this.xs){ // flip direction and set x to the edge of platform this.x = this.x >= this.xe ? this.xe : this.xs; this.dx *= -1; this.dir = this.dx < 0 ? 0 : 2; } } if(this.ytick >= Math.abs(this.dy)){ this.y += this.dy < 0 ? -0.25 : this.dy == 0 ? 0 : 0.25; this.ytick = 0; if(this.y == this.ye || this.y == this.ys){ this.dy *= -1; } } }; this.draw = function(){ sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame+this.dir); }; this.destroy = function(){ // check if this enemy is in the object queue and remove it var index = game.objQ.indexOf(this); if(index > -1){ game.objQ.splice(index, 1); } }; return this; }
identifier_body
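The Level class in game.js stores the tile map as a flat row-major array, so getTile(x, y) reduces to map[y * tileW + x]. The game itself is JavaScript; the sketch below restates that indexing in Python only to keep the added examples in one language, and the 40x40 board size is an assumption for the example.

TILE_W, TILE_H = 40, 40  # assumed board dimensions, not taken from game.js

def get_tile(tile_map, x, y):
    # Row-major lookup, equivalent to Level.getTile.
    return tile_map[y * TILE_W + x]

flat_map = [0] * (TILE_W * TILE_H)
flat_map[37 * TILE_W + 0] = 1     # mark the hero start platform tile at (x=0, y=37)
print(get_tile(flat_map, 0, 37))  # -> 1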
game.js
var Game = function(cid, w, h, callback){ var that = this; var txtColor = "#333"; //default text color var fps = 30; // add event listeners, this will store key pressed on key down in an array and remove on keyup document.addEventListener('keydown', function(e){ var key = e.keyCode; var index = that.keysPressed.indexOf(key); if(index === -1){ that.keysPressed.push(key); } }); document.addEventListener('keyup', function(e){ var key = e.keyCode; var index = that.keysPressed.indexOf(key); if(index > -1){ that.keysPressed.splice(index, 1); } }); /***********************/ /* member variables /***********************/ var ca = this.ca = document.getElementById(cid); var cx = this.cx = ca.getContext('2d'); this.ts = 20; //tile size in pixels this.ca.setAttribute('width', w*this.ts); this.ca.setAttribute('height', h*this.ts); this.tileW = w; //tile width this.tileH = h; //tile height this.w = parseInt(this.ca.getAttribute('width')); //pixel width this.h = parseInt(this.ca.getAttribute('height')); //pixel height this.topStart = 2; //start of game window this.objQ = []; //object queue this.iLoaded = false; //images loaded this.tick = new Date().getTime(); //draw tick this.utick = new Date().getTime(); //update tick this.currLevel = 0; this.levelObj; //current level object this.keysPressed = []; //array to store keys currently being pressed this.timePlayed = 0; /***********************/ /* member methods /***********************/ this.init = function(){ // initialize this.loadResourses(); //we must wait for images to be loaded before loading start screen if(this.iLoaded == false){ var wait = function(){ if(this.iLoaded == false){ setTimeout(wait, 300); }else{ this.loadLevel('start'); callback.apply(this); } }; setTimeout(wait, 300); } }; this.update = function(){ /****************************************/ /* update happens on each loop iteration. /* first run current level update method. /* loop through objects in object queue /* and run each object's update method. /****************************************/ n = new Date().getTime(); var i = n - this.utick; //i is interval since last update, passed to all update methods if(this.levelObj !== undefined){ this.levelObj.update(i); } for(o in this.objQ){ if(this.objQ[o] !== undefined){ this.objQ[o].update(i); } } this.utick = n; //reset update tick }; this.draw = function(){ /****************************************/ /* draw happens at fps rate. /* first run current level draw method. /* loop through objects in object queue /* and run each object's draw method. /****************************************/ this.cx.clearRect(0, 0, this.w, this.h); //clear screen if(this.levelObj !== undefined){ this.levelObj.draw(); } for(o in this.objQ){ if(this.objQ[o] !== undefined){ this.objQ[o].draw(); } } }; this.run = function(){ //run update on every loop iteration that.update(); //run draw at fps rate var thisTick = new Date().getTime(); var i = thisTick - that.tick; if(i >= 1000 / fps){ that.draw(); that.tick = thisTick; } }; this.loadLevel = function(l){ //load a level or load start or over screen this.objQ = []; if(l === 'start'){ this.levelObj = new startScreen(); }else if(l === 'over'){ this.levelObj = new overScreen(); }else{ this.objQ = []; this.currLevel = l; this.levelObj = new Level(this, 'tile'); } }; this.loadResourses = function(){ /****************************************/ /* load resources from file stored in /* resourcesFile variable. 
/* file currently only stores sprite data /****************************************/ var resources = ' {'; resources += ' "spriteSheet": "spritesheet.png",'; resources += ' "sprites":{'; resources += ' "hero":{"sx":0, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "zombie":{"sx":80, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "tile":{"sx":160, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "bullet":{"sx":240, "sy":0, "w":20, "h":20, "dimM":1, "frames":1}'; resources += ' }'; resources += ' }'; var jData = JSON.parse(resources); var spriteData = jData.sprites; var spriteSheet = jData.spriteSheet; sprites.load(spriteSheet, spriteData, function(){ that.iLoaded = true; }); }; //sprites object, loads sprite data and handles draw for sprites var sprites = new function(){ this.data = {}; //date stores x and y coord on spritesheet, w and h of sprite, and number of frames this.load = function(rImage, data, callback){ this.data = data; this.image = new Image(); this.image.onload = callback; this.image.src = rImage; }; this.draw = function(sprite, x, y, frame){ var s = this.data[sprite]; //this sprite frame = !frame ? 0 : frame; //default frame is 0 cx.drawImage(this.image, s.sx + frame * s.w, s.sy, s.w, s.h, x, y, s.w*s.dimM, s.h*s.dimM); }; }; /***********************/ /* private classes /***********************/ var startScreen = function(){ // start screen. // on update check if enter key is pressed // load level 1 if enter is pressed this.update = function(){ if(that.keysPressed.indexOf(13) > -1){ that.loadLevel(1); } }; this.draw = function(){ if(that.cx !== undefined){ that.cx.fillStyle=txtColor; that.cx.font = "48px verdana"; that.cx.textAlign = "center"; that.cx.fillText("Gravity Control", that.w/2, (that.h/2) - 75); that.cx.font = "14px verdana"; that.cx.fillText("The world as we know it has ended, even Gravity is out of wack.", that.w/2, that.h/2); that.cx.fillText("Zombies are everywhere. Your mission is to kill as many of them as you can.", that.w/2, that.h/2 + 30); that.cx.fillText("You have a jetpack with enough fuel for 5 seconds per level. Use it wisely.", that.w/2, that.h/2 + 60); that.cx.font = "10px verdana"; that.cx.fillText("{left and right arrow} move from side to side, {space} fire, {up arrow} use jetpack.", that.w/2, that.h/2 + 100); that.cx.font = "14px verdana"; that.cx.fillText("Press ENTER to start.", that.w/2, that.h/2 + 140); } }; }; var overScreen = function(){ // game over screen. // display scores. // on update check if enter or esc key is pressed // load level 1 if enter is pressed // load start screen if esc is pressed var totalTime = that.timePlayed; that.timePlayed = 0; this.update = function(){ if(that.keysPressed.indexOf(27) > -1){ that.loadLevel('start'); }else if(that.keysPressed.indexOf(13) > -1){ that.loadLevel(1); } }; this.draw = function(){ if(that.cx !== undefined){ var timePerLevel = parseInt(totalTime/1000) / (that.currLevel - 1); var scorePerLevelComplete = 100; var score = (scorePerLevelComplete - timePerLevel) * (that.currLevel - 1); score = isNaN(score) ? 0 : score;
that.cx.fillStyle=txtColor; that.cx.font = "48px verdana"; that.cx.fillText("Game Over", that.w/2, that.h/2 - 100); that.cx.font = "16px verdana"; that.cx.fillText("You completed " + (that.currLevel - 1) + " levels in " + (totalTime/1000).toFixed(2) + " seconds.", that.w/2, that.h/2 -50); that.cx.fillText("Your score is " + score + scoreMsg, that.w/2, that.h/2 -20); that.cx.font = "12px verdana"; that.cx.fillText("Press ESC to go back to start screen.", that.w/2, that.h/2 + 50); that.cx.fillText("Press ENTER to go restart at level 1.", that.w/2, that.h/2 + 80); } }; }; var Hero = function(game, x, y, sprite){ /***********************/ /* hero class /***********************/ //start tile position this.x = x; this.y = y; this.frame = 1; // start frame this.s = sprite; // hero sprite //hero movement speed this.dx = 0; this.dy = 0; // tick used to control movement speed // hero will move dx or dy distance every // 50 ms this.xTick = 0; this.yTick = 0; // jetpack this.jetpack = false; this.jetpackTimer = 5000; this.fireTick = 0; // tick to control fire frequency var that = this; var chkCol = function(t, h, e){ // check collision with map // or with enemy if(t == "map"){ var yBelow = Math.ceil(that.y)-1; var xBelow1 = Math.floor(that.x); var xBelow2 = Math.ceil(that.x); // check collision with tiles below // and on either side of hero var tileBelow1 = game.levelObj.getTile(xBelow1, yBelow); var tileBelow2 = game.levelObj.getTile(xBelow2, yBelow); if(tileBelow1 == 1 || tileBelow2 == 1){ return true; } return false; }else{ // check collision with object h and object e var abs = Math.abs; return (abs(h.x - e.x) * 2 < (1)) && (abs(h.y - e.y) * 2 < (1)); } }; this.update = function(i){ // add interval to all ticks this.xTick += i; this.yTick += i; this.fireTick += i; // check if any relevant keys are being pressed var left = game.keysPressed.indexOf(37); var right = game.keysPressed.indexOf(39); var jetpack = game.keysPressed.indexOf(40); var fire = game.keysPressed.indexOf(32); // if down arrow is pressed, set jetpack to true and // update jetpackTimer with interval // if allowed jetpack time is exceeded, set jetpack to false this.jetpack = jetpack > -1 ? true : false; this.jetpackTimer = this.jetpack ? this.jetpackTimer - i : this.jetpackTimer; if(this.jetpackTimer <= 0){ this.jetpack = false; this.jetpackTimer = 0; } // check for right and left arrow key and set dx accordingly this.dx = left > -1 ? -0.25 : right > -1 ? 0.25 : 0; // check for space key and run fire method if pressed if(fire > -1)this.fire(); // if jetpack is true, set dy accordingly if(this.jetpack){ this.dy = 0.25; }else{ // if jetpack is false, check for // collision with tile and set dy to // 0 if true and apply fall to dy if // false. 
if(this.y % 1 == 0){ if(chkCol('map')){ this.dy = 0; this.yTick = 0; }else{ this.dy = -0.25; } } } // apply dx and dy to x and y every 50 ms // then reset x and y Ticks if(this.xTick >= 50){ this.x += this.dx; this.xTick = 0; } if(this.yTick >= 50){ this.y += this.dy; this.yTick = 0; } // change directional frame accordingly var f = this.frame; if(this.dx > 0){ f = 1; }else if(this.dx < 0){ f = 0; } // add 2 to frame if jetpack is true if(this.jetpack && (f == 1 || f == 0)){ f = f + 2; }else if(!this.jetpack && (f > 1)){ f = f - 2; } this.frame = f; // check if hero has gone off screen and end game if so if(this.x*game.ts > game.w || this.x*game.ts < 0 || this.y*game.ts > game.h || this.y*game.ts < game.topStart){ game.loadLevel('over'); } // check for collision with enemy // this also checks if there are no enemies // left and loads the next level if that's // the case var levelOver = true; for(var i in game.objQ){ if(game.objQ[i].constructor.name == 'Enemy'){ levelOver = false; e = game.objQ[i]; if(chkCol('enemy', this, e)){ game.loadLevel('over'); break; } } } if(levelOver)loadLevel(game.currLevel + 1); // if level is complete load next level }; this.draw = function(){ // draws the jetpack timer and the hero sprite game.cx.fillStyle=txtColor; game.cx.font = "12px verdana"; game.cx.fillText("Jetpack time remaining: " + (this.jetpackTimer/1000).toFixed(2) + " seconds", game.w-140, 25); sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame); }; this.fire = function(){ // fires a bullet every 250 ms if the fire // key is pressed if(this.fireTick > 250){ // create a new bullet object at the hero's // position and add it to the object queue var dir = [1,3].indexOf(this.frame) == -1 ? -1 : 1; // direction var bullet = new Bullet(game, this.x, this.y, dir, 'bullet'); game.objQ.push(bullet); this.fireTick = 0; } }; }; var Bullet = function(game, x, y, dir, sprite){ this.s = sprite; this.x = x; this.y = y; this.dir = dir; // direction this.animTick = 0; // move bullet every 25 ms var chkCol = function(b, e){ // check collision between b and e objects // b = bullet, e = enemy var abs = Math.abs; return (abs(b.x - e.x) * 2 < (1)) && (abs(b.y - e.y) * 2 < (1)); }; this.destroy = function(){ // if bullet exists in object queue // remove it var index = game.objQ.indexOf(this); if(index > -1){ game.objQ.splice(index, 1); } }; this.update = function(i){ // update bullet x position every animTick this.animTick += i; if(this.animTick >= 25){ this.x += dir > 0 ? 0.25 : -0.25; this.animTick = 0; } // loop through enemy objects and check for // collision. If collided, destroy bullet // and enemy. for(var i in game.objQ){ if(game.objQ[i].constructor.name == 'Enemy'){ e = game.objQ[i]; if(chkCol(this, e)){ this.destroy(); e.destroy(); break; } } } // destroy bullet if gone off screen if(this.x > game.tileW || this.x < 0 || this.y > game.tileH || this.y < 0){ this.destroy(); } }; this.draw = function(){ // draw bullet sprites.draw(this.s,this.x*game.ts,this.y*game.ts); }; return this; }; function Enemy(game, group, sprite){ // x, y, sx, sy, this.s = sprite; this.frame = 0; // start frame // movement tick this.xtick = 0; this.ytick = 0; // variables to restrict movement to given platform // xs = xStart, xe = xEnd. They correspond to tile // start and tile end positions this.xs = typeof(group[0]) !== 'number' ? group[0][0] : group[0]; this.xe = typeof(group[0]) !== 'number' ? group[0][1]-1 : group[0]-1; this.ys = typeof(group[1]) !== 'number' ? 
group[1][0] : group[1]; this.ye = typeof(group[1]) !== 'number' ? group[1][1]-1 : group[1]-1; // set start position and speed this.x = Math.floor(Math.random() * (this.xe - this.xs)) + this.xs; this.y = Math.floor(Math.random() * (this.ye - this.ys)) + (this.ys+2); this.dx = ((Math.random() * 100) + 25) * (Math.ceil((Math.random() * 2)) === 1 ? 1 : -1); this.dy = 0; this.dir = this.dx < 0 ? 0 : 2; // direction used to calculate correct frame to display this.animTick = 0; // change sprite frame every 250 ms var that = this; this.update = function(i){ // update tics with interval this.animTick += i; this.xtick += i; this.ytick += i; // do animation every 250 ms if(this.animTick >= 250){ this.frame++; if(this.frame > 1)this.frame = 0; this.animTick = 0; } // enemy movement is a bit different from every other moverment // because enemy movement must be confined to a platform, it's // easier to move in 0.25 increments, so the speed calculation // is the time in ms, to update the enemy position. if(this.xtick >= Math.abs(this.dx)){ this.x += this.dx < 0 ? -0.25 : this.dx == 0 ? 0 : 0.25; this.xtick = 0; if(this.x >= this.xe || this.x <= this.xs){ // flip direction and set x to the edge of platform this.x = this.x >= this.xe ? this.xe : this.xs; this.dx *= -1; this.dir = this.dx < 0 ? 0 : 2; } } if(this.ytick >= Math.abs(this.dy)){ this.y += this.dy < 0 ? -0.25 : this.dy == 0 ? 0 : 0.25; this.ytick = 0; if(this.y == this.ye || this.y == this.ys){ this.dy *= -1; } } }; this.draw = function(){ sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame+this.dir); }; this.destroy = function(){ // check if this enemy is in the object queue and remove it var index = game.objQ.indexOf(this); if(index > -1){ game.objQ.splice(index, 1); } }; return this; }; var Level = function(game, s){ /***********************/ /* level class /***********************/ // map array. This is a 2d array of tiles generated from the platform // groups created in this.init(). Format = [0,0,0,0,0,1,1,1,1...] var map = []; var tileGroup = Math.floor(Math.random() * 2); // determines the tiles to use var sprite = s; // tile sprite for platforms this.init = function(){ // initialize level var m = []; // map array to hold tile groups var enemies = []; // array to hold enemies /***********************************/ /* generate the map dynamically /***********************************/ var platforms = Math.ceil(Math.random() * 2) + 4; // first calculate number of platforms var yVals = []; // array to hold the y value of each platform var lastVal = -1; // last platform's y value, this is to prevent platforms from being too close to each other // loop through platforms and generate [x,y] value group for(var n = 0; n <= platforms; n++){ // y value is calculated at random within a range // of 4 and minimum 4 tiles away from last y value var tY = Math.floor(Math.random() * 4) + lastVal + 4; if(tY < 35){ // don't create any platforms past tile 35, this is to sllow space for start platform // generate x start and x end values. x start is contained within // the first half of the map and x end is contained within the second // half of the map. 
There is provision for blank space on either side // of the platform to prevent the platform from spanning the width of the map var tX1 = Math.floor(Math.random() * (game.tileW / 2)) + 1; var tX2 = Math.floor(Math.random() * ((game.tileW / 2) - 10)) + (game.tileW / 2) + 10; var xVal = [tX1, tX2]; // set the xVal group var thisP = [xVal, tY]; // set the tile group m.push(thisP); // push this group to the map array // generate random number of enemies for this platform // this is based off of the size of the platform var e = Math.floor((tX2-tX1)/3); for(var i = 0; i <= e; i++){ // add enemy to enemy array enemies.push([n,"zombie"]); // format is [platform, sprite] } lastVal = tY; // reset last y value } } m.push([[0,1],37]); // add hero start platform // convert the m array storing platform groups into the // map array which stores tile values of 0 or 1. // format of map = [0,0,0,0,1,1,1,1,1,0...] var cPos = 0; // current position in 2 d array for(g in m){ // loop through each group var group = m[g]; // get x and y start and end values var xs = typeof(group[0]) !== 'number' ? group[0][0] : group[0]; var xe = typeof(group[0]) !== 'number' ? group[0][1] : group[0]; var ys = typeof(group[1]) !== 'number' ? group[1][0] : group[1]; var ye = typeof(group[1]) !== 'number' ? group[1][1] : group[1]; // convert [x,y] start value to 2d array index. // add [0] tiles to map array from cPos to // calculated start value, then add [1] tiles // to map array from start value to end value. for(;cPos < (ys*that.tileW)+xs; cPos++){ map[cPos] = 0; } for(;cPos < (ye*that.tileW)+xe; cPos++){ map[cPos] = 1; } } // after all platforms are created, complete map array // with [0] tiles. for(;cPos < that.tileW * that.tileH; cPos++){ map[cPos] = 0; } // loop through enemies array and create them // then add them to object queue for(var i = 0; i < enemies.length; i++){ var group = m[enemies[i][0]]; // get enemy group var sprite = enemies[i][1]; // get enemy sprite var thisEnemy = new Enemy(game, group, sprite); // create new enemy game.objQ.push(thisEnemy); // add to object queue } // create the hero and add to object queue var hero = new Hero(game, 0, 38, 'hero'); game.objQ.push(hero); }; this.update = function(i){ // update total time played game.timePlayed += i; }; this.draw = function(){ // draw each tile for(i in map){ var tx = i % game.tileW; var ty = parseInt(i / game.tileW); sprites.draw(sprite, tx*game.ts,ty*game.ts,(tileGroup*2) + 1); if(map[i] === 1){ sprites.draw(sprite, tx*game.ts,ty*game.ts,(tileGroup*2) + 0); } } // draw current level and time elapsed game.cx.fillStyle="#FFF"; //set background color game.cx.fillRect(0, 0, game.w, game.topStart * game.ts); //fill background game.cx.fillStyle=txtColor; game.cx.font = "12px verdana"; game.cx.fillText("Current Level: " + game.currLevel, 70, 25); game.cx.fillText("Lapsed Time: " + (game.timePlayed/1000).toFixed(2) + " seconds", 230, 25); }; this.getMap = function(){ return map; // return map }; this.getTile = function(x,y){ // return 0 or 1 for tile and [x,y] position var pos = (y * game.tileW) + x; return map[pos]; }; this.init(); // initialize level return this; }; this.init(); //initialize game return this; };
var scoreMsg = score > 0 ? ". Well done." : ". Oops, better luck next time."
random_line_split
game.js
var Game = function(cid, w, h, callback){ var that = this; var txtColor = "#333"; //default text color var fps = 30; // add event listeners, this will store key pressed on key down in an array and remove on keyup document.addEventListener('keydown', function(e){ var key = e.keyCode; var index = that.keysPressed.indexOf(key); if(index === -1){ that.keysPressed.push(key); } }); document.addEventListener('keyup', function(e){ var key = e.keyCode; var index = that.keysPressed.indexOf(key); if(index > -1){ that.keysPressed.splice(index, 1); } }); /***********************/ /* member variables /***********************/ var ca = this.ca = document.getElementById(cid); var cx = this.cx = ca.getContext('2d'); this.ts = 20; //tile size in pixels this.ca.setAttribute('width', w*this.ts); this.ca.setAttribute('height', h*this.ts); this.tileW = w; //tile width this.tileH = h; //tile height this.w = parseInt(this.ca.getAttribute('width')); //pixel width this.h = parseInt(this.ca.getAttribute('height')); //pixel height this.topStart = 2; //start of game window this.objQ = []; //object queue this.iLoaded = false; //images loaded this.tick = new Date().getTime(); //draw tick this.utick = new Date().getTime(); //update tick this.currLevel = 0; this.levelObj; //current level object this.keysPressed = []; //array to store keys currently being pressed this.timePlayed = 0; /***********************/ /* member methods /***********************/ this.init = function(){ // initialize this.loadResourses(); //we must wait for images to be loaded before loading start screen if(this.iLoaded == false){ var wait = function(){ if(this.iLoaded == false){ setTimeout(wait, 300); }else{ this.loadLevel('start'); callback.apply(this); } }; setTimeout(wait, 300); } }; this.update = function(){ /****************************************/ /* update happens on each loop iteration. /* first run current level update method. /* loop through objects in object queue /* and run each object's update method. /****************************************/ n = new Date().getTime(); var i = n - this.utick; //i is interval since last update, passed to all update methods if(this.levelObj !== undefined){ this.levelObj.update(i); } for(o in this.objQ){ if(this.objQ[o] !== undefined){ this.objQ[o].update(i); } } this.utick = n; //reset update tick }; this.draw = function(){ /****************************************/ /* draw happens at fps rate. /* first run current level draw method. /* loop through objects in object queue /* and run each object's draw method. /****************************************/ this.cx.clearRect(0, 0, this.w, this.h); //clear screen if(this.levelObj !== undefined){ this.levelObj.draw(); } for(o in this.objQ){ if(this.objQ[o] !== undefined){ this.objQ[o].draw(); } } }; this.run = function(){ //run update on every loop iteration that.update(); //run draw at fps rate var thisTick = new Date().getTime(); var i = thisTick - that.tick; if(i >= 1000 / fps){ that.draw(); that.tick = thisTick; } }; this.loadLevel = function(l){ //load a level or load start or over screen this.objQ = []; if(l === 'start'){ this.levelObj = new startScreen(); }else if(l === 'over'){ this.levelObj = new overScreen(); }else{ this.objQ = []; this.currLevel = l; this.levelObj = new Level(this, 'tile'); } }; this.loadResourses = function(){ /****************************************/ /* load resources from file stored in /* resourcesFile variable. 
/* file currently only stores sprite data /****************************************/ var resources = ' {'; resources += ' "spriteSheet": "spritesheet.png",'; resources += ' "sprites":{'; resources += ' "hero":{"sx":0, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "zombie":{"sx":80, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "tile":{"sx":160, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "bullet":{"sx":240, "sy":0, "w":20, "h":20, "dimM":1, "frames":1}'; resources += ' }'; resources += ' }'; var jData = JSON.parse(resources); var spriteData = jData.sprites; var spriteSheet = jData.spriteSheet; sprites.load(spriteSheet, spriteData, function(){ that.iLoaded = true; }); }; //sprites object, loads sprite data and handles draw for sprites var sprites = new function(){ this.data = {}; //date stores x and y coord on spritesheet, w and h of sprite, and number of frames this.load = function(rImage, data, callback){ this.data = data; this.image = new Image(); this.image.onload = callback; this.image.src = rImage; }; this.draw = function(sprite, x, y, frame){ var s = this.data[sprite]; //this sprite frame = !frame ? 0 : frame; //default frame is 0 cx.drawImage(this.image, s.sx + frame * s.w, s.sy, s.w, s.h, x, y, s.w*s.dimM, s.h*s.dimM); }; }; /***********************/ /* private classes /***********************/ var startScreen = function(){ // start screen. // on update check if enter key is pressed // load level 1 if enter is pressed this.update = function(){ if(that.keysPressed.indexOf(13) > -1){ that.loadLevel(1); } }; this.draw = function(){ if(that.cx !== undefined){ that.cx.fillStyle=txtColor; that.cx.font = "48px verdana"; that.cx.textAlign = "center"; that.cx.fillText("Gravity Control", that.w/2, (that.h/2) - 75); that.cx.font = "14px verdana"; that.cx.fillText("The world as we know it has ended, even Gravity is out of wack.", that.w/2, that.h/2); that.cx.fillText("Zombies are everywhere. Your mission is to kill as many of them as you can.", that.w/2, that.h/2 + 30); that.cx.fillText("You have a jetpack with enough fuel for 5 seconds per level. Use it wisely.", that.w/2, that.h/2 + 60); that.cx.font = "10px verdana"; that.cx.fillText("{left and right arrow} move from side to side, {space} fire, {up arrow} use jetpack.", that.w/2, that.h/2 + 100); that.cx.font = "14px verdana"; that.cx.fillText("Press ENTER to start.", that.w/2, that.h/2 + 140); } }; }; var overScreen = function(){ // game over screen. // display scores. // on update check if enter or esc key is pressed // load level 1 if enter is pressed // load start screen if esc is pressed var totalTime = that.timePlayed; that.timePlayed = 0; this.update = function(){ if(that.keysPressed.indexOf(27) > -1){ that.loadLevel('start'); }else if(that.keysPressed.indexOf(13) > -1){ that.loadLevel(1); } }; this.draw = function(){ if(that.cx !== undefined){ var timePerLevel = parseInt(totalTime/1000) / (that.currLevel - 1); var scorePerLevelComplete = 100; var score = (scorePerLevelComplete - timePerLevel) * (that.currLevel - 1); score = isNaN(score) ? 0 : score; var scoreMsg = score > 0 ? ". Well done." : ". Opps, better luck next time." 
that.cx.fillStyle=txtColor; that.cx.font = "48px verdana"; that.cx.fillText("Game Over", that.w/2, that.h/2 - 100); that.cx.font = "16px verdana"; that.cx.fillText("You completed " + (that.currLevel - 1) + " levels in " + (totalTime/1000).toFixed(2) + " seconds.", that.w/2, that.h/2 -50); that.cx.fillText("Your score is " + score + scoreMsg, that.w/2, that.h/2 -20); that.cx.font = "12px verdana"; that.cx.fillText("Press ESC to go back to start screen.", that.w/2, that.h/2 + 50); that.cx.fillText("Press ENTER to go restart at level 1.", that.w/2, that.h/2 + 80); } }; }; var Hero = function(game, x, y, sprite){ /***********************/ /* hero class /***********************/ //start tile position this.x = x; this.y = y; this.frame = 1; // start frame this.s = sprite; // hero sprite //hero movement speed this.dx = 0; this.dy = 0; // tick used to control movement speed // hero will move dx or dy distance every // 50 ms this.xTick = 0; this.yTick = 0; // jetpack this.jetpack = false; this.jetpackTimer = 5000; this.fireTick = 0; // tick to control fire frequency var that = this; var chkCol = function(t, h, e){ // check collision with map // or with enemy if(t == "map"){ var yBelow = Math.ceil(that.y)-1; var xBelow1 = Math.floor(that.x); var xBelow2 = Math.ceil(that.x); // check collision with tiles below // and on either side of hero var tileBelow1 = game.levelObj.getTile(xBelow1, yBelow); var tileBelow2 = game.levelObj.getTile(xBelow2, yBelow); if(tileBelow1 == 1 || tileBelow2 == 1){ return true; } return false; }else{ // check collision with object h and object e var abs = Math.abs; return (abs(h.x - e.x) * 2 < (1)) && (abs(h.y - e.y) * 2 < (1)); } }; this.update = function(i){ // add interval to all ticks this.xTick += i; this.yTick += i; this.fireTick += i; // check if any relevant keys are being pressed var left = game.keysPressed.indexOf(37); var right = game.keysPressed.indexOf(39); var jetpack = game.keysPressed.indexOf(40); var fire = game.keysPressed.indexOf(32); // if down arrow is pressed, set jetpack to true and // update jetpackTimer with interval // if allowed jetpack time is exceeded, set jetpack to false this.jetpack = jetpack > -1 ? true : false; this.jetpackTimer = this.jetpack ? this.jetpackTimer - i : this.jetpackTimer; if(this.jetpackTimer <= 0){ this.jetpack = false; this.jetpackTimer = 0; } // check for right and left arrow key and set dx accordingly this.dx = left > -1 ? -0.25 : right > -1 ? 0.25 : 0; // check for space key and run fire method if pressed if(fire > -1)this.fire(); // if jetpack is true, set dy accordingly if(this.jetpack){ this.dy = 0.25; }else{ // if jetpack is false, check for // collision with tile and set dy to // 0 if true and apply fall to dy if // false. 
if(this.y % 1 == 0){ if(chkCol('map')){ this.dy = 0; this.yTick = 0; }else{ this.dy = -0.25; } } } // apply dx and dy to x and y every 50 ms // then reset x and y Ticks if(this.xTick >= 50){ this.x += this.dx; this.xTick = 0; } if(this.yTick >= 50){ this.y += this.dy; this.yTick = 0; } // change directional frame accordingly var f = this.frame; if(this.dx > 0){ f = 1; }else if(this.dx < 0){ f = 0; } // add 2 to frame if jetpack is true if(this.jetpack && (f == 1 || f == 0)){ f = f + 2; }else if(!this.jetpack && (f > 1)){ f = f - 2; } this.frame = f; // check if hero has gone off screen and end game if so if(this.x*game.ts > game.w || this.x*game.ts < 0 || this.y*game.ts > game.h || this.y*game.ts < game.topStart){ game.loadLevel('over'); } // check for collision with enemy // this also checks if there are no enemies // left and loads the next level if that's // the case var levelOver = true; for(var i in game.objQ){ if(game.objQ[i].constructor.name == 'Enemy'){ levelOver = false; e = game.objQ[i]; if(chkCol('enemy', this, e)){ game.loadLevel('over'); break; } } } if(levelOver)loadLevel(game.currLevel + 1); // if level is complete load next level }; this.draw = function(){ // draws the jetpack timer and the hero sprite game.cx.fillStyle=txtColor; game.cx.font = "12px verdana"; game.cx.fillText("Jetpack time remaining: " + (this.jetpackTimer/1000).toFixed(2) + " seconds", game.w-140, 25); sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame); }; this.fire = function(){ // fires a bullet every 250 ms if the fire // key is pressed if(this.fireTick > 250){ // create a new bullet object at the hero's // position and add it to the object queue var dir = [1,3].indexOf(this.frame) == -1 ? -1 : 1; // direction var bullet = new Bullet(game, this.x, this.y, dir, 'bullet'); game.objQ.push(bullet); this.fireTick = 0; } }; }; var Bullet = function(game, x, y, dir, sprite){ this.s = sprite; this.x = x; this.y = y; this.dir = dir; // direction this.animTick = 0; // move bullet every 25 ms var chkCol = function(b, e){ // check collision between b and e objects // b = bullet, e = enemy var abs = Math.abs; return (abs(b.x - e.x) * 2 < (1)) && (abs(b.y - e.y) * 2 < (1)); }; this.destroy = function(){ // if bullet exists in object queue // remove it var index = game.objQ.indexOf(this); if(index > -1){ game.objQ.splice(index, 1); } }; this.update = function(i){ // update bullet x position every animTick this.animTick += i; if(this.animTick >= 25){ this.x += dir > 0 ? 0.25 : -0.25; this.animTick = 0; } // loop through enemy objects and check for // collision. If collided, destroy bullet // and enemy. for(var i in game.objQ){ if(game.objQ[i].constructor.name == 'Enemy'){ e = game.objQ[i]; if(chkCol(this, e)){ this.destroy(); e.destroy(); break; } } } // destroy bullet if gone off screen if(this.x > game.tileW || this.x < 0 || this.y > game.tileH || this.y < 0){ this.destroy(); } }; this.draw = function(){ // draw bullet sprites.draw(this.s,this.x*game.ts,this.y*game.ts); }; return this; }; function
(game, group, sprite){ // x, y, sx, sy, this.s = sprite; this.frame = 0; // start frame // movement tick this.xtick = 0; this.ytick = 0; // variables to restrict movement to given platform // xs = xStart, xe = xEnd. They correspond to tile // start and tile end positions this.xs = typeof(group[0]) !== 'number' ? group[0][0] : group[0]; this.xe = typeof(group[0]) !== 'number' ? group[0][1]-1 : group[0]-1; this.ys = typeof(group[1]) !== 'number' ? group[1][0] : group[1]; this.ye = typeof(group[1]) !== 'number' ? group[1][1]-1 : group[1]-1; // set start position and speed this.x = Math.floor(Math.random() * (this.xe - this.xs)) + this.xs; this.y = Math.floor(Math.random() * (this.ye - this.ys)) + (this.ys+2); this.dx = ((Math.random() * 100) + 25) * (Math.ceil((Math.random() * 2)) === 1 ? 1 : -1); this.dy = 0; this.dir = this.dx < 0 ? 0 : 2; // direction used to calculate correct frame to display this.animTick = 0; // change sprite frame every 250 ms var that = this; this.update = function(i){ // update tics with interval this.animTick += i; this.xtick += i; this.ytick += i; // do animation every 250 ms if(this.animTick >= 250){ this.frame++; if(this.frame > 1)this.frame = 0; this.animTick = 0; } // enemy movement is a bit different from every other moverment // because enemy movement must be confined to a platform, it's // easier to move in 0.25 increments, so the speed calculation // is the time in ms, to update the enemy position. if(this.xtick >= Math.abs(this.dx)){ this.x += this.dx < 0 ? -0.25 : this.dx == 0 ? 0 : 0.25; this.xtick = 0; if(this.x >= this.xe || this.x <= this.xs){ // flip direction and set x to the edge of platform this.x = this.x >= this.xe ? this.xe : this.xs; this.dx *= -1; this.dir = this.dx < 0 ? 0 : 2; } } if(this.ytick >= Math.abs(this.dy)){ this.y += this.dy < 0 ? -0.25 : this.dy == 0 ? 0 : 0.25; this.ytick = 0; if(this.y == this.ye || this.y == this.ys){ this.dy *= -1; } } }; this.draw = function(){ sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame+this.dir); }; this.destroy = function(){ // check if this enemy is in the object queue and remove it var index = game.objQ.indexOf(this); if(index > -1){ game.objQ.splice(index, 1); } }; return this; }; var Level = function(game, s){ /***********************/ /* level class /***********************/ // map array. This is a 2d array of tiles generated from the platform // groups created in this.init(). Format = [0,0,0,0,0,1,1,1,1...] var map = []; var tileGroup = Math.floor(Math.random() * 2); // determines the tiles to use var sprite = s; // tile sprite for platforms this.init = function(){ // initialize level var m = []; // map array to hold tile groups var enemies = []; // array to hold enemies /***********************************/ /* generate the map dynamically /***********************************/ var platforms = Math.ceil(Math.random() * 2) + 4; // first calculate number of platforms var yVals = []; // array to hold the y value of each platform var lastVal = -1; // last platform's y value, this is to prevent platforms from being too close to each other // loop through platforms and generate [x,y] value group for(var n = 0; n <= platforms; n++){ // y value is calculated at random within a range // of 4 and minimum 4 tiles away from last y value var tY = Math.floor(Math.random() * 4) + lastVal + 4; if(tY < 35){ // don't create any platforms past tile 35, this is to sllow space for start platform // generate x start and x end values. 
x start is contained within // the first half of the map and x end is contained within the second // half of the map. There is provision for blank space on either side // of the platform to prevent the platform from spanning the width of the map var tX1 = Math.floor(Math.random() * (game.tileW / 2)) + 1; var tX2 = Math.floor(Math.random() * ((game.tileW / 2) - 10)) + (game.tileW / 2) + 10; var xVal = [tX1, tX2]; // set the xVal group var thisP = [xVal, tY]; // set the tile group m.push(thisP); // push this group to the map array // generate random number of enemies for this platform // this is based off of the size of the platform var e = Math.floor((tX2-tX1)/3); for(var i = 0; i <= e; i++){ // add enemy to enemy array enemies.push([n,"zombie"]); // format is [platform, sprite] } lastVal = tY; // reset last y value } } m.push([[0,1],37]); // add hero start platform // convert the m array storing platform groups into the // map array which stores tile values of 0 or 1. // format of map = [0,0,0,0,1,1,1,1,1,0...] var cPos = 0; // current position in 2 d array for(g in m){ // loop through each group var group = m[g]; // get x and y start and end values var xs = typeof(group[0]) !== 'number' ? group[0][0] : group[0]; var xe = typeof(group[0]) !== 'number' ? group[0][1] : group[0]; var ys = typeof(group[1]) !== 'number' ? group[1][0] : group[1]; var ye = typeof(group[1]) !== 'number' ? group[1][1] : group[1]; // convert [x,y] start value to 2d array index. // add [0] tiles to map array from cPos to // calculated start value, then add [1] tiles // to map array from start value to end value. for(;cPos < (ys*that.tileW)+xs; cPos++){ map[cPos] = 0; } for(;cPos < (ye*that.tileW)+xe; cPos++){ map[cPos] = 1; } } // after all platforms are created, complete map array // with [0] tiles. for(;cPos < that.tileW * that.tileH; cPos++){ map[cPos] = 0; } // loop through enemies array and create them // then add them to object queue for(var i = 0; i < enemies.length; i++){ var group = m[enemies[i][0]]; // get enemy group var sprite = enemies[i][1]; // get enemy sprite var thisEnemy = new Enemy(game, group, sprite); // create new enemy game.objQ.push(thisEnemy); // add to object queue } // create the hero and add to object queue var hero = new Hero(game, 0, 38, 'hero'); game.objQ.push(hero); }; this.update = function(i){ // update total time played game.timePlayed += i; }; this.draw = function(){ // draw each tile for(i in map){ var tx = i % game.tileW; var ty = parseInt(i / game.tileW); sprites.draw(sprite, tx*game.ts,ty*game.ts,(tileGroup*2) + 1); if(map[i] === 1){ sprites.draw(sprite, tx*game.ts,ty*game.ts,(tileGroup*2) + 0); } } // draw current level and time elapsed game.cx.fillStyle="#FFF"; //set background color game.cx.fillRect(0, 0, game.w, game.topStart * game.ts); //fill background game.cx.fillStyle=txtColor; game.cx.font = "12px verdana"; game.cx.fillText("Current Level: " + game.currLevel, 70, 25); game.cx.fillText("Lapsed Time: " + (game.timePlayed/1000).toFixed(2) + " seconds", 230, 25); }; this.getMap = function(){ return map; // return map }; this.getTile = function(x,y){ // return 0 or 1 for tile and [x,y] position var pos = (y * game.tileW) + x; return map[pos]; }; this.init(); // initialize level return this; }; this.init(); //initialize game return this; };
Enemy
identifier_name
game.js
var Game = function(cid, w, h, callback){ var that = this; var txtColor = "#333"; //default text color var fps = 30; // add event listeners, this will store key pressed on key down in an array and remove on keyup document.addEventListener('keydown', function(e){ var key = e.keyCode; var index = that.keysPressed.indexOf(key); if(index === -1){ that.keysPressed.push(key); } }); document.addEventListener('keyup', function(e){ var key = e.keyCode; var index = that.keysPressed.indexOf(key); if(index > -1){ that.keysPressed.splice(index, 1); } }); /***********************/ /* member variables /***********************/ var ca = this.ca = document.getElementById(cid); var cx = this.cx = ca.getContext('2d'); this.ts = 20; //tile size in pixels this.ca.setAttribute('width', w*this.ts); this.ca.setAttribute('height', h*this.ts); this.tileW = w; //tile width this.tileH = h; //tile height this.w = parseInt(this.ca.getAttribute('width')); //pixel width this.h = parseInt(this.ca.getAttribute('height')); //pixel height this.topStart = 2; //start of game window this.objQ = []; //object queue this.iLoaded = false; //images loaded this.tick = new Date().getTime(); //draw tick this.utick = new Date().getTime(); //update tick this.currLevel = 0; this.levelObj; //current level object this.keysPressed = []; //array to store keys currently being pressed this.timePlayed = 0; /***********************/ /* member methods /***********************/ this.init = function(){ // initialize this.loadResourses(); //we must wait for images to be loaded before loading start screen if(this.iLoaded == false){ var wait = function(){ if(this.iLoaded == false){ setTimeout(wait, 300); }else{ this.loadLevel('start'); callback.apply(this); } }; setTimeout(wait, 300); } }; this.update = function(){ /****************************************/ /* update happens on each loop iteration. /* first run current level update method. /* loop through objects in object queue /* and run each object's update method. /****************************************/ n = new Date().getTime(); var i = n - this.utick; //i is interval since last update, passed to all update methods if(this.levelObj !== undefined){ this.levelObj.update(i); } for(o in this.objQ){ if(this.objQ[o] !== undefined){ this.objQ[o].update(i); } } this.utick = n; //reset update tick }; this.draw = function(){ /****************************************/ /* draw happens at fps rate. /* first run current level draw method. /* loop through objects in object queue /* and run each object's draw method. /****************************************/ this.cx.clearRect(0, 0, this.w, this.h); //clear screen if(this.levelObj !== undefined){ this.levelObj.draw(); } for(o in this.objQ){ if(this.objQ[o] !== undefined){ this.objQ[o].draw(); } } }; this.run = function(){ //run update on every loop iteration that.update(); //run draw at fps rate var thisTick = new Date().getTime(); var i = thisTick - that.tick; if(i >= 1000 / fps){ that.draw(); that.tick = thisTick; } }; this.loadLevel = function(l){ //load a level or load start or over screen this.objQ = []; if(l === 'start'){ this.levelObj = new startScreen(); }else if(l === 'over'){ this.levelObj = new overScreen(); }else{ this.objQ = []; this.currLevel = l; this.levelObj = new Level(this, 'tile'); } }; this.loadResourses = function(){ /****************************************/ /* load resources from file stored in /* resourcesFile variable. 
/* file currently only stores sprite data /****************************************/ var resources = ' {'; resources += ' "spriteSheet": "spritesheet.png",'; resources += ' "sprites":{'; resources += ' "hero":{"sx":0, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "zombie":{"sx":80, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "tile":{"sx":160, "sy":0, "w":20, "h":20, "dimM":1, "frames":2},'; resources += ' "bullet":{"sx":240, "sy":0, "w":20, "h":20, "dimM":1, "frames":1}'; resources += ' }'; resources += ' }'; var jData = JSON.parse(resources); var spriteData = jData.sprites; var spriteSheet = jData.spriteSheet; sprites.load(spriteSheet, spriteData, function(){ that.iLoaded = true; }); }; //sprites object, loads sprite data and handles draw for sprites var sprites = new function(){ this.data = {}; //date stores x and y coord on spritesheet, w and h of sprite, and number of frames this.load = function(rImage, data, callback){ this.data = data; this.image = new Image(); this.image.onload = callback; this.image.src = rImage; }; this.draw = function(sprite, x, y, frame){ var s = this.data[sprite]; //this sprite frame = !frame ? 0 : frame; //default frame is 0 cx.drawImage(this.image, s.sx + frame * s.w, s.sy, s.w, s.h, x, y, s.w*s.dimM, s.h*s.dimM); }; }; /***********************/ /* private classes /***********************/ var startScreen = function(){ // start screen. // on update check if enter key is pressed // load level 1 if enter is pressed this.update = function(){ if(that.keysPressed.indexOf(13) > -1){ that.loadLevel(1); } }; this.draw = function(){ if(that.cx !== undefined){ that.cx.fillStyle=txtColor; that.cx.font = "48px verdana"; that.cx.textAlign = "center"; that.cx.fillText("Gravity Control", that.w/2, (that.h/2) - 75); that.cx.font = "14px verdana"; that.cx.fillText("The world as we know it has ended, even Gravity is out of wack.", that.w/2, that.h/2); that.cx.fillText("Zombies are everywhere. Your mission is to kill as many of them as you can.", that.w/2, that.h/2 + 30); that.cx.fillText("You have a jetpack with enough fuel for 5 seconds per level. Use it wisely.", that.w/2, that.h/2 + 60); that.cx.font = "10px verdana"; that.cx.fillText("{left and right arrow} move from side to side, {space} fire, {up arrow} use jetpack.", that.w/2, that.h/2 + 100); that.cx.font = "14px verdana"; that.cx.fillText("Press ENTER to start.", that.w/2, that.h/2 + 140); } }; }; var overScreen = function(){ // game over screen. // display scores. // on update check if enter or esc key is pressed // load level 1 if enter is pressed // load start screen if esc is pressed var totalTime = that.timePlayed; that.timePlayed = 0; this.update = function(){ if(that.keysPressed.indexOf(27) > -1){ that.loadLevel('start'); }else if(that.keysPressed.indexOf(13) > -1){ that.loadLevel(1); } }; this.draw = function(){ if(that.cx !== undefined){ var timePerLevel = parseInt(totalTime/1000) / (that.currLevel - 1); var scorePerLevelComplete = 100; var score = (scorePerLevelComplete - timePerLevel) * (that.currLevel - 1); score = isNaN(score) ? 0 : score; var scoreMsg = score > 0 ? ". Well done." : ". Opps, better luck next time." 
that.cx.fillStyle=txtColor; that.cx.font = "48px verdana"; that.cx.fillText("Game Over", that.w/2, that.h/2 - 100); that.cx.font = "16px verdana"; that.cx.fillText("You completed " + (that.currLevel - 1) + " levels in " + (totalTime/1000).toFixed(2) + " seconds.", that.w/2, that.h/2 -50); that.cx.fillText("Your score is " + score + scoreMsg, that.w/2, that.h/2 -20); that.cx.font = "12px verdana"; that.cx.fillText("Press ESC to go back to start screen.", that.w/2, that.h/2 + 50); that.cx.fillText("Press ENTER to go restart at level 1.", that.w/2, that.h/2 + 80); } }; }; var Hero = function(game, x, y, sprite){ /***********************/ /* hero class /***********************/ //start tile position this.x = x; this.y = y; this.frame = 1; // start frame this.s = sprite; // hero sprite //hero movement speed this.dx = 0; this.dy = 0; // tick used to control movement speed // hero will move dx or dy distance every // 50 ms this.xTick = 0; this.yTick = 0; // jetpack this.jetpack = false; this.jetpackTimer = 5000; this.fireTick = 0; // tick to control fire frequency var that = this; var chkCol = function(t, h, e){ // check collision with map // or with enemy if(t == "map")
else{ // check collision with object h and object e var abs = Math.abs; return (abs(h.x - e.x) * 2 < (1)) && (abs(h.y - e.y) * 2 < (1)); } }; this.update = function(i){ // add interval to all ticks this.xTick += i; this.yTick += i; this.fireTick += i; // check if any relevant keys are being pressed var left = game.keysPressed.indexOf(37); var right = game.keysPressed.indexOf(39); var jetpack = game.keysPressed.indexOf(40); var fire = game.keysPressed.indexOf(32); // if down arrow is pressed, set jetpack to true and // update jetpackTimer with interval // if allowed jetpack time is exceeded, set jetpack to false this.jetpack = jetpack > -1 ? true : false; this.jetpackTimer = this.jetpack ? this.jetpackTimer - i : this.jetpackTimer; if(this.jetpackTimer <= 0){ this.jetpack = false; this.jetpackTimer = 0; } // check for right and left arrow key and set dx accordingly this.dx = left > -1 ? -0.25 : right > -1 ? 0.25 : 0; // check for space key and run fire method if pressed if(fire > -1)this.fire(); // if jetpack is true, set dy accordingly if(this.jetpack){ this.dy = 0.25; }else{ // if jetpack is false, check for // collision with tile and set dy to // 0 if true and apply fall to dy if // false. if(this.y % 1 == 0){ if(chkCol('map')){ this.dy = 0; this.yTick = 0; }else{ this.dy = -0.25; } } } // apply dx and dy to x and y every 50 ms // then reset x and y Ticks if(this.xTick >= 50){ this.x += this.dx; this.xTick = 0; } if(this.yTick >= 50){ this.y += this.dy; this.yTick = 0; } // change directional frame accordingly var f = this.frame; if(this.dx > 0){ f = 1; }else if(this.dx < 0){ f = 0; } // add 2 to frame if jetpack is true if(this.jetpack && (f == 1 || f == 0)){ f = f + 2; }else if(!this.jetpack && (f > 1)){ f = f - 2; } this.frame = f; // check if hero has gone off screen and end game if so if(this.x*game.ts > game.w || this.x*game.ts < 0 || this.y*game.ts > game.h || this.y*game.ts < game.topStart){ game.loadLevel('over'); } // check for collision with enemy // this also checks if there are no enemies // left and loads the next level if that's // the case var levelOver = true; for(var i in game.objQ){ if(game.objQ[i].constructor.name == 'Enemy'){ levelOver = false; e = game.objQ[i]; if(chkCol('enemy', this, e)){ game.loadLevel('over'); break; } } } if(levelOver)loadLevel(game.currLevel + 1); // if level is complete load next level }; this.draw = function(){ // draws the jetpack timer and the hero sprite game.cx.fillStyle=txtColor; game.cx.font = "12px verdana"; game.cx.fillText("Jetpack time remaining: " + (this.jetpackTimer/1000).toFixed(2) + " seconds", game.w-140, 25); sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame); }; this.fire = function(){ // fires a bullet every 250 ms if the fire // key is pressed if(this.fireTick > 250){ // create a new bullet object at the hero's // position and add it to the object queue var dir = [1,3].indexOf(this.frame) == -1 ? 
-1 : 1; // direction var bullet = new Bullet(game, this.x, this.y, dir, 'bullet'); game.objQ.push(bullet); this.fireTick = 0; } }; }; var Bullet = function(game, x, y, dir, sprite){ this.s = sprite; this.x = x; this.y = y; this.dir = dir; // direction this.animTick = 0; // move bullet every 25 ms var chkCol = function(b, e){ // check collision between b and e objects // b = bullet, e = enemy var abs = Math.abs; return (abs(b.x - e.x) * 2 < (1)) && (abs(b.y - e.y) * 2 < (1)); }; this.destroy = function(){ // if bullet exists in object queue // remove it var index = game.objQ.indexOf(this); if(index > -1){ game.objQ.splice(index, 1); } }; this.update = function(i){ // update bullet x position every animTick this.animTick += i; if(this.animTick >= 25){ this.x += dir > 0 ? 0.25 : -0.25; this.animTick = 0; } // loop through enemy objects and check for // collision. If collided, destroy bullet // and enemy. for(var i in game.objQ){ if(game.objQ[i].constructor.name == 'Enemy'){ e = game.objQ[i]; if(chkCol(this, e)){ this.destroy(); e.destroy(); break; } } } // destroy bullet if gone off screen if(this.x > game.tileW || this.x < 0 || this.y > game.tileH || this.y < 0){ this.destroy(); } }; this.draw = function(){ // draw bullet sprites.draw(this.s,this.x*game.ts,this.y*game.ts); }; return this; }; function Enemy(game, group, sprite){ // x, y, sx, sy, this.s = sprite; this.frame = 0; // start frame // movement tick this.xtick = 0; this.ytick = 0; // variables to restrict movement to given platform // xs = xStart, xe = xEnd. They correspond to tile // start and tile end positions this.xs = typeof(group[0]) !== 'number' ? group[0][0] : group[0]; this.xe = typeof(group[0]) !== 'number' ? group[0][1]-1 : group[0]-1; this.ys = typeof(group[1]) !== 'number' ? group[1][0] : group[1]; this.ye = typeof(group[1]) !== 'number' ? group[1][1]-1 : group[1]-1; // set start position and speed this.x = Math.floor(Math.random() * (this.xe - this.xs)) + this.xs; this.y = Math.floor(Math.random() * (this.ye - this.ys)) + (this.ys+2); this.dx = ((Math.random() * 100) + 25) * (Math.ceil((Math.random() * 2)) === 1 ? 1 : -1); this.dy = 0; this.dir = this.dx < 0 ? 0 : 2; // direction used to calculate correct frame to display this.animTick = 0; // change sprite frame every 250 ms var that = this; this.update = function(i){ // update tics with interval this.animTick += i; this.xtick += i; this.ytick += i; // do animation every 250 ms if(this.animTick >= 250){ this.frame++; if(this.frame > 1)this.frame = 0; this.animTick = 0; } // enemy movement is a bit different from every other moverment // because enemy movement must be confined to a platform, it's // easier to move in 0.25 increments, so the speed calculation // is the time in ms, to update the enemy position. if(this.xtick >= Math.abs(this.dx)){ this.x += this.dx < 0 ? -0.25 : this.dx == 0 ? 0 : 0.25; this.xtick = 0; if(this.x >= this.xe || this.x <= this.xs){ // flip direction and set x to the edge of platform this.x = this.x >= this.xe ? this.xe : this.xs; this.dx *= -1; this.dir = this.dx < 0 ? 0 : 2; } } if(this.ytick >= Math.abs(this.dy)){ this.y += this.dy < 0 ? -0.25 : this.dy == 0 ? 
0 : 0.25; this.ytick = 0; if(this.y == this.ye || this.y == this.ys){ this.dy *= -1; } } }; this.draw = function(){ sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame+this.dir); }; this.destroy = function(){ // check if this enemy is in the object queue and remove it var index = game.objQ.indexOf(this); if(index > -1){ game.objQ.splice(index, 1); } }; return this; }; var Level = function(game, s){ /***********************/ /* level class /***********************/ // map array. This is a 2d array of tiles generated from the platform // groups created in this.init(). Format = [0,0,0,0,0,1,1,1,1...] var map = []; var tileGroup = Math.floor(Math.random() * 2); // determines the tiles to use var sprite = s; // tile sprite for platforms this.init = function(){ // initialize level var m = []; // map array to hold tile groups var enemies = []; // array to hold enemies /***********************************/ /* generate the map dynamically /***********************************/ var platforms = Math.ceil(Math.random() * 2) + 4; // first calculate number of platforms var yVals = []; // array to hold the y value of each platform var lastVal = -1; // last platform's y value, this is to prevent platforms from being too close to each other // loop through platforms and generate [x,y] value group for(var n = 0; n <= platforms; n++){ // y value is calculated at random within a range // of 4 and minimum 4 tiles away from last y value var tY = Math.floor(Math.random() * 4) + lastVal + 4; if(tY < 35){ // don't create any platforms past tile 35, this is to sllow space for start platform // generate x start and x end values. x start is contained within // the first half of the map and x end is contained within the second // half of the map. There is provision for blank space on either side // of the platform to prevent the platform from spanning the width of the map var tX1 = Math.floor(Math.random() * (game.tileW / 2)) + 1; var tX2 = Math.floor(Math.random() * ((game.tileW / 2) - 10)) + (game.tileW / 2) + 10; var xVal = [tX1, tX2]; // set the xVal group var thisP = [xVal, tY]; // set the tile group m.push(thisP); // push this group to the map array // generate random number of enemies for this platform // this is based off of the size of the platform var e = Math.floor((tX2-tX1)/3); for(var i = 0; i <= e; i++){ // add enemy to enemy array enemies.push([n,"zombie"]); // format is [platform, sprite] } lastVal = tY; // reset last y value } } m.push([[0,1],37]); // add hero start platform // convert the m array storing platform groups into the // map array which stores tile values of 0 or 1. // format of map = [0,0,0,0,1,1,1,1,1,0...] var cPos = 0; // current position in 2 d array for(g in m){ // loop through each group var group = m[g]; // get x and y start and end values var xs = typeof(group[0]) !== 'number' ? group[0][0] : group[0]; var xe = typeof(group[0]) !== 'number' ? group[0][1] : group[0]; var ys = typeof(group[1]) !== 'number' ? group[1][0] : group[1]; var ye = typeof(group[1]) !== 'number' ? group[1][1] : group[1]; // convert [x,y] start value to 2d array index. // add [0] tiles to map array from cPos to // calculated start value, then add [1] tiles // to map array from start value to end value. for(;cPos < (ys*that.tileW)+xs; cPos++){ map[cPos] = 0; } for(;cPos < (ye*that.tileW)+xe; cPos++){ map[cPos] = 1; } } // after all platforms are created, complete map array // with [0] tiles. 
for(;cPos < that.tileW * that.tileH; cPos++){ map[cPos] = 0; } // loop through enemies array and create them // then add them to object queue for(var i = 0; i < enemies.length; i++){ var group = m[enemies[i][0]]; // get enemy group var sprite = enemies[i][1]; // get enemy sprite var thisEnemy = new Enemy(game, group, sprite); // create new enemy game.objQ.push(thisEnemy); // add to object queue } // create the hero and add to object queue var hero = new Hero(game, 0, 38, 'hero'); game.objQ.push(hero); }; this.update = function(i){ // update total time played game.timePlayed += i; }; this.draw = function(){ // draw each tile for(i in map){ var tx = i % game.tileW; var ty = parseInt(i / game.tileW); sprites.draw(sprite, tx*game.ts,ty*game.ts,(tileGroup*2) + 1); if(map[i] === 1){ sprites.draw(sprite, tx*game.ts,ty*game.ts,(tileGroup*2) + 0); } } // draw current level and time elapsed game.cx.fillStyle="#FFF"; //set background color game.cx.fillRect(0, 0, game.w, game.topStart * game.ts); //fill background game.cx.fillStyle=txtColor; game.cx.font = "12px verdana"; game.cx.fillText("Current Level: " + game.currLevel, 70, 25); game.cx.fillText("Lapsed Time: " + (game.timePlayed/1000).toFixed(2) + " seconds", 230, 25); }; this.getMap = function(){ return map; // return map }; this.getTile = function(x,y){ // return 0 or 1 for tile and [x,y] position var pos = (y * game.tileW) + x; return map[pos]; }; this.init(); // initialize level return this; }; this.init(); //initialize game return this; };
{ var yBelow = Math.ceil(that.y)-1; var xBelow1 = Math.floor(that.x); var xBelow2 = Math.ceil(that.x); // check collision with tiles below // and on either side of hero var tileBelow1 = game.levelObj.getTile(xBelow1, yBelow); var tileBelow2 = game.levelObj.getTile(xBelow2, yBelow); if(tileBelow1 == 1 || tileBelow2 == 1){ return true; } return false; }
conditional_block
terminal.rs
use crate::os_glue::Glue; use crate::{Features, Key, TermOut}; use stakker::{fwd, timer_max, Fwd, MaxTimerKey, Share, CX}; use std::error::Error; use std::mem; use std::panic::PanicInfo; use std::sync::Arc; use std::time::Duration; /// Actor that manages the connection to the terminal pub struct Terminal { resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>,
termout: Share<TermOut>, glue: Glue, disable_output: bool, paused: bool, inbuf: Vec<u8>, check_enable: bool, force_timer: MaxTimerKey, check_timer: MaxTimerKey, cleanup: Vec<u8>, panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>, } impl Terminal { /// Set up the terminal. Sends a message back to `resize` /// immediately, which provides a reference to the shared /// [`TermOut`] which is used to buffer and flush terminal output /// data. /// /// Whenever the window size changes, a new `resize` message is /// sent. When the terminal output is paused, `None` is sent to /// `resize` to let the app know that there is no output available /// right now. /// /// Input keys received are sent to `input` once decoded. /// /// In case of an error that can't be handled, cleans up the /// terminal state and terminates the actor with /// `ActorDied::Failed`. The actor that created the terminal can /// catch that and do whatever cleanup is necessary before /// aborting the process. /// /// # Panic handling /// /// When Rust panics, the terminal must be restored to its normal /// state otherwise things would be left in a bad state for the /// user (in cooked mode with no echo, requiring the user to /// blindly type `reset` on the command-line). So this code saves /// a copy of the current panic handler (using /// `std::panic::take_hook`), and then installs its own handler /// that does terminal cleanup before calling on to the saved /// panic handler. This mean that if any custom panic handler is /// needed by the application, then it must be set up before the /// call to [`Terminal::init`]. /// /// [`TermOut`]: struct.TermOut.html pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> { // TODO: Query TERM/terminfo/environment for features to put in Features let features = Features { colour_256: false }; let term = cx.this().clone(); let glue = match Glue::new(cx, term) { Ok(v) => v, Err(e) => { cx.fail(e); return None; } }; let termout = Share::new(cx, TermOut::new(features)); let mut this = Self { resize, input, termout, glue, disable_output: false, paused: false, inbuf: Vec::new(), check_enable: false, force_timer: MaxTimerKey::default(), check_timer: MaxTimerKey::default(), cleanup: b"\x1Bc".to_vec(), panic_hook: Arc::new(std::panic::take_hook()), }; this.handle_resize(cx); this.update_panic_hook(); Some(this) } /// Enable or disable generation of the [`Key::Check`] keypress, /// which occurs in a gap in typing, 300ms after the last key /// pressed. This may be used to do validation if that's too /// expensive to do on every keypress. /// /// [`Key::Check`]: enum.Key.html#variant.Check pub fn check(&mut self, _cx: CX![], enable: bool) { self.check_enable = enable; } /// Ring the bell (i.e. beep) immediately. Doesn't wait for the /// buffered terminal data to be flushed. Will output even when /// paused. pub fn bell(&mut self, cx: CX![]) { if !self.disable_output { if let Err(e) = self.glue.write(&b"\x07"[..]) { self.disable_output = true; self.failure(cx, e); } } } /// Pause terminal input and output handling. Sends the cleanup /// sequence to the terminal, and switches to cooked mode. Sends /// a `resize` message with `None` to tell the app that output is /// disabled. /// /// This call should be used before forking off a process which /// might prompt the user and receive user input, otherwise this /// process would compete with the sub-process for user input. /// Resume after the subprocess has finished with the `resume` /// call. 
pub fn pause(&mut self, cx: CX![]) { if !self.paused { fwd!([self.resize], None); self.glue.input(false); self.termout.rw(cx).discard(); self.termout.rw(cx).bytes(&self.cleanup[..]); self.termout.rw(cx).flush(); self.flush(cx); self.paused = true; self.update_panic_hook(); } } /// Resume terminal output and input handling. Switches to raw /// mode and sends a resize message to trigger a full redraw. pub fn resume(&mut self, cx: CX![]) { if self.paused { self.paused = false; self.glue.input(true); self.termout.rw(cx).discard(); self.handle_resize(cx); self.update_panic_hook(); } } // Handle an unrecoverable failure. Try to clean up before // terminating the actor. fn failure(&mut self, cx: CX![], e: impl Error + 'static) { self.pause(cx); cx.fail(e); } /// Flush to the terminal all the data that's ready for sending /// from the TermOut buffer. Use [`TermOut::flush`] first to mark /// the point up to which data should be flushed. /// /// [`TermOut::flush`]: struct.TermOut.html#method.flush pub fn flush(&mut self, cx: CX![]) { if self.termout.rw(cx).new_cleanup.is_some() { // Don't replace unless we're sure there's a new value if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) { self.cleanup = cleanup; self.update_panic_hook(); } } if !self.disable_output { if self.paused { // Just drop the output whilst paused. We'll trigger // a full refresh on resuming self.termout.rw(cx).drain_flush(); } else { let ob = self.termout.rw(cx); let result = self.glue.write(ob.data_to_flush()); ob.drain_flush(); if let Err(e) = result { self.disable_output = true; self.failure(cx, e); } } } } /// Handle a resize event from the TTY. Gets new size, and /// notifies upstream. pub(crate) fn handle_resize(&mut self, cx: CX![]) { match self.glue.get_size() { Ok((sy, sx)) => { self.termout.rw(cx).set_size(sy, sx); fwd!([self.resize], Some(self.termout.clone())); } Err(e) => self.failure(cx, e), } } /// Handle an I/O error on the TTY input pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) { self.failure(cx, err); } /// Handle new bytes from the TTY input pub(crate) fn handle_data_in(&mut self, cx: CX![]) { self.glue.read_data(&mut self.inbuf); self.do_data_in(cx, false); } fn do_data_in(&mut self, cx: CX![], force: bool) { let mut pos = 0; let len = self.inbuf.len(); if len != 0 { if !force { // Note that this is too fast to catch M-Esc passed // through screen, as that seems to apply a 300ms // pause between the two Esc chars. For everything // else including real terminals it should be okay. timer_max!( &mut self.force_timer, cx.now() + Duration::from_millis(100), [cx], do_data_in(true) ); } while pos < len { match Key::decode(&self.inbuf[pos..len], force) { None => break, Some((count, key)) => { pos += count; fwd!([self.input], key); if self.check_enable { let check_expiry = cx.now() + Duration::from_millis(300); timer_max!(&mut self.check_timer, check_expiry, [cx], check_key()); } } } } } self.inbuf.drain(..pos); } fn check_key(&mut self, _cx: CX![]) { if self.check_enable { fwd!([self.input], Key::Check); } } // Install a panic hook that (if necessary) outputs the current // cleanup string, restores cooked mode and then does the default // panic action (e.g. dump out backtrace). This should be called // every time we switch to/from raw mode, and every time the // cleanup string is changed. 
fn update_panic_hook(&mut self) { // Discard old hook let _ = std::panic::take_hook(); let defhook = self.panic_hook.clone(); if self.paused { std::panic::set_hook(Box::new(move |info| defhook(info))); } else { let cleanup_fn = self.glue.cleanup_fn(); let cleanup = self.cleanup.clone(); std::panic::set_hook(Box::new(move |info| { cleanup_fn(&cleanup[..]); defhook(info); })); } } } impl Drop for Terminal { fn drop(&mut self) { // Drop panic hook and clean up terminal let _ = std::panic::take_hook(); if !self.paused { self.glue.cleanup_fn()(&self.cleanup[..]); } } }
random_line_split
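The terminal.rs sample above saves the existing panic hook with std::panic::take_hook and installs a wrapper that restores the terminal before delegating to it. A minimal, self-contained Rust sketch of that pattern follows; restore_terminal is a hypothetical stand-in for the real cleanup (writing the stored escape sequence back to the TTY), and the real code additionally keeps the saved hook in an Arc so it can be re-wrapped each time raw mode is toggled or the cleanup string changes.

use std::io::Write;
use std::panic;

// Hypothetical stand-in for the real terminal cleanup; "\x1Bc" matches the
// default cleanup sequence used in the sample above.
fn restore_terminal() {
    let mut out = std::io::stdout();
    let _ = out.write_all(b"\x1Bc");
    let _ = out.flush();
}

fn install_panic_hook() {
    // Save whatever hook is currently installed (normally the default
    // backtrace printer), then wrap it: clean up the terminal first and
    // only afterwards let the saved hook do its usual reporting.
    let previous = panic::take_hook();
    panic::set_hook(Box::new(move |info| {
        restore_terminal();
        previous(info);
    }));
}

fn main() {
    install_panic_hook();
    // Any panic from here on leaves the terminal usable before the
    // backtrace is printed, e.g.:
    // panic!("demo");
}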
terminal.rs
use crate::os_glue::Glue; use crate::{Features, Key, TermOut}; use stakker::{fwd, timer_max, Fwd, MaxTimerKey, Share, CX}; use std::error::Error; use std::mem; use std::panic::PanicInfo; use std::sync::Arc; use std::time::Duration; /// Actor that manages the connection to the terminal pub struct Terminal { resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>, termout: Share<TermOut>, glue: Glue, disable_output: bool, paused: bool, inbuf: Vec<u8>, check_enable: bool, force_timer: MaxTimerKey, check_timer: MaxTimerKey, cleanup: Vec<u8>, panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>, } impl Terminal { /// Set up the terminal. Sends a message back to `resize` /// immediately, which provides a reference to the shared /// [`TermOut`] which is used to buffer and flush terminal output /// data. /// /// Whenever the window size changes, a new `resize` message is /// sent. When the terminal output is paused, `None` is sent to /// `resize` to let the app know that there is no output available /// right now. /// /// Input keys received are sent to `input` once decoded. /// /// In case of an error that can't be handled, cleans up the /// terminal state and terminates the actor with /// `ActorDied::Failed`. The actor that created the terminal can /// catch that and do whatever cleanup is necessary before /// aborting the process. /// /// # Panic handling /// /// When Rust panics, the terminal must be restored to its normal /// state otherwise things would be left in a bad state for the /// user (in cooked mode with no echo, requiring the user to /// blindly type `reset` on the command-line). So this code saves /// a copy of the current panic handler (using /// `std::panic::take_hook`), and then installs its own handler /// that does terminal cleanup before calling on to the saved /// panic handler. This mean that if any custom panic handler is /// needed by the application, then it must be set up before the /// call to [`Terminal::init`]. /// /// [`TermOut`]: struct.TermOut.html pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> { // TODO: Query TERM/terminfo/environment for features to put in Features let features = Features { colour_256: false }; let term = cx.this().clone(); let glue = match Glue::new(cx, term) { Ok(v) => v, Err(e) => { cx.fail(e); return None; } }; let termout = Share::new(cx, TermOut::new(features)); let mut this = Self { resize, input, termout, glue, disable_output: false, paused: false, inbuf: Vec::new(), check_enable: false, force_timer: MaxTimerKey::default(), check_timer: MaxTimerKey::default(), cleanup: b"\x1Bc".to_vec(), panic_hook: Arc::new(std::panic::take_hook()), }; this.handle_resize(cx); this.update_panic_hook(); Some(this) } /// Enable or disable generation of the [`Key::Check`] keypress, /// which occurs in a gap in typing, 300ms after the last key /// pressed. This may be used to do validation if that's too /// expensive to do on every keypress. /// /// [`Key::Check`]: enum.Key.html#variant.Check pub fn check(&mut self, _cx: CX![], enable: bool) { self.check_enable = enable; } /// Ring the bell (i.e. beep) immediately. Doesn't wait for the /// buffered terminal data to be flushed. Will output even when /// paused. pub fn bell(&mut self, cx: CX![]) { if !self.disable_output { if let Err(e) = self.glue.write(&b"\x07"[..]) { self.disable_output = true; self.failure(cx, e); } } } /// Pause terminal input and output handling. Sends the cleanup /// sequence to the terminal, and switches to cooked mode. 
Sends /// a `resize` message with `None` to tell the app that output is /// disabled. /// /// This call should be used before forking off a process which /// might prompt the user and receive user input, otherwise this /// process would compete with the sub-process for user input. /// Resume after the subprocess has finished with the `resume` /// call. pub fn pause(&mut self, cx: CX![])
/// Resume terminal output and input handling. Switches to raw /// mode and sends a resize message to trigger a full redraw. pub fn resume(&mut self, cx: CX![]) { if self.paused { self.paused = false; self.glue.input(true); self.termout.rw(cx).discard(); self.handle_resize(cx); self.update_panic_hook(); } } // Handle an unrecoverable failure. Try to clean up before // terminating the actor. fn failure(&mut self, cx: CX![], e: impl Error + 'static) { self.pause(cx); cx.fail(e); } /// Flush to the terminal all the data that's ready for sending /// from the TermOut buffer. Use [`TermOut::flush`] first to mark /// the point up to which data should be flushed. /// /// [`TermOut::flush`]: struct.TermOut.html#method.flush pub fn flush(&mut self, cx: CX![]) { if self.termout.rw(cx).new_cleanup.is_some() { // Don't replace unless we're sure there's a new value if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) { self.cleanup = cleanup; self.update_panic_hook(); } } if !self.disable_output { if self.paused { // Just drop the output whilst paused. We'll trigger // a full refresh on resuming self.termout.rw(cx).drain_flush(); } else { let ob = self.termout.rw(cx); let result = self.glue.write(ob.data_to_flush()); ob.drain_flush(); if let Err(e) = result { self.disable_output = true; self.failure(cx, e); } } } } /// Handle a resize event from the TTY. Gets new size, and /// notifies upstream. pub(crate) fn handle_resize(&mut self, cx: CX![]) { match self.glue.get_size() { Ok((sy, sx)) => { self.termout.rw(cx).set_size(sy, sx); fwd!([self.resize], Some(self.termout.clone())); } Err(e) => self.failure(cx, e), } } /// Handle an I/O error on the TTY input pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) { self.failure(cx, err); } /// Handle new bytes from the TTY input pub(crate) fn handle_data_in(&mut self, cx: CX![]) { self.glue.read_data(&mut self.inbuf); self.do_data_in(cx, false); } fn do_data_in(&mut self, cx: CX![], force: bool) { let mut pos = 0; let len = self.inbuf.len(); if len != 0 { if !force { // Note that this is too fast to catch M-Esc passed // through screen, as that seems to apply a 300ms // pause between the two Esc chars. For everything // else including real terminals it should be okay. timer_max!( &mut self.force_timer, cx.now() + Duration::from_millis(100), [cx], do_data_in(true) ); } while pos < len { match Key::decode(&self.inbuf[pos..len], force) { None => break, Some((count, key)) => { pos += count; fwd!([self.input], key); if self.check_enable { let check_expiry = cx.now() + Duration::from_millis(300); timer_max!(&mut self.check_timer, check_expiry, [cx], check_key()); } } } } } self.inbuf.drain(..pos); } fn check_key(&mut self, _cx: CX![]) { if self.check_enable { fwd!([self.input], Key::Check); } } // Install a panic hook that (if necessary) outputs the current // cleanup string, restores cooked mode and then does the default // panic action (e.g. dump out backtrace). This should be called // every time we switch to/from raw mode, and every time the // cleanup string is changed. 
fn update_panic_hook(&mut self) { // Discard old hook let _ = std::panic::take_hook(); let defhook = self.panic_hook.clone(); if self.paused { std::panic::set_hook(Box::new(move |info| defhook(info))); } else { let cleanup_fn = self.glue.cleanup_fn(); let cleanup = self.cleanup.clone(); std::panic::set_hook(Box::new(move |info| { cleanup_fn(&cleanup[..]); defhook(info); })); } } } impl Drop for Terminal { fn drop(&mut self) { // Drop panic hook and clean up terminal let _ = std::panic::take_hook(); if !self.paused { self.glue.cleanup_fn()(&self.cleanup[..]); } } }
{ if !self.paused { fwd!([self.resize], None); self.glue.input(false); self.termout.rw(cx).discard(); self.termout.rw(cx).bytes(&self.cleanup[..]); self.termout.rw(cx).flush(); self.flush(cx); self.paused = true; self.update_panic_hook(); } }
identifier_body
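The record that ends here is a complete identifier_body example: the held-out middle is the body of Terminal::pause, and the surrounding text is the rest of terminal.rs. As a minimal sketch of how a consumer might splice such a record back together and sanity-check it — the dictionary key names below are assumptions chosen for illustration; they simply mirror the fields visible in this dump, and the strings are placeholders rather than real dataset content:

def reassemble(record):
    """Rebuild the original source text by splicing the middle between prefix and suffix."""
    return record["prefix"] + record["middle"] + record["suffix"]

def check_record(record):
    """Cheap integrity checks before using a record for FIM training."""
    allowed = {"identifier_body", "identifier_name", "conditional_block", "random_line_split"}
    assert record["fim_type"] in allowed, "unexpected fim_type"
    assert record["middle"], "middle span is empty"
    assert record["file_name"], "missing file name"
    return True

if __name__ == "__main__":
    sample = {
        "file_name": "terminal.rs",
        "prefix": "pub fn pause(&mut self, cx: CX![]) ",
        "middle": "{ /* body */ }",
        "suffix": "\n",
        "fim_type": "identifier_body",
    }
    check_record(sample)
    print(reassemble(sample))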
terminal.rs
use crate::os_glue::Glue; use crate::{Features, Key, TermOut}; use stakker::{fwd, timer_max, Fwd, MaxTimerKey, Share, CX}; use std::error::Error; use std::mem; use std::panic::PanicInfo; use std::sync::Arc; use std::time::Duration; /// Actor that manages the connection to the terminal pub struct Terminal { resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>, termout: Share<TermOut>, glue: Glue, disable_output: bool, paused: bool, inbuf: Vec<u8>, check_enable: bool, force_timer: MaxTimerKey, check_timer: MaxTimerKey, cleanup: Vec<u8>, panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>, } impl Terminal { /// Set up the terminal. Sends a message back to `resize` /// immediately, which provides a reference to the shared /// [`TermOut`] which is used to buffer and flush terminal output /// data. /// /// Whenever the window size changes, a new `resize` message is /// sent. When the terminal output is paused, `None` is sent to /// `resize` to let the app know that there is no output available /// right now. /// /// Input keys received are sent to `input` once decoded. /// /// In case of an error that can't be handled, cleans up the /// terminal state and terminates the actor with /// `ActorDied::Failed`. The actor that created the terminal can /// catch that and do whatever cleanup is necessary before /// aborting the process. /// /// # Panic handling /// /// When Rust panics, the terminal must be restored to its normal /// state otherwise things would be left in a bad state for the /// user (in cooked mode with no echo, requiring the user to /// blindly type `reset` on the command-line). So this code saves /// a copy of the current panic handler (using /// `std::panic::take_hook`), and then installs its own handler /// that does terminal cleanup before calling on to the saved /// panic handler. This mean that if any custom panic handler is /// needed by the application, then it must be set up before the /// call to [`Terminal::init`]. /// /// [`TermOut`]: struct.TermOut.html pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> { // TODO: Query TERM/terminfo/environment for features to put in Features let features = Features { colour_256: false }; let term = cx.this().clone(); let glue = match Glue::new(cx, term) { Ok(v) => v, Err(e) => { cx.fail(e); return None; } }; let termout = Share::new(cx, TermOut::new(features)); let mut this = Self { resize, input, termout, glue, disable_output: false, paused: false, inbuf: Vec::new(), check_enable: false, force_timer: MaxTimerKey::default(), check_timer: MaxTimerKey::default(), cleanup: b"\x1Bc".to_vec(), panic_hook: Arc::new(std::panic::take_hook()), }; this.handle_resize(cx); this.update_panic_hook(); Some(this) } /// Enable or disable generation of the [`Key::Check`] keypress, /// which occurs in a gap in typing, 300ms after the last key /// pressed. This may be used to do validation if that's too /// expensive to do on every keypress. /// /// [`Key::Check`]: enum.Key.html#variant.Check pub fn check(&mut self, _cx: CX![], enable: bool) { self.check_enable = enable; } /// Ring the bell (i.e. beep) immediately. Doesn't wait for the /// buffered terminal data to be flushed. Will output even when /// paused. pub fn bell(&mut self, cx: CX![]) { if !self.disable_output { if let Err(e) = self.glue.write(&b"\x07"[..]) { self.disable_output = true; self.failure(cx, e); } } } /// Pause terminal input and output handling. Sends the cleanup /// sequence to the terminal, and switches to cooked mode. 
Sends /// a `resize` message with `None` to tell the app that output is /// disabled. /// /// This call should be used before forking off a process which /// might prompt the user and receive user input, otherwise this /// process would compete with the sub-process for user input. /// Resume after the subprocess has finished with the `resume` /// call. pub fn
(&mut self, cx: CX![]) { if !self.paused { fwd!([self.resize], None); self.glue.input(false); self.termout.rw(cx).discard(); self.termout.rw(cx).bytes(&self.cleanup[..]); self.termout.rw(cx).flush(); self.flush(cx); self.paused = true; self.update_panic_hook(); } } /// Resume terminal output and input handling. Switches to raw /// mode and sends a resize message to trigger a full redraw. pub fn resume(&mut self, cx: CX![]) { if self.paused { self.paused = false; self.glue.input(true); self.termout.rw(cx).discard(); self.handle_resize(cx); self.update_panic_hook(); } } // Handle an unrecoverable failure. Try to clean up before // terminating the actor. fn failure(&mut self, cx: CX![], e: impl Error + 'static) { self.pause(cx); cx.fail(e); } /// Flush to the terminal all the data that's ready for sending /// from the TermOut buffer. Use [`TermOut::flush`] first to mark /// the point up to which data should be flushed. /// /// [`TermOut::flush`]: struct.TermOut.html#method.flush pub fn flush(&mut self, cx: CX![]) { if self.termout.rw(cx).new_cleanup.is_some() { // Don't replace unless we're sure there's a new value if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) { self.cleanup = cleanup; self.update_panic_hook(); } } if !self.disable_output { if self.paused { // Just drop the output whilst paused. We'll trigger // a full refresh on resuming self.termout.rw(cx).drain_flush(); } else { let ob = self.termout.rw(cx); let result = self.glue.write(ob.data_to_flush()); ob.drain_flush(); if let Err(e) = result { self.disable_output = true; self.failure(cx, e); } } } } /// Handle a resize event from the TTY. Gets new size, and /// notifies upstream. pub(crate) fn handle_resize(&mut self, cx: CX![]) { match self.glue.get_size() { Ok((sy, sx)) => { self.termout.rw(cx).set_size(sy, sx); fwd!([self.resize], Some(self.termout.clone())); } Err(e) => self.failure(cx, e), } } /// Handle an I/O error on the TTY input pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) { self.failure(cx, err); } /// Handle new bytes from the TTY input pub(crate) fn handle_data_in(&mut self, cx: CX![]) { self.glue.read_data(&mut self.inbuf); self.do_data_in(cx, false); } fn do_data_in(&mut self, cx: CX![], force: bool) { let mut pos = 0; let len = self.inbuf.len(); if len != 0 { if !force { // Note that this is too fast to catch M-Esc passed // through screen, as that seems to apply a 300ms // pause between the two Esc chars. For everything // else including real terminals it should be okay. timer_max!( &mut self.force_timer, cx.now() + Duration::from_millis(100), [cx], do_data_in(true) ); } while pos < len { match Key::decode(&self.inbuf[pos..len], force) { None => break, Some((count, key)) => { pos += count; fwd!([self.input], key); if self.check_enable { let check_expiry = cx.now() + Duration::from_millis(300); timer_max!(&mut self.check_timer, check_expiry, [cx], check_key()); } } } } } self.inbuf.drain(..pos); } fn check_key(&mut self, _cx: CX![]) { if self.check_enable { fwd!([self.input], Key::Check); } } // Install a panic hook that (if necessary) outputs the current // cleanup string, restores cooked mode and then does the default // panic action (e.g. dump out backtrace). This should be called // every time we switch to/from raw mode, and every time the // cleanup string is changed. 
fn update_panic_hook(&mut self) { // Discard old hook let _ = std::panic::take_hook(); let defhook = self.panic_hook.clone(); if self.paused { std::panic::set_hook(Box::new(move |info| defhook(info))); } else { let cleanup_fn = self.glue.cleanup_fn(); let cleanup = self.cleanup.clone(); std::panic::set_hook(Box::new(move |info| { cleanup_fn(&cleanup[..]); defhook(info); })); } } } impl Drop for Terminal { fn drop(&mut self) { // Drop panic hook and clean up terminal let _ = std::panic::take_hook(); if !self.paused { self.glue.cleanup_fn()(&self.cleanup[..]); } } }
pause
identifier_name
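In the identifier_name example above, the held-out middle is just the bare function name ("pause"), cut out between "pub fn " and the opening parenthesis. Below is a rough sketch of how such a target could be carved out of Rust source with a plain regex; this is only an illustration of the idea — a real pipeline would more likely use a proper parser, and the helper name is made up:

import re

def make_identifier_name_example(source, file_name):
    """Cut an identifier_name FIM example at the first `pub fn` identifier."""
    m = re.search(r"\bpub fn (\w+)", source)
    if m is None:
        return None
    return {
        "file_name": file_name,
        "prefix": source[:m.start(1)],  # everything up to the identifier
        "middle": m.group(1),           # the identifier itself, e.g. "pause"
        "suffix": source[m.end(1):],    # everything after it
        "fim_type": "identifier_name",
    }

if __name__ == "__main__":
    src = "impl Terminal { pub fn pause(&mut self, cx: CX![]) { /* ... */ } }"
    example = make_identifier_name_example(src, "terminal.rs")
    print(example["middle"])  # -> pause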
terminal.rs
use crate::os_glue::Glue; use crate::{Features, Key, TermOut}; use stakker::{fwd, timer_max, Fwd, MaxTimerKey, Share, CX}; use std::error::Error; use std::mem; use std::panic::PanicInfo; use std::sync::Arc; use std::time::Duration; /// Actor that manages the connection to the terminal pub struct Terminal { resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>, termout: Share<TermOut>, glue: Glue, disable_output: bool, paused: bool, inbuf: Vec<u8>, check_enable: bool, force_timer: MaxTimerKey, check_timer: MaxTimerKey, cleanup: Vec<u8>, panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>, } impl Terminal { /// Set up the terminal. Sends a message back to `resize` /// immediately, which provides a reference to the shared /// [`TermOut`] which is used to buffer and flush terminal output /// data. /// /// Whenever the window size changes, a new `resize` message is /// sent. When the terminal output is paused, `None` is sent to /// `resize` to let the app know that there is no output available /// right now. /// /// Input keys received are sent to `input` once decoded. /// /// In case of an error that can't be handled, cleans up the /// terminal state and terminates the actor with /// `ActorDied::Failed`. The actor that created the terminal can /// catch that and do whatever cleanup is necessary before /// aborting the process. /// /// # Panic handling /// /// When Rust panics, the terminal must be restored to its normal /// state otherwise things would be left in a bad state for the /// user (in cooked mode with no echo, requiring the user to /// blindly type `reset` on the command-line). So this code saves /// a copy of the current panic handler (using /// `std::panic::take_hook`), and then installs its own handler /// that does terminal cleanup before calling on to the saved /// panic handler. This mean that if any custom panic handler is /// needed by the application, then it must be set up before the /// call to [`Terminal::init`]. /// /// [`TermOut`]: struct.TermOut.html pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> { // TODO: Query TERM/terminfo/environment for features to put in Features let features = Features { colour_256: false }; let term = cx.this().clone(); let glue = match Glue::new(cx, term) { Ok(v) => v, Err(e) => { cx.fail(e); return None; } }; let termout = Share::new(cx, TermOut::new(features)); let mut this = Self { resize, input, termout, glue, disable_output: false, paused: false, inbuf: Vec::new(), check_enable: false, force_timer: MaxTimerKey::default(), check_timer: MaxTimerKey::default(), cleanup: b"\x1Bc".to_vec(), panic_hook: Arc::new(std::panic::take_hook()), }; this.handle_resize(cx); this.update_panic_hook(); Some(this) } /// Enable or disable generation of the [`Key::Check`] keypress, /// which occurs in a gap in typing, 300ms after the last key /// pressed. This may be used to do validation if that's too /// expensive to do on every keypress. /// /// [`Key::Check`]: enum.Key.html#variant.Check pub fn check(&mut self, _cx: CX![], enable: bool) { self.check_enable = enable; } /// Ring the bell (i.e. beep) immediately. Doesn't wait for the /// buffered terminal data to be flushed. Will output even when /// paused. pub fn bell(&mut self, cx: CX![]) { if !self.disable_output { if let Err(e) = self.glue.write(&b"\x07"[..]) { self.disable_output = true; self.failure(cx, e); } } } /// Pause terminal input and output handling. Sends the cleanup /// sequence to the terminal, and switches to cooked mode. 
Sends /// a `resize` message with `None` to tell the app that output is /// disabled. /// /// This call should be used before forking off a process which /// might prompt the user and receive user input, otherwise this /// process would compete with the sub-process for user input. /// Resume after the subprocess has finished with the `resume` /// call. pub fn pause(&mut self, cx: CX![]) { if !self.paused
} /// Resume terminal output and input handling. Switches to raw /// mode and sends a resize message to trigger a full redraw. pub fn resume(&mut self, cx: CX![]) { if self.paused { self.paused = false; self.glue.input(true); self.termout.rw(cx).discard(); self.handle_resize(cx); self.update_panic_hook(); } } // Handle an unrecoverable failure. Try to clean up before // terminating the actor. fn failure(&mut self, cx: CX![], e: impl Error + 'static) { self.pause(cx); cx.fail(e); } /// Flush to the terminal all the data that's ready for sending /// from the TermOut buffer. Use [`TermOut::flush`] first to mark /// the point up to which data should be flushed. /// /// [`TermOut::flush`]: struct.TermOut.html#method.flush pub fn flush(&mut self, cx: CX![]) { if self.termout.rw(cx).new_cleanup.is_some() { // Don't replace unless we're sure there's a new value if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) { self.cleanup = cleanup; self.update_panic_hook(); } } if !self.disable_output { if self.paused { // Just drop the output whilst paused. We'll trigger // a full refresh on resuming self.termout.rw(cx).drain_flush(); } else { let ob = self.termout.rw(cx); let result = self.glue.write(ob.data_to_flush()); ob.drain_flush(); if let Err(e) = result { self.disable_output = true; self.failure(cx, e); } } } } /// Handle a resize event from the TTY. Gets new size, and /// notifies upstream. pub(crate) fn handle_resize(&mut self, cx: CX![]) { match self.glue.get_size() { Ok((sy, sx)) => { self.termout.rw(cx).set_size(sy, sx); fwd!([self.resize], Some(self.termout.clone())); } Err(e) => self.failure(cx, e), } } /// Handle an I/O error on the TTY input pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) { self.failure(cx, err); } /// Handle new bytes from the TTY input pub(crate) fn handle_data_in(&mut self, cx: CX![]) { self.glue.read_data(&mut self.inbuf); self.do_data_in(cx, false); } fn do_data_in(&mut self, cx: CX![], force: bool) { let mut pos = 0; let len = self.inbuf.len(); if len != 0 { if !force { // Note that this is too fast to catch M-Esc passed // through screen, as that seems to apply a 300ms // pause between the two Esc chars. For everything // else including real terminals it should be okay. timer_max!( &mut self.force_timer, cx.now() + Duration::from_millis(100), [cx], do_data_in(true) ); } while pos < len { match Key::decode(&self.inbuf[pos..len], force) { None => break, Some((count, key)) => { pos += count; fwd!([self.input], key); if self.check_enable { let check_expiry = cx.now() + Duration::from_millis(300); timer_max!(&mut self.check_timer, check_expiry, [cx], check_key()); } } } } } self.inbuf.drain(..pos); } fn check_key(&mut self, _cx: CX![]) { if self.check_enable { fwd!([self.input], Key::Check); } } // Install a panic hook that (if necessary) outputs the current // cleanup string, restores cooked mode and then does the default // panic action (e.g. dump out backtrace). This should be called // every time we switch to/from raw mode, and every time the // cleanup string is changed. 
fn update_panic_hook(&mut self) { // Discard old hook let _ = std::panic::take_hook(); let defhook = self.panic_hook.clone(); if self.paused { std::panic::set_hook(Box::new(move |info| defhook(info))); } else { let cleanup_fn = self.glue.cleanup_fn(); let cleanup = self.cleanup.clone(); std::panic::set_hook(Box::new(move |info| { cleanup_fn(&cleanup[..]); defhook(info); })); } } } impl Drop for Terminal { fn drop(&mut self) { // Drop panic hook and clean up terminal let _ = std::panic::take_hook(); if !self.paused { self.glue.cleanup_fn()(&self.cleanup[..]); } } }
{ fwd!([self.resize], None); self.glue.input(false); self.termout.rw(cx).discard(); self.termout.rw(cx).bytes(&self.cleanup[..]); self.termout.rw(cx).flush(); self.flush(cx); self.paused = true; self.update_panic_hook(); }
conditional_block
importNet.js
import _ from 'lodash'; import config from 'grafana/app/core/config'; import locationUtil from '../utils/location_util'; const appCtrl = require('../utils/appCtrl'); const Influx = require('../utils/Influx'); import * as $ from 'jquery'; //const url = "http://localhost:8086/query?db=mydb&q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ; //const url = "http://localhost:8086/query?q=CREATE+DATABASE+dataaaa" ; /* const url = "http://localhost:8086/wirte?q=CREATE+DATABASE+dataaaa" ; const urlI = "http://localhost:8086/db/mydb/series?"; const body = { db:"mydb", name:"foo", columns:["col"], points:[[23]] }; const dataI = "cpu,host='serverA',region='us_west'+value=0.64" ; const urlS = "http://localhost:8086/query?db=mydb/"; const dataS = "q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ; $.ajax({ url: "http://localhost:8086/query?db=mydb", headers:{ 'Authorization': 'Basic ' + btoa('admin:admin'), }, type: 'POST', data: { q:"SELECT+value,region+FROM+cpu+WHERE+value=0.64", }, success: function(data) { //we got the response console.log(data); }, error: function(test, status, exception) { console.log("Error: " + exception); } }); /* let query = 'cpu,host=serverA,region=new value=69'; $.ajax({ url:'http://localhost:8086/write?db=mydb', type:'POST', contentType:'application/octet-stream', data: query, processData: false, success: function (data) { console.info(data); }, error: function(test, status, exception) { console.log("Error: " + exception); } }); */ //template struttura dashboard let structure = { __inputs: [], __requires: [ { type: "grafana", id: "grafana", name: "Grafana", version: "5.4.0" } ], annotations: { list: [ { builtIn: 1, datasource: "-- Grafana --", enable: true, hide: true, iconColor: "rgba(0, 211, 255, 1)", name: "Annotations & Alerts", type: "dashboard" } ] }, editable: false, gnetId: null, graphTooltip: 0, id: null, links: [], panels: [ { type: "text", title: "Warning from DreamCorp", gridPos: { x: 4, y: 0, w: 16, h: 8 }, id: 0, mode: "markdown", content: "# This is a dashboard that include all the information about the net you imported. DO NOT enter edit mode because saving it would break our fecth data mechanism" } ], schemaVersion: 16, style: "dark", tags: ["bayesian-network"], templating: { list: [ { allValue: null, current: { text: "nodo 1", value: "nodo 1" }, hide: 0, includeAll: false, label: "Nodo", multi: false, name: "Nodo", options: [ { selected: true, text: "nodo 1", value: "nodo 1" } ], query: "nodo 1", skipUrlSync: false, type: "custom" } ] }, time: { from: "now-6h", to: "now" }, timepicker: { refresh_intervals: [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], time_options: [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, timezone: "", title: "Rete Bayesiana", uid: "H39FJ39VMA12MD", version: 3, network: null }; export class ImportNetCtrl { /** @ngInject */ constructor(backendSrv, validationSrv, navModelSrv, $location, $routeParams) { this.backendSrv = backendSrv; this.validationSrv = validationSrv; this.$location = $location; this.$routeParams = $routeParams; this.step = 1; this.nameExists = false; this.uidExists = false; this.autoGenerateUid = true; this.autoGenerateUidValue = 'auto-generated'; this.folderId = $routeParams.folderId ? 
Number($routeParams.folderId) || 0 : null; this.initialFolderTitle = 'Select a folder'; // check gnetId in url if ($routeParams.gnetId) { this.gnetUrl = $routeParams.gnetId; this.checkGnetDashboard(); } } static initProbs(net){ let prob_nodes = appCtrl.getProbs(); //replace di appCtrl con netParser ==> ci sono anche i controlli di integrità for(let i=0;i<prob_nodes.length;i++) net.nodi[i].probs = prob_nodes[i]; //aggiungo le probabiltà } //PERSONALIZZATA onUpload(net) { this.network = net; //per l'html //riceverò sempre una net, gli devo aggiungere il template della dashboard ImportNetCtrl.initProbs(net); structure.title = net.rete; structure.network = net; //attacco il pezzo che ricevo al template console.info("onUpload Rete: "); console.info(structure.network); //creating a db let host ="http://localhost:8086"; let database ="bayesian"; const influx = new Influx(host,database); influx.createDB().then(()=>{ console.info("database created"); let nodes = []; let states = []; let probs = []; for(let i=0;i<net.nodi.length;i++){ nodes.push(net.nodi[i].id); states.push(net.nodi[i].stati); probs.push(net.nodi[i].probs); } /* return influx.insert(nodes,states,probs) .then(()=>console.info("inserted")); */ influx.insert(nodes,states,probs) .then(()=>console.info("inserted") .then(()=>{ influx.retrieve(nodes).then((data)=>{ console.info("retrived"); console.info(data); }); })); }).catch((err)=>console.info(err)); this.dash = structure; //gli do in pasto la struttura completa di dashboard + net this.dash.id = null; this.step = 2; this.inputs = []; if (this.dash.__inputs) { for (const input of this.dash.__inputs) { const inputModel = { name: input.name, label: input.label, info: input.description, value: input.value, type: input.type, pluginId: input.pluginId, options: [], }; if (input.type === 'datasource') { this.setDatasourceOptions(input, inputModel); } else if (!inputModel.info) { inputModel.info = 'Specify a string constant'; } this.inputs.push(inputModel); } } this.inputsValid = this.inputs.length === 0; this.titleChanged(); this.uidChanged(true); } setDatasourceOptions(input, inputModel) { const sources = _.filter(config.datasources, val => { return val.type === input.pluginId; }); if (sources.length === 0) {
se if (!inputModel.info) { inputModel.info = 'Select a ' + input.pluginName + ' data source'; } inputModel.options = sources.map(val => { return { text: val.name, value: val.name }; }); } inputValueChanged() { this.inputsValid = true; for (const input of this.inputs) { if (!input.value) { this.inputsValid = false; } } } titleChanged() { this.titleTouched = true; this.nameExists = false; this.validationSrv .validateNewDashboardName(this.folderId, this.dash.title) .then(() => { this.nameExists = false; this.hasNameValidationError = false; }) .catch(err => { if (err.type === 'EXISTING') { this.nameExists = true; } this.hasNameValidationError = true; this.nameValidationError = err.message; }); } uidChanged(initial) { this.uidExists = false; this.hasUidValidationError = false; if (initial === true && this.dash.uid) { this.autoGenerateUidValue = 'value set'; } this.backendSrv .getDashboardByUid(this.dash.uid) .then(res => { this.uidExists = true; this.hasUidValidationError = true; this.uidValidationError = `Dashboard named '${res.dashboard.title}' in folder '${ res.meta.folderTitle }' has the same uid`; }) .catch(err => { err.isHandled = true; }); } onFolderChange(folder) { this.folderId = folder.id; this.titleChanged(); } onEnterFolderCreation() { this.inputsValid = false; } onExitFolderCreation() { this.inputValueChanged(); } isValid() { return this.inputsValid && this.folderId !== null; } saveDashboard() { const inputs = this.inputs.map(input => { return { name: input.name, type: input.type, pluginId: input.pluginId, value: input.value, }; }); return this.backendSrv .post('api/dashboards/import', { dashboard: this.dash, overwrite: true, inputs: inputs, folderId: this.folderId, }) .then(res => { const dashUrl = locationUtil.stripBaseFromUrl(res.importedUrl); this.$location.url(dashUrl); }); } loadJsonText() { try { this.parseError = ''; this.onUpload(JSON.parse(this.jsonText)); //invio tutto quello che ricevo } catch (err) { console.log(err); this.parseError = err.message; return; } } /* checkGnetDashboard() { this.gnetError = ''; const match = /(^\d+$)|dashboards\/(\d+)/.exec(this.gnetUrl); let dashboardId; if (match && match[1]) { dashboardId = match[1]; } else if (match && match[2]) { dashboardId = match[2]; } else { this.gnetError = 'Could not find dashboard'; } return this.backendSrv .get('api/gnet/dashboards/' + dashboardId) .then(res => { this.gnetInfo = res; // store reference to grafana.com res.json.gnetId = res.id; this.onUpload(res.json); }) .catch(err => { err.isHandled = true; this.gnetError = err.data.message || err; }); } */ back() { this.gnetUrl = ''; this.step = 1; this.gnetError = ''; this.gnetInfo = ''; } } ImportNetCtrl.templateUrl = 'components/importNet.html';
inputModel.info = 'No data sources of type ' + input.pluginName + ' found'; } el
conditional_block
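The importNet.js record above drives an Influx helper (createDB, insert, retrieve) whose implementation is not part of this dump; the commented-out requests in its prefix point at the plain InfluxDB 1.x HTTP API. Here is a hedged Python sketch of those two underlying calls only — host, database and measurement names are taken from the surrounding code, and the actual helper may do more than this:

import requests

HOST = "http://localhost:8086"   # as in the surrounding code
DB = "bayesian"

def create_db():
    # Database creation goes through the /query endpoint in InfluxDB 1.x.
    r = requests.post(f"{HOST}/query", params={"q": f"CREATE DATABASE {DB}"})
    r.raise_for_status()

def write_point(measurement, tags, fields):
    # /write expects line protocol: measurement,tag=v,... field=v,...
    tag_str = ",".join(f"{k}={v}" for k, v in tags.items())
    field_str = ",".join(f"{k}={v}" for k, v in fields.items())
    r = requests.post(f"{HOST}/write", params={"db": DB},
                      data=f"{measurement},{tag_str} {field_str}")
    r.raise_for_status()

if __name__ == "__main__":
    create_db()
    write_point("cpu", {"host": "serverA", "region": "us_west"}, {"value": 0.64})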
importNet.js
import _ from 'lodash'; import config from 'grafana/app/core/config'; import locationUtil from '../utils/location_util'; const appCtrl = require('../utils/appCtrl'); const Influx = require('../utils/Influx'); import * as $ from 'jquery'; //const url = "http://localhost:8086/query?db=mydb&q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ; //const url = "http://localhost:8086/query?q=CREATE+DATABASE+dataaaa" ; /* const url = "http://localhost:8086/wirte?q=CREATE+DATABASE+dataaaa" ; const urlI = "http://localhost:8086/db/mydb/series?"; const body = { db:"mydb", name:"foo", columns:["col"], points:[[23]] }; const dataI = "cpu,host='serverA',region='us_west'+value=0.64" ; const urlS = "http://localhost:8086/query?db=mydb/"; const dataS = "q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ; $.ajax({ url: "http://localhost:8086/query?db=mydb", headers:{ 'Authorization': 'Basic ' + btoa('admin:admin'), }, type: 'POST', data: { q:"SELECT+value,region+FROM+cpu+WHERE+value=0.64", }, success: function(data) { //we got the response console.log(data); }, error: function(test, status, exception) { console.log("Error: " + exception); } }); /* let query = 'cpu,host=serverA,region=new value=69'; $.ajax({ url:'http://localhost:8086/write?db=mydb', type:'POST', contentType:'application/octet-stream', data: query, processData: false, success: function (data) { console.info(data); }, error: function(test, status, exception) { console.log("Error: " + exception); } }); */ //template struttura dashboard let structure = { __inputs: [], __requires: [ { type: "grafana", id: "grafana", name: "Grafana", version: "5.4.0" } ], annotations: { list: [ { builtIn: 1, datasource: "-- Grafana --", enable: true, hide: true, iconColor: "rgba(0, 211, 255, 1)", name: "Annotations & Alerts", type: "dashboard" } ] }, editable: false, gnetId: null, graphTooltip: 0, id: null, links: [], panels: [ { type: "text", title: "Warning from DreamCorp", gridPos: { x: 4, y: 0, w: 16, h: 8 }, id: 0, mode: "markdown", content: "# This is a dashboard that include all the information about the net you imported. DO NOT enter edit mode because saving it would break our fecth data mechanism" } ], schemaVersion: 16, style: "dark", tags: ["bayesian-network"], templating: { list: [ { allValue: null, current: { text: "nodo 1", value: "nodo 1" }, hide: 0, includeAll: false, label: "Nodo", multi: false, name: "Nodo", options: [ { selected: true, text: "nodo 1", value: "nodo 1" } ], query: "nodo 1", skipUrlSync: false, type: "custom" } ] }, time: { from: "now-6h", to: "now" }, timepicker: { refresh_intervals: [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], time_options: [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, timezone: "", title: "Rete Bayesiana", uid: "H39FJ39VMA12MD", version: 3, network: null }; export class ImportNetCtrl { /** @ngInject */ constructor(backendSrv, validationSrv, navModelSrv, $location, $routeParams) { this.backendSrv = backendSrv; this.validationSrv = validationSrv; this.$location = $location; this.$routeParams = $routeParams; this.step = 1; this.nameExists = false; this.uidExists = false; this.autoGenerateUid = true; this.autoGenerateUidValue = 'auto-generated'; this.folderId = $routeParams.folderId ? 
Number($routeParams.folderId) || 0 : null; this.initialFolderTitle = 'Select a folder'; // check gnetId in url if ($routeParams.gnetId) { this.gnetUrl = $routeParams.gnetId; this.checkGnetDashboard(); } } static initProbs(net){ let prob_nodes = appCtrl.getProbs(); //replace di appCtrl con netParser ==> ci sono anche i controlli di integrità for(let i=0;i<prob_nodes.length;i++) net.nodi[i].probs = prob_nodes[i]; //aggiungo le probabiltà } //PERSONALIZZATA onUpload(net) { this.network = net; //per l'html //riceverò sempre una net, gli devo aggiungere il template della dashboard ImportNetCtrl.initProbs(net); structure.title = net.rete; structure.network = net; //attacco il pezzo che ricevo al template console.info("onUpload Rete: "); console.info(structure.network); //creating a db let host ="http://localhost:8086"; let database ="bayesian"; const influx = new Influx(host,database); influx.createDB().then(()=>{ console.info("database created"); let nodes = []; let states = []; let probs = []; for(let i=0;i<net.nodi.length;i++){ nodes.push(net.nodi[i].id); states.push(net.nodi[i].stati); probs.push(net.nodi[i].probs); } /* return influx.insert(nodes,states,probs) .then(()=>console.info("inserted")); */ influx.insert(nodes,states,probs) .then(()=>console.info("inserted") .then(()=>{ influx.retrieve(nodes).then((data)=>{ console.info("retrived"); console.info(data); }); })); }).catch((err)=>console.info(err)); this.dash = structure; //gli do in pasto la struttura completa di dashboard + net this.dash.id = null; this.step = 2; this.inputs = []; if (this.dash.__inputs) { for (const input of this.dash.__inputs) { const inputModel = { name: input.name, label: input.label, info: input.description, value: input.value, type: input.type, pluginId: input.pluginId, options: [], }; if (input.type === 'datasource') { this.setDatasourceOptions(input, inputModel); } else if (!inputModel.info) { inputModel.info = 'Specify a string constant'; } this.inputs.push(inputModel); } } this.inputsValid = this.inputs.length === 0; this.titleChanged(); this.uidChanged(true); } setDatasourceOptions(input, inputModel) { const sources = _.filter(config.datasources, val => { return val.type === input.pluginId; }); if (sources.length === 0) { inputModel.info = 'No data sources of type ' + input.pluginName + ' found'; } else if (!inputModel.info) { inputModel.info = 'Select a ' + input.pluginName + ' data source'; } inputModel.options = sources.map(val => { return { text: val.name, value: val.name }; }); } inputValueChanged() { this.inputsValid = true; for (const input of this.inputs) { if (!input.value) { this.inputsValid = false; } } } titleChanged() { this.titleTouched = true; this.nameExists = false; this.validationSrv .validateNewDashboardName(this.folderId, this.dash.title) .then(() => { this.nameExists = false; this.hasNameValidationError = false; }) .catch(err => { if (err.type === 'EXISTING') { this.nameExists = true; }
this.hasNameValidationError = true; this.nameValidationError = err.message; }); } uidChanged(initial) { this.uidExists = false; this.hasUidValidationError = false; if (initial === true && this.dash.uid) { this.autoGenerateUidValue = 'value set'; } this.backendSrv .getDashboardByUid(this.dash.uid) .then(res => { this.uidExists = true; this.hasUidValidationError = true; this.uidValidationError = `Dashboard named '${res.dashboard.title}' in folder '${ res.meta.folderTitle }' has the same uid`; }) .catch(err => { err.isHandled = true; }); } onFolderChange(folder) { this.folderId = folder.id; this.titleChanged(); } onEnterFolderCreation() { this.inputsValid = false; } onExitFolderCreation() { this.inputValueChanged(); } isValid() { return this.inputsValid && this.folderId !== null; } saveDashboard() { const inputs = this.inputs.map(input => { return { name: input.name, type: input.type, pluginId: input.pluginId, value: input.value, }; }); return this.backendSrv .post('api/dashboards/import', { dashboard: this.dash, overwrite: true, inputs: inputs, folderId: this.folderId, }) .then(res => { const dashUrl = locationUtil.stripBaseFromUrl(res.importedUrl); this.$location.url(dashUrl); }); } loadJsonText() { try { this.parseError = ''; this.onUpload(JSON.parse(this.jsonText)); //invio tutto quello che ricevo } catch (err) { console.log(err); this.parseError = err.message; return; } } /* checkGnetDashboard() { this.gnetError = ''; const match = /(^\d+$)|dashboards\/(\d+)/.exec(this.gnetUrl); let dashboardId; if (match && match[1]) { dashboardId = match[1]; } else if (match && match[2]) { dashboardId = match[2]; } else { this.gnetError = 'Could not find dashboard'; } return this.backendSrv .get('api/gnet/dashboards/' + dashboardId) .then(res => { this.gnetInfo = res; // store reference to grafana.com res.json.gnetId = res.id; this.onUpload(res.json); }) .catch(err => { err.isHandled = true; this.gnetError = err.data.message || err; }); } */ back() { this.gnetUrl = ''; this.step = 1; this.gnetError = ''; this.gnetInfo = ''; } } ImportNetCtrl.templateUrl = 'components/importNet.html';
random_line_split
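This record is tagged random_line_split: the held-out middle is simply a run of consecutive source lines rather than a syntactic unit. A small sketch of how such a split can be produced follows — an assumption about the general technique, not the actual pipeline behind this dump:

import random

def random_line_split(source, file_name, rng=random):
    """Pick two line boundaries at random and hold out the lines between them."""
    lines = source.splitlines(keepends=True)
    if len(lines) < 3:
        return None
    i = rng.randrange(1, len(lines) - 1)      # first held-out line
    j = rng.randrange(i + 1, len(lines) + 1)  # one past the last held-out line
    return {
        "file_name": file_name,
        "prefix": "".join(lines[:i]),
        "middle": "".join(lines[i:j]),
        "suffix": "".join(lines[j:]),
        "fim_type": "random_line_split",
    }

if __name__ == "__main__":
    src = "line 1\nline 2\nline 3\nline 4\n"
    print(repr(random_line_split(src, "importNet.js", random.Random(0))["middle"]))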
importNet.js
import _ from 'lodash'; import config from 'grafana/app/core/config'; import locationUtil from '../utils/location_util'; const appCtrl = require('../utils/appCtrl'); const Influx = require('../utils/Influx'); import * as $ from 'jquery'; //const url = "http://localhost:8086/query?db=mydb&q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ; //const url = "http://localhost:8086/query?q=CREATE+DATABASE+dataaaa" ; /* const url = "http://localhost:8086/wirte?q=CREATE+DATABASE+dataaaa" ; const urlI = "http://localhost:8086/db/mydb/series?"; const body = { db:"mydb", name:"foo", columns:["col"], points:[[23]] }; const dataI = "cpu,host='serverA',region='us_west'+value=0.64" ; const urlS = "http://localhost:8086/query?db=mydb/"; const dataS = "q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ; $.ajax({ url: "http://localhost:8086/query?db=mydb", headers:{ 'Authorization': 'Basic ' + btoa('admin:admin'), }, type: 'POST', data: { q:"SELECT+value,region+FROM+cpu+WHERE+value=0.64", }, success: function(data) { //we got the response console.log(data); }, error: function(test, status, exception) { console.log("Error: " + exception); } }); /* let query = 'cpu,host=serverA,region=new value=69'; $.ajax({ url:'http://localhost:8086/write?db=mydb', type:'POST', contentType:'application/octet-stream', data: query, processData: false, success: function (data) { console.info(data); }, error: function(test, status, exception) { console.log("Error: " + exception); } }); */ //template struttura dashboard let structure = { __inputs: [], __requires: [ { type: "grafana", id: "grafana", name: "Grafana", version: "5.4.0" } ], annotations: { list: [ { builtIn: 1, datasource: "-- Grafana --", enable: true, hide: true, iconColor: "rgba(0, 211, 255, 1)", name: "Annotations & Alerts", type: "dashboard" } ] }, editable: false, gnetId: null, graphTooltip: 0, id: null, links: [], panels: [ { type: "text", title: "Warning from DreamCorp", gridPos: { x: 4, y: 0, w: 16, h: 8 }, id: 0, mode: "markdown", content: "# This is a dashboard that include all the information about the net you imported. DO NOT enter edit mode because saving it would break our fecth data mechanism" } ], schemaVersion: 16, style: "dark", tags: ["bayesian-network"], templating: { list: [ { allValue: null, current: { text: "nodo 1", value: "nodo 1" }, hide: 0, includeAll: false, label: "Nodo", multi: false, name: "Nodo", options: [ { selected: true, text: "nodo 1", value: "nodo 1" } ], query: "nodo 1", skipUrlSync: false, type: "custom" } ] }, time: { from: "now-6h", to: "now" }, timepicker: { refresh_intervals: [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], time_options: [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, timezone: "", title: "Rete Bayesiana", uid: "H39FJ39VMA12MD", version: 3, network: null }; export class ImportNetCtrl { /** @ngInject */ constructor(backendSrv, validationSrv, navModelSrv, $location, $routeParams) { this.backendSrv = backendSrv; this.validationSrv = validationSrv; this.$location = $location; this.$routeParams = $routeParams; this.step = 1; this.nameExists = false; this.uidExists = false; this.autoGenerateUid = true; this.autoGenerateUidValue = 'auto-generated'; this.folderId = $routeParams.folderId ? 
Number($routeParams.folderId) || 0 : null; this.initialFolderTitle = 'Select a folder'; // check gnetId in url if ($routeParams.gnetId) { this.gnetUrl = $routeParams.gnetId; this.checkGnetDashboard(); } } static initProbs(net){ let prob_nodes = appCtrl.getProbs(); //replace di appCtrl con netParser ==> ci sono anche i controlli di integrità for(let i=0;i<prob_nodes.length;i++) net.nodi[i].probs = prob_nodes[i]; //aggiungo le probabiltà } //PERSONALIZZATA onUpload(net) {
setDatasourceOptions(input, inputModel) { const sources = _.filter(config.datasources, val => { return val.type === input.pluginId; }); if (sources.length === 0) { inputModel.info = 'No data sources of type ' + input.pluginName + ' found'; } else if (!inputModel.info) { inputModel.info = 'Select a ' + input.pluginName + ' data source'; } inputModel.options = sources.map(val => { return { text: val.name, value: val.name }; }); } inputValueChanged() { this.inputsValid = true; for (const input of this.inputs) { if (!input.value) { this.inputsValid = false; } } } titleChanged() { this.titleTouched = true; this.nameExists = false; this.validationSrv .validateNewDashboardName(this.folderId, this.dash.title) .then(() => { this.nameExists = false; this.hasNameValidationError = false; }) .catch(err => { if (err.type === 'EXISTING') { this.nameExists = true; } this.hasNameValidationError = true; this.nameValidationError = err.message; }); } uidChanged(initial) { this.uidExists = false; this.hasUidValidationError = false; if (initial === true && this.dash.uid) { this.autoGenerateUidValue = 'value set'; } this.backendSrv .getDashboardByUid(this.dash.uid) .then(res => { this.uidExists = true; this.hasUidValidationError = true; this.uidValidationError = `Dashboard named '${res.dashboard.title}' in folder '${ res.meta.folderTitle }' has the same uid`; }) .catch(err => { err.isHandled = true; }); } onFolderChange(folder) { this.folderId = folder.id; this.titleChanged(); } onEnterFolderCreation() { this.inputsValid = false; } onExitFolderCreation() { this.inputValueChanged(); } isValid() { return this.inputsValid && this.folderId !== null; } saveDashboard() { const inputs = this.inputs.map(input => { return { name: input.name, type: input.type, pluginId: input.pluginId, value: input.value, }; }); return this.backendSrv .post('api/dashboards/import', { dashboard: this.dash, overwrite: true, inputs: inputs, folderId: this.folderId, }) .then(res => { const dashUrl = locationUtil.stripBaseFromUrl(res.importedUrl); this.$location.url(dashUrl); }); } loadJsonText() { try { this.parseError = ''; this.onUpload(JSON.parse(this.jsonText)); //invio tutto quello che ricevo } catch (err) { console.log(err); this.parseError = err.message; return; } } /* checkGnetDashboard() { this.gnetError = ''; const match = /(^\d+$)|dashboards\/(\d+)/.exec(this.gnetUrl); let dashboardId; if (match && match[1]) { dashboardId = match[1]; } else if (match && match[2]) { dashboardId = match[2]; } else { this.gnetError = 'Could not find dashboard'; } return this.backendSrv .get('api/gnet/dashboards/' + dashboardId) .then(res => { this.gnetInfo = res; // store reference to grafana.com res.json.gnetId = res.id; this.onUpload(res.json); }) .catch(err => { err.isHandled = true; this.gnetError = err.data.message || err; }); } */ back() { this.gnetUrl = ''; this.step = 1; this.gnetError = ''; this.gnetInfo = ''; } } ImportNetCtrl.templateUrl = 'components/importNet.html';
this.network = net; //per l'html //riceverò sempre una net, gli devo aggiungere il template della dashboard ImportNetCtrl.initProbs(net); structure.title = net.rete; structure.network = net; //attacco il pezzo che ricevo al template console.info("onUpload Rete: "); console.info(structure.network); //creating a db let host ="http://localhost:8086"; let database ="bayesian"; const influx = new Influx(host,database); influx.createDB().then(()=>{ console.info("database created"); let nodes = []; let states = []; let probs = []; for(let i=0;i<net.nodi.length;i++){ nodes.push(net.nodi[i].id); states.push(net.nodi[i].stati); probs.push(net.nodi[i].probs); } /* return influx.insert(nodes,states,probs) .then(()=>console.info("inserted")); */ influx.insert(nodes,states,probs) .then(()=>console.info("inserted") .then(()=>{ influx.retrieve(nodes).then((data)=>{ console.info("retrived"); console.info(data); }); })); }).catch((err)=>console.info(err)); this.dash = structure; //gli do in pasto la struttura completa di dashboard + net this.dash.id = null; this.step = 2; this.inputs = []; if (this.dash.__inputs) { for (const input of this.dash.__inputs) { const inputModel = { name: input.name, label: input.label, info: input.description, value: input.value, type: input.type, pluginId: input.pluginId, options: [], }; if (input.type === 'datasource') { this.setDatasourceOptions(input, inputModel); } else if (!inputModel.info) { inputModel.info = 'Specify a string constant'; } this.inputs.push(inputModel); } } this.inputsValid = this.inputs.length === 0; this.titleChanged(); this.uidChanged(true); }
identifier_body
importNet.js
import _ from 'lodash'; import config from 'grafana/app/core/config'; import locationUtil from '../utils/location_util'; const appCtrl = require('../utils/appCtrl'); const Influx = require('../utils/Influx'); import * as $ from 'jquery'; //const url = "http://localhost:8086/query?db=mydb&q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ; //const url = "http://localhost:8086/query?q=CREATE+DATABASE+dataaaa" ; /* const url = "http://localhost:8086/wirte?q=CREATE+DATABASE+dataaaa" ; const urlI = "http://localhost:8086/db/mydb/series?"; const body = { db:"mydb", name:"foo", columns:["col"], points:[[23]] }; const dataI = "cpu,host='serverA',region='us_west'+value=0.64" ; const urlS = "http://localhost:8086/query?db=mydb/"; const dataS = "q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ; $.ajax({ url: "http://localhost:8086/query?db=mydb", headers:{ 'Authorization': 'Basic ' + btoa('admin:admin'), }, type: 'POST', data: { q:"SELECT+value,region+FROM+cpu+WHERE+value=0.64", }, success: function(data) { //we got the response console.log(data); }, error: function(test, status, exception) { console.log("Error: " + exception); } }); /* let query = 'cpu,host=serverA,region=new value=69'; $.ajax({ url:'http://localhost:8086/write?db=mydb', type:'POST', contentType:'application/octet-stream', data: query, processData: false, success: function (data) { console.info(data); }, error: function(test, status, exception) { console.log("Error: " + exception); } }); */ //template struttura dashboard let structure = { __inputs: [], __requires: [ { type: "grafana", id: "grafana", name: "Grafana", version: "5.4.0" } ], annotations: { list: [ { builtIn: 1, datasource: "-- Grafana --", enable: true, hide: true, iconColor: "rgba(0, 211, 255, 1)", name: "Annotations & Alerts", type: "dashboard" } ] }, editable: false, gnetId: null, graphTooltip: 0, id: null, links: [], panels: [ { type: "text", title: "Warning from DreamCorp", gridPos: { x: 4, y: 0, w: 16, h: 8 }, id: 0, mode: "markdown", content: "# This is a dashboard that include all the information about the net you imported. DO NOT enter edit mode because saving it would break our fecth data mechanism" } ], schemaVersion: 16, style: "dark", tags: ["bayesian-network"], templating: { list: [ { allValue: null, current: { text: "nodo 1", value: "nodo 1" }, hide: 0, includeAll: false, label: "Nodo", multi: false, name: "Nodo", options: [ { selected: true, text: "nodo 1", value: "nodo 1" } ], query: "nodo 1", skipUrlSync: false, type: "custom" } ] }, time: { from: "now-6h", to: "now" }, timepicker: { refresh_intervals: [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], time_options: [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, timezone: "", title: "Rete Bayesiana", uid: "H39FJ39VMA12MD", version: 3, network: null }; export class ImportNetCtrl { /** @ngInject */ constructor(backendSrv, validationSrv, navModelSrv, $location, $routeParams) { this.backendSrv = backendSrv; this.validationSrv = validationSrv; this.$location = $location; this.$routeParams = $routeParams; this.step = 1; this.nameExists = false; this.uidExists = false; this.autoGenerateUid = true; this.autoGenerateUidValue = 'auto-generated'; this.folderId = $routeParams.folderId ? 
Number($routeParams.folderId) || 0 : null; this.initialFolderTitle = 'Select a folder'; // check gnetId in url if ($routeParams.gnetId) { this.gnetUrl = $routeParams.gnetId; this.checkGnetDashboard(); } } static initProbs(net){ let prob_nodes = appCtrl.getProbs(); //replace di appCtrl con netParser ==> ci sono anche i controlli di integrità for(let i=0;i<prob_nodes.length;i++) net.nodi[i].probs = prob_nodes[i]; //aggiungo le probabiltà } //PERSONALIZZATA onUpload(net) { this.network = net; //per l'html //riceverò sempre una net, gli devo aggiungere il template della dashboard ImportNetCtrl.initProbs(net); structure.title = net.rete; structure.network = net; //attacco il pezzo che ricevo al template console.info("onUpload Rete: "); console.info(structure.network); //creating a db let host ="http://localhost:8086"; let database ="bayesian"; const influx = new Influx(host,database); influx.createDB().then(()=>{ console.info("database created"); let nodes = []; let states = []; let probs = []; for(let i=0;i<net.nodi.length;i++){ nodes.push(net.nodi[i].id); states.push(net.nodi[i].stati); probs.push(net.nodi[i].probs); } /* return influx.insert(nodes,states,probs) .then(()=>console.info("inserted")); */ influx.insert(nodes,states,probs) .then(()=>console.info("inserted") .then(()=>{ influx.retrieve(nodes).then((data)=>{ console.info("retrived"); console.info(data); }); })); }).catch((err)=>console.info(err)); this.dash = structure; //gli do in pasto la struttura completa di dashboard + net this.dash.id = null; this.step = 2; this.inputs = []; if (this.dash.__inputs) { for (const input of this.dash.__inputs) { const inputModel = { name: input.name, label: input.label, info: input.description, value: input.value, type: input.type, pluginId: input.pluginId, options: [], }; if (input.type === 'datasource') { this.setDatasourceOptions(input, inputModel); } else if (!inputModel.info) { inputModel.info = 'Specify a string constant'; } this.inputs.push(inputModel); } } this.inputsValid = this.inputs.length === 0; this.titleChanged(); this.uidChanged(true); } setDatasourceOptions(input, inputModel) { const sources = _.filter(config.datasources, val => { return val.type === input.pluginId; }); if (sources.length === 0) { inputModel.info = 'No data sources of type ' + input.pluginName + ' found'; } else if (!inputModel.info) { inputModel.info = 'Select a ' + input.pluginName + ' data source'; } inputModel.options = sources.map(val => { return { text: val.name, value: val.name }; }); } inputValueChanged() { this.inputsValid = true; for (const input of this.inputs) { if (!input.value) { this.inputsValid = false; } } } titleChanged() { this.titleTouched = true; this.nameExists = false; this.validationSrv .validateNewDashboardName(this.folderId, this.dash.title) .then(() => { this.nameExists = false; this.hasNameValidationError = false; }) .catch(err => { if (err.type === 'EXISTING') { this.nameExists = true; } this.hasNameValidationError = true; this.nameValidationError = err.message; }); } uidChanged(initial) { this.uidExists = false; this.hasUidValidationError = false; if (initial === true && this.dash.uid) { this.autoGenerateUidValue = 'value set'; } this.backendSrv .getDashboardByUid(this.dash.uid) .then(res => { this.uidExists = true; this.hasUidValidationError = true; this.uidValidationError = `Dashboard named '${res.dashboard.title}' in folder '${ res.meta.folderTitle }' has the same uid`; }) .catch(err => { err.isHandled = true; }); } onFolderChange(folder) { this.folderId = folder.id; 
this.titleChanged(); } onEnterFolderCreation() { this.inputsValid = false; } onExitFolderCreation() { this.inputValueChanged(); } isV
{ return this.inputsValid && this.folderId !== null; } saveDashboard() { const inputs = this.inputs.map(input => { return { name: input.name, type: input.type, pluginId: input.pluginId, value: input.value, }; }); return this.backendSrv .post('api/dashboards/import', { dashboard: this.dash, overwrite: true, inputs: inputs, folderId: this.folderId, }) .then(res => { const dashUrl = locationUtil.stripBaseFromUrl(res.importedUrl); this.$location.url(dashUrl); }); } loadJsonText() { try { this.parseError = ''; this.onUpload(JSON.parse(this.jsonText)); //invio tutto quello che ricevo } catch (err) { console.log(err); this.parseError = err.message; return; } } /* checkGnetDashboard() { this.gnetError = ''; const match = /(^\d+$)|dashboards\/(\d+)/.exec(this.gnetUrl); let dashboardId; if (match && match[1]) { dashboardId = match[1]; } else if (match && match[2]) { dashboardId = match[2]; } else { this.gnetError = 'Could not find dashboard'; } return this.backendSrv .get('api/gnet/dashboards/' + dashboardId) .then(res => { this.gnetInfo = res; // store reference to grafana.com res.json.gnetId = res.id; this.onUpload(res.json); }) .catch(err => { err.isHandled = true; this.gnetError = err.data.message || err; }); } */ back() { this.gnetUrl = ''; this.step = 1; this.gnetError = ''; this.gnetInfo = ''; } } ImportNetCtrl.templateUrl = 'components/importNet.html';
alid()
identifier_name
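saveDashboard() in the record above ends up as a POST to Grafana's api/dashboards/import endpoint with a dashboard, overwrite, inputs and folderId payload, and reads importedUrl from the response. Below is a Python sketch of that same raw call, for reference; the base URL and API token are placeholders, and authentication details will vary per installation:

import requests

GRAFANA_URL = "http://localhost:3000"  # placeholder
API_TOKEN = "<grafana-api-token>"      # placeholder

def import_dashboard(dashboard, folder_id=0, inputs=None):
    payload = {
        "dashboard": dashboard,   # full dashboard JSON, e.g. a template like `structure` above
        "overwrite": True,
        "inputs": inputs or [],
        "folderId": folder_id,
    }
    r = requests.post(
        f"{GRAFANA_URL}/api/dashboards/import",
        json=payload,
        headers={"Authorization": f"Bearer {API_TOKEN}"},
    )
    r.raise_for_status()
    return r.json()["importedUrl"]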
dataset_RAF.py
import warnings warnings.filterwarnings('ignore', category=FutureWarning) from cv2 import cv2 from tqdm import tqdm import os import pickle import numpy as np import csv import sys from collections import defaultdict from dataset_utils import * sys.path.append("../training") from dataset_tools import enclosing_square, add_margin, DataGenerator EXT_ROOT = os.path.dirname(os.path.abspath(__file__)) rafdb_labels = { "age_group": { "0-3": 0, "4-19": 1, "20-39": 2, "40-69": 3, "70+":4 }, "race": { "Caucasian": 0, "African-American": 1, "Asian": 2 } } # converted labels rafDBmeta = defaultdict(dict) # multitask labels rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose rafDBdata = None # dict({image_path: ... }) # for ensembling purpose # ORDER: Gender, Age, Ethnicity, Emotion def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False): global rafDBdata if rafDBdata is None: rafDBdata = dict() i, errors = 0, defaultdict(set) for image_path, image_meta in input_meta.items(): identity = image_meta["identity"] roi = None # aligned image, roi is the image size rafDBdata[image_path] = { "roi" : roi, "identity" : identity, "gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE, "age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE, "ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE, "emotion": get_emotion_label(image_meta["emotion"]), "sample_num" : i } i += 1 print("Metadata:", len(rafDBdata)) if errors: print("Gender errors", errors["gender"]) print("Age errors", errors["age"]) print("Ethnicity errors", errors["ethnicity"]) # Labelling def
(gender): if gender == 'male': return LABELS["gender"]["male"] elif gender == 'female': return LABELS["gender"]["female"] return MASK_VALUE def get_age_group_label(age_group_text): return rafdb_labels["age_group"][age_group_text] def get_ethnicity_label(ethnicity_text): return rafdb_labels["race"][ethnicity_text] def get_emotion_label(emotion): return LABELS["emotion"][emotion] # Load from csv def _load_meta_from_csv(csv_meta, output_dict): data = readcsv(csv_meta) for row in data: output_dict[row[0]]["gender"] = row[1] output_dict[row[0]]["age_group"] = row[2] output_dict[row[0]]["race"] = row[3] output_dict[row[0]]["emotion"] = row[4] output_dict[row[0]]["identity"] = row[0].split("_")[1] def get_partition(identity_label): global rafDBpartition try: faces, partition = rafDBpartition[identity_label] rafDBpartition[identity_label] = (faces + 1, partition) except KeyError: # split 20/80 stratified by identity l = (len(rafDBpartition) - 1) % 10 if l == 0 or l == 1: partition = PARTITION_VAL else: partition = PARTITION_TRAIN rafDBpartition[identity_label] = (1, partition) return partition def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None): data = list() discarded_items = defaultdict(list) for image_path, image_meta in tqdm(rafDBdata.items()): path = os.path.join(imagesdir, image_path) if ALIGNED: path = os.path.splitext(path) path = path[0] + "_aligned" + path[1] identity = image_meta["identity"] image = cv2.imread(path) if image is None: print("WARNING! Unable to read {}".format(image_path)) print(" - At {}".format(path)) discarded_items["unavailable_image"].append(identity) continue if np.max(image) == np.min(image): print("Blank image {}".format(image_path)) discarded_items["blank_image"].append(identity) continue sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity) gender = rafDBdata[image_path]["gender"] age = rafDBdata[image_path]["age_group"] ethnicity = rafDBdata[image_path]["ethnicity"] emotion = rafDBdata[image_path]["emotion"] labels = (gender, age, ethnicity, emotion) roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"] sample = { 'img': path, 'label': labels, 'roi': roi, 'part': sample_partition } data.append(sample) if debug_max_num_samples is not None and len(data) >= debug_max_num_samples: print("Stopped loading. Debug max samples: ", debug_max_num_samples) break print("Data loaded. {} samples".format(len(data))) print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"])) print("Discarded for blank image: ", len(discarded_items["blank_image"])) return data ALIGNED = True class RAFDBMulti: def __init__(self, partition='train', imagesdir='data/RAF-DB/basic/Image/{aligned}', csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv', target_shape=(112, 112, 3), augment=True, custom_augmentation=None, preprocessing='full_normalization', debug_max_num_samples=None, include_gender=False, include_age_group=False, include_race=False, **kwargs): partition_label = partition_select(partition) self.target_shape = target_shape self.custom_augmentation = custom_augmentation self.augment = augment self.gen = None self.preprocessing = preprocessing print('Loading %s data...' 
% partition) num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else '' cache_task = "{}{}{}_emotion".format( "_withgender" if include_gender else "", "_withagegroup" if include_age_group else "", "_withrace" if include_race else "" ) cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples) cache_file_name = os.path.join("cache", cache_file_name) cache_file_name = os.path.join(EXT_ROOT, cache_file_name) print("cache file name %s" % cache_file_name) try: with open(cache_file_name, 'rb') as f: self.data = pickle.load(f)[:debug_max_num_samples] print("Data loaded. %d samples, from cache" % (len(self.data))) except FileNotFoundError: print("Loading %s data from scratch" % partition) load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test" imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original")) csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition)) _load_meta_from_csv(csvmeta, rafDBmeta) _load_traits(rafDBmeta, include_gender, include_age_group, include_race) print("Loading {} dataset".format(partition)) loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples) print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label) if partition.startswith('test'): self.data = loaded_data else: self.data = [x for x in loaded_data if x['part'] == partition_label] with open(cache_file_name, 'wb') as f: print("Pickle dumping") pickle.dump(self.data, f) def get_data(self): return self.data def get_num_samples(self): return len(self.data) def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False): if self.gen is None: self.gen = DataGenerator(data=self.data, target_shape=self.target_shape, with_augmentation=self.augment, custom_augmentation=self.custom_augmentation, batch_size=batch_size, num_classes=self.get_num_classes(), preprocessing=self.preprocessing, fullinfo=fullinfo, doublelabel=doublelabel) return self.gen def get_num_classes(self): return CLASSES def test_multi(dataset="test", debug_samples=None): if dataset.startswith("train") or dataset.startswith("val"): print(dataset, debug_samples if debug_samples is not None else '') dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dt.get_generator() else: dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dv.get_generator() i = 0 for batch in tqdm(gen): for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0], batch[1][1], batch[1][2], batch[1][3]): facemax = np.max(im) facemin = np.min(im) print("Sample:", i) print("Labels:", gender, age, ethnicity, emotion) print("Gender:", verbose_gender(gender), "- Age:", verbose_age(age), "- Ethnicity:", verbose_ethnicity(ethnicity), "- Emotion:", verbose_emotion(emotion)) im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8) cv2.putText(im, "{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), (0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255)) cv2.imshow("{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), im) i += 1 if cv2.waitKey(0) & 0xFF == ord('q'): cv2.destroyAllWindows() return if '__main__' == __name__: 
test_multi("train") test_multi("val") test_multi("test")
get_gender_label
identifier_name
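The `get_partition` helper in the code above assigns roughly two out of every ten newly seen identities to the validation split and the rest to training, so the 20/80 split is stratified by identity rather than by image. A minimal stand-alone sketch of that idea, assuming integer partition constants (the names PARTITION_TRAIN/PARTITION_VAL come from the source, the dictionary and function names here are illustrative):

PARTITION_TRAIN, PARTITION_VAL = 0, 1  # assumed values; the source imports these from dataset_utils

partition_by_identity = {}  # identity -> (faces_seen, partition)

def assign_partition(identity):
    # Reuse the partition already chosen for this identity, if any.
    if identity in partition_by_identity:
        faces, part = partition_by_identity[identity]
        partition_by_identity[identity] = (faces + 1, part)
        return part
    # New identity: out of every ten identities, two go to validation (~20/80 split).
    slot = (len(partition_by_identity) - 1) % 10
    part = PARTITION_VAL if slot in (0, 1) else PARTITION_TRAIN
    partition_by_identity[identity] = (1, part)
    return part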
dataset_RAF.py
import warnings warnings.filterwarnings('ignore', category=FutureWarning) from cv2 import cv2 from tqdm import tqdm import os import pickle import numpy as np import csv import sys from collections import defaultdict from dataset_utils import * sys.path.append("../training") from dataset_tools import enclosing_square, add_margin, DataGenerator EXT_ROOT = os.path.dirname(os.path.abspath(__file__)) rafdb_labels = { "age_group": { "0-3": 0, "4-19": 1, "20-39": 2, "40-69": 3, "70+":4 }, "race": { "Caucasian": 0, "African-American": 1, "Asian": 2 } } # converted labels rafDBmeta = defaultdict(dict) # multitask labels rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose rafDBdata = None # dict({image_path: ... }) # for ensembling purpose # ORDER: Gender, Age, Ethnicity, Emotion def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False): global rafDBdata if rafDBdata is None: rafDBdata = dict() i, errors = 0, defaultdict(set) for image_path, image_meta in input_meta.items(): identity = image_meta["identity"] roi = None # aligned image, roi is the image size rafDBdata[image_path] = { "roi" : roi, "identity" : identity, "gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE, "age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE, "ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE, "emotion": get_emotion_label(image_meta["emotion"]), "sample_num" : i } i += 1 print("Metadata:", len(rafDBdata)) if errors: print("Gender errors", errors["gender"]) print("Age errors", errors["age"]) print("Ethnicity errors", errors["ethnicity"]) # Labelling def get_gender_label(gender): if gender == 'male': return LABELS["gender"]["male"] elif gender == 'female': return LABELS["gender"]["female"] return MASK_VALUE def get_age_group_label(age_group_text): return rafdb_labels["age_group"][age_group_text] def get_ethnicity_label(ethnicity_text): return rafdb_labels["race"][ethnicity_text] def get_emotion_label(emotion): return LABELS["emotion"][emotion] # Load from csv def _load_meta_from_csv(csv_meta, output_dict): data = readcsv(csv_meta) for row in data: output_dict[row[0]]["gender"] = row[1] output_dict[row[0]]["age_group"] = row[2] output_dict[row[0]]["race"] = row[3] output_dict[row[0]]["emotion"] = row[4] output_dict[row[0]]["identity"] = row[0].split("_")[1] def get_partition(identity_label): global rafDBpartition try: faces, partition = rafDBpartition[identity_label] rafDBpartition[identity_label] = (faces + 1, partition) except KeyError: # split 20/80 stratified by identity l = (len(rafDBpartition) - 1) % 10 if l == 0 or l == 1: partition = PARTITION_VAL else: partition = PARTITION_TRAIN rafDBpartition[identity_label] = (1, partition) return partition def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None): data = list() discarded_items = defaultdict(list) for image_path, image_meta in tqdm(rafDBdata.items()): path = os.path.join(imagesdir, image_path) if ALIGNED: path = os.path.splitext(path) path = path[0] + "_aligned" + path[1] identity = image_meta["identity"] image = cv2.imread(path) if image is None: print("WARNING! 
Unable to read {}".format(image_path)) print(" - At {}".format(path)) discarded_items["unavailable_image"].append(identity) continue if np.max(image) == np.min(image): print("Blank image {}".format(image_path)) discarded_items["blank_image"].append(identity) continue sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity) gender = rafDBdata[image_path]["gender"] age = rafDBdata[image_path]["age_group"] ethnicity = rafDBdata[image_path]["ethnicity"] emotion = rafDBdata[image_path]["emotion"] labels = (gender, age, ethnicity, emotion) roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"] sample = { 'img': path, 'label': labels, 'roi': roi, 'part': sample_partition } data.append(sample) if debug_max_num_samples is not None and len(data) >= debug_max_num_samples: print("Stopped loading. Debug max samples: ", debug_max_num_samples) break print("Data loaded. {} samples".format(len(data))) print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"])) print("Discarded for blank image: ", len(discarded_items["blank_image"])) return data ALIGNED = True class RAFDBMulti:
def test_multi(dataset="test", debug_samples=None): if dataset.startswith("train") or dataset.startswith("val"): print(dataset, debug_samples if debug_samples is not None else '') dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dt.get_generator() else: dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dv.get_generator() i = 0 for batch in tqdm(gen): for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0], batch[1][1], batch[1][2], batch[1][3]): facemax = np.max(im) facemin = np.min(im) print("Sample:", i) print("Labels:", gender, age, ethnicity, emotion) print("Gender:", verbose_gender(gender), "- Age:", verbose_age(age), "- Ethnicity:", verbose_ethnicity(ethnicity), "- Emotion:", verbose_emotion(emotion)) im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8) cv2.putText(im, "{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), (0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255)) cv2.imshow("{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), im) i += 1 if cv2.waitKey(0) & 0xFF == ord('q'): cv2.destroyAllWindows() return if '__main__' == __name__: test_multi("train") test_multi("val") test_multi("test")
def __init__(self, partition='train', imagesdir='data/RAF-DB/basic/Image/{aligned}', csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv', target_shape=(112, 112, 3), augment=True, custom_augmentation=None, preprocessing='full_normalization', debug_max_num_samples=None, include_gender=False, include_age_group=False, include_race=False, **kwargs): partition_label = partition_select(partition) self.target_shape = target_shape self.custom_augmentation = custom_augmentation self.augment = augment self.gen = None self.preprocessing = preprocessing print('Loading %s data...' % partition) num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else '' cache_task = "{}{}{}_emotion".format( "_withgender" if include_gender else "", "_withagegroup" if include_age_group else "", "_withrace" if include_race else "" ) cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples) cache_file_name = os.path.join("cache", cache_file_name) cache_file_name = os.path.join(EXT_ROOT, cache_file_name) print("cache file name %s" % cache_file_name) try: with open(cache_file_name, 'rb') as f: self.data = pickle.load(f)[:debug_max_num_samples] print("Data loaded. %d samples, from cache" % (len(self.data))) except FileNotFoundError: print("Loading %s data from scratch" % partition) load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test" imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original")) csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition)) _load_meta_from_csv(csvmeta, rafDBmeta) _load_traits(rafDBmeta, include_gender, include_age_group, include_race) print("Loading {} dataset".format(partition)) loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples) print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label) if partition.startswith('test'): self.data = loaded_data else: self.data = [x for x in loaded_data if x['part'] == partition_label] with open(cache_file_name, 'wb') as f: print("Pickle dumping") pickle.dump(self.data, f) def get_data(self): return self.data def get_num_samples(self): return len(self.data) def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False): if self.gen is None: self.gen = DataGenerator(data=self.data, target_shape=self.target_shape, with_augmentation=self.augment, custom_augmentation=self.custom_augmentation, batch_size=batch_size, num_classes=self.get_num_classes(), preprocessing=self.preprocessing, fullinfo=fullinfo, doublelabel=doublelabel) return self.gen def get_num_classes(self): return CLASSES
identifier_body
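_load_traits above fills every task that is not requested (gender, age group, race) with MASK_VALUE, so a multitask loss can ignore those outputs on a per-sample basis. A small sketch of that masking, assuming MASK_VALUE is a sentinel integer (its real value comes from dataset_utils and is not shown in the source):

MASK_VALUE = -1  # assumption: sentinel used to mark labels the loss should skip

def build_labels(meta, include_gender=False, include_age_group=False, include_race=False):
    # Tasks that are switched off are replaced by the sentinel instead of a real label.
    return {
        "gender": meta["gender"] if include_gender else MASK_VALUE,
        "age_group": meta["age_group"] if include_age_group else MASK_VALUE,
        "ethnicity": meta["race"] if include_race else MASK_VALUE,
        "emotion": meta["emotion"],  # emotion is always labelled
    }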
dataset_RAF.py
import warnings warnings.filterwarnings('ignore', category=FutureWarning) from cv2 import cv2 from tqdm import tqdm import os import pickle import numpy as np import csv import sys from collections import defaultdict from dataset_utils import * sys.path.append("../training") from dataset_tools import enclosing_square, add_margin, DataGenerator EXT_ROOT = os.path.dirname(os.path.abspath(__file__)) rafdb_labels = { "age_group": { "0-3": 0, "4-19": 1, "20-39": 2, "40-69": 3, "70+":4 }, "race": { "Caucasian": 0, "African-American": 1, "Asian": 2 } } # converted labels rafDBmeta = defaultdict(dict) # multitask labels rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose rafDBdata = None # dict({image_path: ... }) # for ensembling purpose # ORDER: Gender, Age, Ethnicity, Emotion def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False): global rafDBdata if rafDBdata is None: rafDBdata = dict() i, errors = 0, defaultdict(set) for image_path, image_meta in input_meta.items(): identity = image_meta["identity"] roi = None # aligned image, roi is the image size rafDBdata[image_path] = { "roi" : roi, "identity" : identity, "gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE, "age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE, "ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE, "emotion": get_emotion_label(image_meta["emotion"]), "sample_num" : i } i += 1 print("Metadata:", len(rafDBdata)) if errors: print("Gender errors", errors["gender"]) print("Age errors", errors["age"]) print("Ethnicity errors", errors["ethnicity"]) # Labelling def get_gender_label(gender): if gender == 'male': return LABELS["gender"]["male"] elif gender == 'female': return LABELS["gender"]["female"] return MASK_VALUE def get_age_group_label(age_group_text): return rafdb_labels["age_group"][age_group_text] def get_ethnicity_label(ethnicity_text): return rafdb_labels["race"][ethnicity_text] def get_emotion_label(emotion): return LABELS["emotion"][emotion] # Load from csv
output_dict[row[0]]["gender"] = row[1] output_dict[row[0]]["age_group"] = row[2] output_dict[row[0]]["race"] = row[3] output_dict[row[0]]["emotion"] = row[4] output_dict[row[0]]["identity"] = row[0].split("_")[1] def get_partition(identity_label): global rafDBpartition try: faces, partition = rafDBpartition[identity_label] rafDBpartition[identity_label] = (faces + 1, partition) except KeyError: # split 20/80 stratified by identity l = (len(rafDBpartition) - 1) % 10 if l == 0 or l == 1: partition = PARTITION_VAL else: partition = PARTITION_TRAIN rafDBpartition[identity_label] = (1, partition) return partition def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None): data = list() discarded_items = defaultdict(list) for image_path, image_meta in tqdm(rafDBdata.items()): path = os.path.join(imagesdir, image_path) if ALIGNED: path = os.path.splitext(path) path = path[0] + "_aligned" + path[1] identity = image_meta["identity"] image = cv2.imread(path) if image is None: print("WARNING! Unable to read {}".format(image_path)) print(" - At {}".format(path)) discarded_items["unavailable_image"].append(identity) continue if np.max(image) == np.min(image): print("Blank image {}".format(image_path)) discarded_items["blank_image"].append(identity) continue sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity) gender = rafDBdata[image_path]["gender"] age = rafDBdata[image_path]["age_group"] ethnicity = rafDBdata[image_path]["ethnicity"] emotion = rafDBdata[image_path]["emotion"] labels = (gender, age, ethnicity, emotion) roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"] sample = { 'img': path, 'label': labels, 'roi': roi, 'part': sample_partition } data.append(sample) if debug_max_num_samples is not None and len(data) >= debug_max_num_samples: print("Stopped loading. Debug max samples: ", debug_max_num_samples) break print("Data loaded. {} samples".format(len(data))) print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"])) print("Discarded for blank image: ", len(discarded_items["blank_image"])) return data ALIGNED = True class RAFDBMulti: def __init__(self, partition='train', imagesdir='data/RAF-DB/basic/Image/{aligned}', csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv', target_shape=(112, 112, 3), augment=True, custom_augmentation=None, preprocessing='full_normalization', debug_max_num_samples=None, include_gender=False, include_age_group=False, include_race=False, **kwargs): partition_label = partition_select(partition) self.target_shape = target_shape self.custom_augmentation = custom_augmentation self.augment = augment self.gen = None self.preprocessing = preprocessing print('Loading %s data...' % partition) num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else '' cache_task = "{}{}{}_emotion".format( "_withgender" if include_gender else "", "_withagegroup" if include_age_group else "", "_withrace" if include_race else "" ) cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples) cache_file_name = os.path.join("cache", cache_file_name) cache_file_name = os.path.join(EXT_ROOT, cache_file_name) print("cache file name %s" % cache_file_name) try: with open(cache_file_name, 'rb') as f: self.data = pickle.load(f)[:debug_max_num_samples] print("Data loaded. 
%d samples, from cache" % (len(self.data))) except FileNotFoundError: print("Loading %s data from scratch" % partition) load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test" imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original")) csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition)) _load_meta_from_csv(csvmeta, rafDBmeta) _load_traits(rafDBmeta, include_gender, include_age_group, include_race) print("Loading {} dataset".format(partition)) loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples) print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label) if partition.startswith('test'): self.data = loaded_data else: self.data = [x for x in loaded_data if x['part'] == partition_label] with open(cache_file_name, 'wb') as f: print("Pickle dumping") pickle.dump(self.data, f) def get_data(self): return self.data def get_num_samples(self): return len(self.data) def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False): if self.gen is None: self.gen = DataGenerator(data=self.data, target_shape=self.target_shape, with_augmentation=self.augment, custom_augmentation=self.custom_augmentation, batch_size=batch_size, num_classes=self.get_num_classes(), preprocessing=self.preprocessing, fullinfo=fullinfo, doublelabel=doublelabel) return self.gen def get_num_classes(self): return CLASSES def test_multi(dataset="test", debug_samples=None): if dataset.startswith("train") or dataset.startswith("val"): print(dataset, debug_samples if debug_samples is not None else '') dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dt.get_generator() else: dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dv.get_generator() i = 0 for batch in tqdm(gen): for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0], batch[1][1], batch[1][2], batch[1][3]): facemax = np.max(im) facemin = np.min(im) print("Sample:", i) print("Labels:", gender, age, ethnicity, emotion) print("Gender:", verbose_gender(gender), "- Age:", verbose_age(age), "- Ethnicity:", verbose_ethnicity(ethnicity), "- Emotion:", verbose_emotion(emotion)) im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8) cv2.putText(im, "{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), (0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255)) cv2.imshow("{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), im) i += 1 if cv2.waitKey(0) & 0xFF == ord('q'): cv2.destroyAllWindows() return if '__main__' == __name__: test_multi("train") test_multi("val") test_multi("test")
def _load_meta_from_csv(csv_meta, output_dict):
    data = readcsv(csv_meta)
    for row in data:
random_line_split
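The RAFDBMulti constructor above follows a cache-or-rebuild pattern: it first tries to unpickle a previously cached sample list and only falls back to re-reading the CSV metadata and images on FileNotFoundError, pickling the result for the next run. A stripped-down sketch of that pattern (function and path names here are placeholders, not taken from the source):

import pickle

def load_samples(cache_path, build_fn):
    # Fast path: reuse the cached sample list if it already exists.
    try:
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    except FileNotFoundError:
        pass
    # Slow path: rebuild from scratch, then cache for the next run.
    data = build_fn()
    with open(cache_path, 'wb') as f:
        pickle.dump(data, f)
    return data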
dataset_RAF.py
import warnings warnings.filterwarnings('ignore', category=FutureWarning) from cv2 import cv2 from tqdm import tqdm import os import pickle import numpy as np import csv import sys from collections import defaultdict from dataset_utils import * sys.path.append("../training") from dataset_tools import enclosing_square, add_margin, DataGenerator EXT_ROOT = os.path.dirname(os.path.abspath(__file__)) rafdb_labels = { "age_group": { "0-3": 0, "4-19": 1, "20-39": 2, "40-69": 3, "70+":4 }, "race": { "Caucasian": 0, "African-American": 1, "Asian": 2 } } # converted labels rafDBmeta = defaultdict(dict) # multitask labels rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose rafDBdata = None # dict({image_path: ... }) # for ensembling purpose # ORDER: Gender, Age, Ethnicity, Emotion def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False): global rafDBdata if rafDBdata is None: rafDBdata = dict() i, errors = 0, defaultdict(set) for image_path, image_meta in input_meta.items(): identity = image_meta["identity"] roi = None # aligned image, roi is the image size rafDBdata[image_path] = { "roi" : roi, "identity" : identity, "gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE, "age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE, "ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE, "emotion": get_emotion_label(image_meta["emotion"]), "sample_num" : i } i += 1 print("Metadata:", len(rafDBdata)) if errors:
# Labelling def get_gender_label(gender): if gender == 'male': return LABELS["gender"]["male"] elif gender == 'female': return LABELS["gender"]["female"] return MASK_VALUE def get_age_group_label(age_group_text): return rafdb_labels["age_group"][age_group_text] def get_ethnicity_label(ethnicity_text): return rafdb_labels["race"][ethnicity_text] def get_emotion_label(emotion): return LABELS["emotion"][emotion] # Load from csv def _load_meta_from_csv(csv_meta, output_dict): data = readcsv(csv_meta) for row in data: output_dict[row[0]]["gender"] = row[1] output_dict[row[0]]["age_group"] = row[2] output_dict[row[0]]["race"] = row[3] output_dict[row[0]]["emotion"] = row[4] output_dict[row[0]]["identity"] = row[0].split("_")[1] def get_partition(identity_label): global rafDBpartition try: faces, partition = rafDBpartition[identity_label] rafDBpartition[identity_label] = (faces + 1, partition) except KeyError: # split 20/80 stratified by identity l = (len(rafDBpartition) - 1) % 10 if l == 0 or l == 1: partition = PARTITION_VAL else: partition = PARTITION_TRAIN rafDBpartition[identity_label] = (1, partition) return partition def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None): data = list() discarded_items = defaultdict(list) for image_path, image_meta in tqdm(rafDBdata.items()): path = os.path.join(imagesdir, image_path) if ALIGNED: path = os.path.splitext(path) path = path[0] + "_aligned" + path[1] identity = image_meta["identity"] image = cv2.imread(path) if image is None: print("WARNING! Unable to read {}".format(image_path)) print(" - At {}".format(path)) discarded_items["unavailable_image"].append(identity) continue if np.max(image) == np.min(image): print("Blank image {}".format(image_path)) discarded_items["blank_image"].append(identity) continue sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity) gender = rafDBdata[image_path]["gender"] age = rafDBdata[image_path]["age_group"] ethnicity = rafDBdata[image_path]["ethnicity"] emotion = rafDBdata[image_path]["emotion"] labels = (gender, age, ethnicity, emotion) roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"] sample = { 'img': path, 'label': labels, 'roi': roi, 'part': sample_partition } data.append(sample) if debug_max_num_samples is not None and len(data) >= debug_max_num_samples: print("Stopped loading. Debug max samples: ", debug_max_num_samples) break print("Data loaded. {} samples".format(len(data))) print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"])) print("Discarded for blank image: ", len(discarded_items["blank_image"])) return data ALIGNED = True class RAFDBMulti: def __init__(self, partition='train', imagesdir='data/RAF-DB/basic/Image/{aligned}', csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv', target_shape=(112, 112, 3), augment=True, custom_augmentation=None, preprocessing='full_normalization', debug_max_num_samples=None, include_gender=False, include_age_group=False, include_race=False, **kwargs): partition_label = partition_select(partition) self.target_shape = target_shape self.custom_augmentation = custom_augmentation self.augment = augment self.gen = None self.preprocessing = preprocessing print('Loading %s data...' 
% partition) num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else '' cache_task = "{}{}{}_emotion".format( "_withgender" if include_gender else "", "_withagegroup" if include_age_group else "", "_withrace" if include_race else "" ) cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples) cache_file_name = os.path.join("cache", cache_file_name) cache_file_name = os.path.join(EXT_ROOT, cache_file_name) print("cache file name %s" % cache_file_name) try: with open(cache_file_name, 'rb') as f: self.data = pickle.load(f)[:debug_max_num_samples] print("Data loaded. %d samples, from cache" % (len(self.data))) except FileNotFoundError: print("Loading %s data from scratch" % partition) load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test" imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original")) csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition)) _load_meta_from_csv(csvmeta, rafDBmeta) _load_traits(rafDBmeta, include_gender, include_age_group, include_race) print("Loading {} dataset".format(partition)) loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples) print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label) if partition.startswith('test'): self.data = loaded_data else: self.data = [x for x in loaded_data if x['part'] == partition_label] with open(cache_file_name, 'wb') as f: print("Pickle dumping") pickle.dump(self.data, f) def get_data(self): return self.data def get_num_samples(self): return len(self.data) def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False): if self.gen is None: self.gen = DataGenerator(data=self.data, target_shape=self.target_shape, with_augmentation=self.augment, custom_augmentation=self.custom_augmentation, batch_size=batch_size, num_classes=self.get_num_classes(), preprocessing=self.preprocessing, fullinfo=fullinfo, doublelabel=doublelabel) return self.gen def get_num_classes(self): return CLASSES def test_multi(dataset="test", debug_samples=None): if dataset.startswith("train") or dataset.startswith("val"): print(dataset, debug_samples if debug_samples is not None else '') dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dt.get_generator() else: dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dv.get_generator() i = 0 for batch in tqdm(gen): for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0], batch[1][1], batch[1][2], batch[1][3]): facemax = np.max(im) facemin = np.min(im) print("Sample:", i) print("Labels:", gender, age, ethnicity, emotion) print("Gender:", verbose_gender(gender), "- Age:", verbose_age(age), "- Ethnicity:", verbose_ethnicity(ethnicity), "- Emotion:", verbose_emotion(emotion)) im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8) cv2.putText(im, "{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), (0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255)) cv2.imshow("{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), im) i += 1 if cv2.waitKey(0) & 0xFF == ord('q'): cv2.destroyAllWindows() return if '__main__' == __name__: 
test_multi("train") test_multi("val") test_multi("test")
print("Gender errors", errors["gender"]) print("Age errors", errors["age"]) print("Ethnicity errors", errors["ethnicity"])
conditional_block
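test_multi above rescales each preprocessed face back into the 0-255 range with a min-max normalization before drawing labels on it with OpenCV. The same operation in isolation, with an illustrative input array:

import numpy as np

def to_displayable(im):
    # Map an arbitrary float image to uint8 [0, 255] via min-max normalization.
    lo, hi = np.min(im), np.max(im)
    if hi == lo:
        return np.zeros_like(im, dtype=np.uint8)  # constant image: avoid division by zero
    return (255 * (im - lo) / (hi - lo)).astype(np.uint8)

print(to_displayable(np.array([[-1.0, 0.0], [0.5, 1.0]])))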
mod.rs
//! GPU acceleration for BLAKE3. //! //! This module allows accelerating a [`Hasher`] through SPIR-V shaders. //! //! [`Hasher`]: ../struct.Hasher.html use super::*; use core::mem; use core::ops::{Deref, DerefMut}; use core::slice; /// Control uniform for the BLAKE3 shader. /// /// This uniform contains the information necessary for a BLAKE3 shader to /// correctly hash one level of the BLAKE3 tree structure. #[repr(C)] #[derive(Clone)] pub struct GpuControl { k: [u32; 8], t: [u32; 2], d: u32, } impl GpuControl { fn new(key: &CVWords, chunk_counter: u64, flags: u8) -> Self { Self { k: *key, t: [counter_low(chunk_counter), counter_high(chunk_counter)], d: flags.into(), } } fn plus_chunks(&self, chunks: u64) -> Self { let t = self.chunk_counter() + chunks; Self { k: self.k, t: [counter_low(t), counter_high(t)], d: self.d, } } #[inline] fn key(&self) -> &CVWords { &self.k } #[inline] fn chunk_counter(&self) -> u64 { self.t[0] as u64 | (self.t[1] as u64) << 32 } #[inline] fn flags(&self) -> u8 { self.d as u8 } /// Returns the bytes to be copied to the control uniform in the GPU. /// /// The contents of the returned slice are opaque and should be interpreted /// only by the shader. #[inline] pub fn as_bytes(&self) -> &[u8] { // According to the specification, the host and the device must have // the same endianness, so no endian conversion is necessary even on // big-endian hosts. debug_assert_eq!( mem::size_of_val(self), shaders::blake3::CONTROL_UNIFORM_SIZE, "must not have padding" ); unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of_val(self)) } } } // Variant of compress_subtree_wide which takes parents as input. fn compress_parents_wide<J: Join>( input: &[u8], key: &CVWords, flags: u8, platform: Platform, out: &mut [u8], ) -> usize { debug_assert!(input.len().is_power_of_two()); // Note that the single block case does *not* bump the SIMD degree up to 2 // when it is 1. This allows Rayon the option of multi-threading even the // 2-block case, which can help performance on smaller platforms. if input.len() <= platform.simd_degree() * BLOCK_LEN { return compress_parents_parallel(input, key, flags, platform, out); } // With more than simd_degree blocks, we need to recurse. Start by dividing // the input into left and right subtrees. (Note that this is only optimal // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree // of 3 or something, we'll need a more complicated strategy.) debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2"); let (left, right) = input.split_at(input.len() / 2); // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to // account for the special case of returning 2 outputs when the SIMD degree // is 1. let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN]; let degree = if left.len() == BLOCK_LEN { // The "simd_degree=1 and we're at the leaf nodes" case. debug_assert_eq!(platform.simd_degree(), 1); 1 } else { cmp::max(platform.simd_degree(), 2) }; let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN); // Recurse! This uses multiple threads if the "rayon" feature is enabled. let (left_n, right_n) = J::join( || compress_parents_wide::<J>(left, key, flags, platform, left_out), || compress_parents_wide::<J>(right, key, flags, platform, right_out), left.len(), right.len(), ); // The special case again. If simd_degree=1, then we'll have left_n=1 and // right_n=1. 
Rather than compressing them into a single output, return // them directly, to make sure we always have at least two outputs. debug_assert_eq!(left_n, degree); debug_assert!(right_n >= 1 && right_n <= left_n); if left_n == 1 { out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]); return 2; } // Otherwise, do one layer of parent node compression. let num_children = left_n + right_n; compress_parents_parallel( &cv_array[..num_children * OUT_LEN], key, flags, platform, out, ) } // Variant of compress_subtree_to_parent_node which takes parents as input. fn compress_parents_to_parent_node<J: Join>( input: &[u8], key: &CVWords, flags: u8, platform: Platform, ) -> [u8; BLOCK_LEN] { debug_assert!(input.len() > BLOCK_LEN); let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN]; let mut num_cvs = compress_parents_wide::<J>(input, &key, flags, platform, &mut cv_array); debug_assert!(num_cvs >= 2); // If MAX_SIMD_DEGREE is greater than 2 and there's enough input, // compress_parents_wide() returns more than 2 chaining values. Condense // them into 2 by forming parent nodes repeatedly. let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2]; while num_cvs > 2 { let cv_slice = &cv_array[..num_cvs * OUT_LEN]; num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array); cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]); } *array_ref!(cv_array, 0, 2 * OUT_LEN) } /// GPU-accelerated Hasher. /// /// This is a wrapper around a [`Hasher`] which also allows exporting the key /// and flags to be used by a GPU shader, and importing the shader's result. /// /// This wrapper should be used with care, since incorrect use can lead to a /// wrong hash output. It also allows extracting the key from the state, which /// would otherwise not be allowed in safe code. /// /// This wrapper can be freely converted to its inner [`Hasher`], through the /// `Deref`, `DerefMut`, and `Into` traits. Prefer to use the inner [`Hasher`] /// wherever the extra functionality from this wrapper is not needed. /// /// [`Hasher`]: ../struct.Hasher.html #[derive(Clone, Debug, Default)] pub struct GpuHasher { inner: Hasher, } impl GpuHasher { /// Wrapper for [`Hasher::new`](../struct.Hasher.html#method.new). #[inline] pub fn new() -> Self { Self { inner: Hasher::new(), } } /// Wrapper for [`Hasher::new_keyed`](../struct.Hasher.html#method.new_keyed). #[inline] pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self { Self { inner: Hasher::new_keyed(key), } } /// Wrapper for [`Hasher::new_derive_key`](../struct.Hasher.html#method.new_derive_key). #[inline] pub fn new_derive_key(context: &str) -> Self { Self { inner: Hasher::new_derive_key(context), } } /// Obtain the [`GpuControl`](struct.GpuControl.html) to hash full chunks starting with `chunk_counter` /// or parent nodes. pub fn gpu_control(&self, chunk_counter: u64) -> GpuControl { GpuControl::new(&self.key, chunk_counter, self.chunk_state.flags) } /// GPU-accelerated version of [`update_with_join`]. /// /// Unlike [`update_with_join`], this method receives the parents computed /// by one or more applications of the BLAKE3 shader. /// /// This method has several restrictions. The size of the shader input must /// be a power of two, it must be naturally aligned within the hash input, /// and the hasher state must not have any leftover bytes in its internal /// buffers. 
The simplest way to follow these invariants is to use this /// method, with the same chunk count and buffer size, for all of the input /// except for a variable-sized tail, which can use [`update_with_join`] or /// [`update`]. /// /// Note: the chunk counter is implicit in this method, but it must be the
/// /// [`update`]: #method.update /// [`update_with_join`]: #method.update_with_join /// [`GpuControl`]: struct.GpuControl.html pub fn update_from_gpu<J: Join>(&mut self, chunk_count: u64, parents: &mut [u8]) -> &mut Self { assert_eq!(self.chunk_state.len(), 0, "leftover buffered bytes"); let chunk_counter = self.chunk_state.chunk_counter; // These three checks make sure the increment of t0 in the shader did not overflow. assert!(chunk_count.is_power_of_two(), "bad chunk count"); assert!(chunk_count <= (1 << 32), "chunk count overflow"); assert_eq!(chunk_counter % chunk_count, 0, "misaligned hash"); assert_eq!(parents.len() % OUT_LEN, 0, "invalid hash size"); let parent_count = (parents.len() / OUT_LEN) as u64; assert_eq!(chunk_count % parent_count, 0, "invalid child count"); // The lazy merge of the CV stack needs at least 2 inputs. // And compress_parents_to_parent_node needs at least 2 blocks. assert!(parent_count > 2, "invalid parent count"); // The shader inputs and outputs are 32-bit words, which are in native byte order. // The chunk shader byte swaps its input, but neither shader byte swaps its output. // Since the rest of the code assumes little endian, byte swap the buffer here. Self::swap_endian::<J>(parents); let cv_pair = compress_parents_to_parent_node::<J>( parents, &self.key, self.chunk_state.flags, self.chunk_state.platform, ); let left_cv = array_ref!(cv_pair, 0, 32); let right_cv = array_ref!(cv_pair, 32, 32); // Push the two CVs we received into the CV stack in order. Because // the stack merges lazily, this guarantees we aren't merging the // root. self.push_cv(left_cv, chunk_counter); self.push_cv(right_cv, chunk_counter + (chunk_count / 2)); self.chunk_state.chunk_counter += chunk_count; self } // CPU simulation of the BLAKE3 chunk shader. // // This can be used to test the real shader. // // Note: unlike the real shader, this simulation always uses little-endian // inputs and outputs. #[doc(hidden)] pub fn simulate_chunk_shader<J: Join>( &self, count: usize, input: &[u8], output: &mut [u8], control: &GpuControl, ) { assert_eq!(input.len(), count * CHUNK_LEN, "invalid input size"); assert_eq!(output.len(), count * OUT_LEN, "invalid output size"); if count > self.chunk_state.platform.simd_degree() { let mid = count / 2; let (left_in, right_in) = input.split_at(mid * CHUNK_LEN); let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN); let control_r = control.plus_chunks(mid as u64); J::join( || self.simulate_chunk_shader::<J>(mid, left_in, left_out, control), || self.simulate_chunk_shader::<J>(count - mid, right_in, right_out, &control_r), left_in.len(), right_in.len(), ); } else if count > 0 { let mut chunks = ArrayVec::<[&[u8; CHUNK_LEN]; MAX_SIMD_DEGREE]>::new(); for chunk in input.chunks_exact(CHUNK_LEN) { chunks.push(array_ref!(chunk, 0, CHUNK_LEN)); } self.chunk_state.platform.hash_many( &chunks, control.key(), control.chunk_counter(), IncrementCounter::Yes, control.flags(), CHUNK_START, CHUNK_END, output, ); } } // CPU simulation of the BLAKE3 parent shader. // // This can be used to test the real shader. // // Note: unlike the real shader, this simulation always uses little-endian // inputs and outputs. 
#[doc(hidden)] pub fn simulate_parent_shader<J: Join>( &self, count: usize, input: &[u8], output: &mut [u8], control: &GpuControl, ) { assert_eq!(input.len(), count * BLOCK_LEN, "invalid input size"); assert_eq!(output.len(), count * OUT_LEN, "invalid output size"); if count > self.chunk_state.platform.simd_degree() { let mid = count / 2; let (left_in, right_in) = input.split_at(mid * BLOCK_LEN); let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN); let control_r = control.plus_chunks(mid as u64); J::join( || self.simulate_parent_shader::<J>(mid, left_in, left_out, control), || self.simulate_parent_shader::<J>(count - mid, right_in, right_out, &control_r), left_in.len(), right_in.len(), ); } else if count > 0 { let mut parents = ArrayVec::<[&[u8; BLOCK_LEN]; MAX_SIMD_DEGREE]>::new(); for parent in input.chunks_exact(BLOCK_LEN) { parents.push(array_ref!(parent, 0, BLOCK_LEN)); } self.chunk_state.platform.hash_many( &parents, control.key(), 0, IncrementCounter::No, control.flags() | PARENT, 0, 0, output, ); } } #[doc(hidden)] #[cfg(target_endian = "big")] pub fn swap_endian<J: Join>(buffer: &mut [u8]) { debug_assert!(buffer.len().is_power_of_two(), "invalid buffer size"); debug_assert_eq!(buffer.len() % OUT_LEN, 0, "invalid buffer size"); if buffer.len() > OUT_LEN { let (left, right) = buffer.split_at_mut(buffer.len() / 2); let left_len = left.len(); let right_len = right.len(); J::join( || Self::swap_endian::<J>(left), || Self::swap_endian::<J>(right), left_len, right_len, ); } else { for buf in buffer.chunks_exact_mut(4) { buf.swap(0, 3); buf.swap(1, 2); } } } #[doc(hidden)] #[inline(always)] #[cfg(target_endian = "little")] pub fn swap_endian<J: Join>(_buffer: &mut [u8]) {} } impl Deref for GpuHasher { type Target = Hasher; #[inline] fn deref(&self) -> &Self::Target { &self.inner } } impl DerefMut for GpuHasher { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } impl From<GpuHasher> for Hasher { #[inline] fn from(hasher: GpuHasher) -> Hasher { hasher.inner } } /// SPIR-V shader modules. pub mod shaders { /// Shader module for one level of the BLAKE3 tree. pub mod blake3 { /// Returns the SPIR-V code for the chunk shader module. #[cfg(target_endian = "big")] pub fn chunk_shader() -> &'static [u8] { include_bytes!("shaders/blake3-chunk-be.spv") } /// Returns the SPIR-V code for the chunk shader module. #[cfg(target_endian = "little")] pub fn chunk_shader() -> &'static [u8] { include_bytes!("shaders/blake3-chunk-le.spv") } /// Returns the SPIR-V code for the parent shader module. pub fn parent_shader() -> &'static [u8] { include_bytes!("shaders/blake3-parent.spv") } /// The local workgroup size. pub const WORKGROUP_SIZE: usize = 128; /// The descriptor binding for the input buffer. pub const INPUT_BUFFER_BINDING: u32 = 0; /// The descriptor binding for the output buffer. pub const OUTPUT_BUFFER_BINDING: u32 = 1; /// The size of the control uniform. 
pub const CONTROL_UNIFORM_SIZE: usize = 11 * 4; } } #[cfg(test)] #[cfg(feature = "std")] mod tests { use super::*; fn selftest_seq(len: usize) -> Vec<u8> { let seed = len as u32; let mut out = Vec::with_capacity(len); let mut a = seed.wrapping_mul(0xDEAD4BAD); let mut b = 1; for _ in 0..len { let t = a.wrapping_add(b); a = b; b = t; out.push((t >> 24) as u8); } out } #[cfg(not(feature = "rayon"))] type Join = join::SerialJoin; #[cfg(feature = "rayon")] type Join = join::RayonJoin; #[test] fn simulate_shader_one_level_once() { let len = CHUNK_LEN * 128; let input = selftest_seq(len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>(128, &input, &mut buffer, &hasher.gpu_control(0)); GpuHasher::swap_endian::<Join>(&mut buffer); hasher.update_from_gpu::<Join>(128, &mut buffer); assert_eq!(hasher.finalize(), expected); } #[test] fn simulate_shader_one_level_twice() { let len = CHUNK_LEN * 128; let input = selftest_seq(2 * len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>( 128, &input[..len], &mut buffer, &hasher.gpu_control(0), ); GpuHasher::swap_endian::<Join>(&mut buffer); hasher.update_from_gpu::<Join>(128, &mut buffer); hasher.simulate_chunk_shader::<Join>( 128, &input[len..], &mut buffer, &hasher.gpu_control(128), ); GpuHasher::swap_endian::<Join>(&mut buffer); hasher.update_from_gpu::<Join>(128, &mut buffer); assert_eq!(hasher.finalize(), expected); } #[test] fn simulate_shader_two_levels_once() { let len = 2 * CHUNK_LEN * 128; let input = selftest_seq(len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer1 = vec![0; 2 * OUT_LEN * 128]; let mut buffer2 = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>(2 * 128, &input, &mut buffer1, &hasher.gpu_control(0)); hasher.simulate_parent_shader::<Join>(128, &buffer1, &mut buffer2, &hasher.gpu_control(0)); GpuHasher::swap_endian::<Join>(&mut buffer2); hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2); assert_eq!(hasher.finalize(), expected); } #[test] fn simulate_shader_two_levels_twice() { let len = 2 * CHUNK_LEN * 128; let input = selftest_seq(2 * len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer1 = vec![0; 2 * OUT_LEN * 128]; let mut buffer2 = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>( 2 * 128, &input[..len], &mut buffer1, &hasher.gpu_control(0), ); hasher.simulate_parent_shader::<Join>(128, &buffer1, &mut buffer2, &hasher.gpu_control(0)); GpuHasher::swap_endian::<Join>(&mut buffer2); hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2); hasher.simulate_chunk_shader::<Join>( 2 * 128, &input[len..], &mut buffer1, &hasher.gpu_control(2 * 128), ); hasher.simulate_parent_shader::<Join>( 128, &buffer1, &mut buffer2, &hasher.gpu_control(2 * 128), ); GpuHasher::swap_endian::<Join>(&mut buffer2); hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2); assert_eq!(hasher.finalize(), expected); } }
/// same as the chunk counter in the [`GpuControl`] passed to the shader,
    /// otherwise it will lead to a wrong hash output.
    ///
    /// Note: on a big-endian host, this method will swap the endianness of the
    /// shader output in-place.
random_line_split
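GpuControl above stores the 64-bit chunk counter as two 32-bit words (t[0] low, t[1] high) and reassembles it with `t[0] as u64 | (t[1] as u64) << 32`. The same packing, sketched in Python for clarity rather than in the crate's own Rust:

def split_counter(counter):
    # Split a 64-bit chunk counter into (low, high) 32-bit words.
    return counter & 0xFFFFFFFF, (counter >> 32) & 0xFFFFFFFF

def join_counter(low, high):
    # Inverse of split_counter: reassemble the 64-bit value.
    return low | (high << 32)

c = (1 << 40) + 123
assert join_counter(*split_counter(c)) == c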
mod.rs
//! GPU acceleration for BLAKE3. //! //! This module allows accelerating a [`Hasher`] through SPIR-V shaders. //! //! [`Hasher`]: ../struct.Hasher.html use super::*; use core::mem; use core::ops::{Deref, DerefMut}; use core::slice; /// Control uniform for the BLAKE3 shader. /// /// This uniform contains the information necessary for a BLAKE3 shader to /// correctly hash one level of the BLAKE3 tree structure. #[repr(C)] #[derive(Clone)] pub struct GpuControl { k: [u32; 8], t: [u32; 2], d: u32, } impl GpuControl { fn new(key: &CVWords, chunk_counter: u64, flags: u8) -> Self { Self { k: *key, t: [counter_low(chunk_counter), counter_high(chunk_counter)], d: flags.into(), } } fn plus_chunks(&self, chunks: u64) -> Self { let t = self.chunk_counter() + chunks; Self { k: self.k, t: [counter_low(t), counter_high(t)], d: self.d, } } #[inline] fn key(&self) -> &CVWords { &self.k } #[inline] fn chunk_counter(&self) -> u64 { self.t[0] as u64 | (self.t[1] as u64) << 32 } #[inline] fn flags(&self) -> u8 { self.d as u8 } /// Returns the bytes to be copied to the control uniform in the GPU. /// /// The contents of the returned slice are opaque and should be interpreted /// only by the shader. #[inline] pub fn as_bytes(&self) -> &[u8] { // According to the specification, the host and the device must have // the same endianness, so no endian conversion is necessary even on // big-endian hosts. debug_assert_eq!( mem::size_of_val(self), shaders::blake3::CONTROL_UNIFORM_SIZE, "must not have padding" ); unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of_val(self)) } } } // Variant of compress_subtree_wide which takes parents as input. fn compress_parents_wide<J: Join>( input: &[u8], key: &CVWords, flags: u8, platform: Platform, out: &mut [u8], ) -> usize { debug_assert!(input.len().is_power_of_two()); // Note that the single block case does *not* bump the SIMD degree up to 2 // when it is 1. This allows Rayon the option of multi-threading even the // 2-block case, which can help performance on smaller platforms. if input.len() <= platform.simd_degree() * BLOCK_LEN { return compress_parents_parallel(input, key, flags, platform, out); } // With more than simd_degree blocks, we need to recurse. Start by dividing // the input into left and right subtrees. (Note that this is only optimal // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree // of 3 or something, we'll need a more complicated strategy.) debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2"); let (left, right) = input.split_at(input.len() / 2); // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to // account for the special case of returning 2 outputs when the SIMD degree // is 1. let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN]; let degree = if left.len() == BLOCK_LEN { // The "simd_degree=1 and we're at the leaf nodes" case. debug_assert_eq!(platform.simd_degree(), 1); 1 } else { cmp::max(platform.simd_degree(), 2) }; let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN); // Recurse! This uses multiple threads if the "rayon" feature is enabled. let (left_n, right_n) = J::join( || compress_parents_wide::<J>(left, key, flags, platform, left_out), || compress_parents_wide::<J>(right, key, flags, platform, right_out), left.len(), right.len(), ); // The special case again. If simd_degree=1, then we'll have left_n=1 and // right_n=1. 
Rather than compressing them into a single output, return // them directly, to make sure we always have at least two outputs. debug_assert_eq!(left_n, degree); debug_assert!(right_n >= 1 && right_n <= left_n); if left_n == 1 { out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]); return 2; } // Otherwise, do one layer of parent node compression. let num_children = left_n + right_n; compress_parents_parallel( &cv_array[..num_children * OUT_LEN], key, flags, platform, out, ) } // Variant of compress_subtree_to_parent_node which takes parents as input. fn compress_parents_to_parent_node<J: Join>( input: &[u8], key: &CVWords, flags: u8, platform: Platform, ) -> [u8; BLOCK_LEN] { debug_assert!(input.len() > BLOCK_LEN); let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN]; let mut num_cvs = compress_parents_wide::<J>(input, &key, flags, platform, &mut cv_array); debug_assert!(num_cvs >= 2); // If MAX_SIMD_DEGREE is greater than 2 and there's enough input, // compress_parents_wide() returns more than 2 chaining values. Condense // them into 2 by forming parent nodes repeatedly. let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2]; while num_cvs > 2 { let cv_slice = &cv_array[..num_cvs * OUT_LEN]; num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array); cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]); } *array_ref!(cv_array, 0, 2 * OUT_LEN) } /// GPU-accelerated Hasher. /// /// This is a wrapper around a [`Hasher`] which also allows exporting the key /// and flags to be used by a GPU shader, and importing the shader's result. /// /// This wrapper should be used with care, since incorrect use can lead to a /// wrong hash output. It also allows extracting the key from the state, which /// would otherwise not be allowed in safe code. /// /// This wrapper can be freely converted to its inner [`Hasher`], through the /// `Deref`, `DerefMut`, and `Into` traits. Prefer to use the inner [`Hasher`] /// wherever the extra functionality from this wrapper is not needed. /// /// [`Hasher`]: ../struct.Hasher.html #[derive(Clone, Debug, Default)] pub struct GpuHasher { inner: Hasher, } impl GpuHasher { /// Wrapper for [`Hasher::new`](../struct.Hasher.html#method.new). #[inline] pub fn new() -> Self { Self { inner: Hasher::new(), } } /// Wrapper for [`Hasher::new_keyed`](../struct.Hasher.html#method.new_keyed). #[inline] pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self { Self { inner: Hasher::new_keyed(key), } } /// Wrapper for [`Hasher::new_derive_key`](../struct.Hasher.html#method.new_derive_key). #[inline] pub fn new_derive_key(context: &str) -> Self { Self { inner: Hasher::new_derive_key(context), } } /// Obtain the [`GpuControl`](struct.GpuControl.html) to hash full chunks starting with `chunk_counter` /// or parent nodes. pub fn gpu_control(&self, chunk_counter: u64) -> GpuControl { GpuControl::new(&self.key, chunk_counter, self.chunk_state.flags) } /// GPU-accelerated version of [`update_with_join`]. /// /// Unlike [`update_with_join`], this method receives the parents computed /// by one or more applications of the BLAKE3 shader. /// /// This method has several restrictions. The size of the shader input must /// be a power of two, it must be naturally aligned within the hash input, /// and the hasher state must not have any leftover bytes in its internal /// buffers. 
The simplest way to follow these invariants is to use this /// method, with the same chunk count and buffer size, for all of the input /// except for a variable-sized tail, which can use [`update_with_join`] or /// [`update`]. /// /// Note: the chunk counter is implicit in this method, but it must be the /// same as the chunk counter in the [`GpuControl`] passed to the shader, /// otherwise it will lead to a wrong hash output. /// /// Note: on a big-endian host, this method will swap the endianness of the /// shader output in-place. /// /// [`update`]: #method.update /// [`update_with_join`]: #method.update_with_join /// [`GpuControl`]: struct.GpuControl.html pub fn update_from_gpu<J: Join>(&mut self, chunk_count: u64, parents: &mut [u8]) -> &mut Self { assert_eq!(self.chunk_state.len(), 0, "leftover buffered bytes"); let chunk_counter = self.chunk_state.chunk_counter; // These three checks make sure the increment of t0 in the shader did not overflow. assert!(chunk_count.is_power_of_two(), "bad chunk count"); assert!(chunk_count <= (1 << 32), "chunk count overflow"); assert_eq!(chunk_counter % chunk_count, 0, "misaligned hash"); assert_eq!(parents.len() % OUT_LEN, 0, "invalid hash size"); let parent_count = (parents.len() / OUT_LEN) as u64; assert_eq!(chunk_count % parent_count, 0, "invalid child count"); // The lazy merge of the CV stack needs at least 2 inputs. // And compress_parents_to_parent_node needs at least 2 blocks. assert!(parent_count > 2, "invalid parent count"); // The shader inputs and outputs are 32-bit words, which are in native byte order. // The chunk shader byte swaps its input, but neither shader byte swaps its output. // Since the rest of the code assumes little endian, byte swap the buffer here. Self::swap_endian::<J>(parents); let cv_pair = compress_parents_to_parent_node::<J>( parents, &self.key, self.chunk_state.flags, self.chunk_state.platform, ); let left_cv = array_ref!(cv_pair, 0, 32); let right_cv = array_ref!(cv_pair, 32, 32); // Push the two CVs we received into the CV stack in order. Because // the stack merges lazily, this guarantees we aren't merging the // root. self.push_cv(left_cv, chunk_counter); self.push_cv(right_cv, chunk_counter + (chunk_count / 2)); self.chunk_state.chunk_counter += chunk_count; self } // CPU simulation of the BLAKE3 chunk shader. // // This can be used to test the real shader. // // Note: unlike the real shader, this simulation always uses little-endian // inputs and outputs. 
#[doc(hidden)] pub fn simulate_chunk_shader<J: Join>( &self, count: usize, input: &[u8], output: &mut [u8], control: &GpuControl, ) { assert_eq!(input.len(), count * CHUNK_LEN, "invalid input size"); assert_eq!(output.len(), count * OUT_LEN, "invalid output size"); if count > self.chunk_state.platform.simd_degree() { let mid = count / 2; let (left_in, right_in) = input.split_at(mid * CHUNK_LEN); let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN); let control_r = control.plus_chunks(mid as u64); J::join( || self.simulate_chunk_shader::<J>(mid, left_in, left_out, control), || self.simulate_chunk_shader::<J>(count - mid, right_in, right_out, &control_r), left_in.len(), right_in.len(), ); } else if count > 0 { let mut chunks = ArrayVec::<[&[u8; CHUNK_LEN]; MAX_SIMD_DEGREE]>::new(); for chunk in input.chunks_exact(CHUNK_LEN) { chunks.push(array_ref!(chunk, 0, CHUNK_LEN)); } self.chunk_state.platform.hash_many( &chunks, control.key(), control.chunk_counter(), IncrementCounter::Yes, control.flags(), CHUNK_START, CHUNK_END, output, ); } } // CPU simulation of the BLAKE3 parent shader. // // This can be used to test the real shader. // // Note: unlike the real shader, this simulation always uses little-endian // inputs and outputs. #[doc(hidden)] pub fn simulate_parent_shader<J: Join>( &self, count: usize, input: &[u8], output: &mut [u8], control: &GpuControl, ) { assert_eq!(input.len(), count * BLOCK_LEN, "invalid input size"); assert_eq!(output.len(), count * OUT_LEN, "invalid output size"); if count > self.chunk_state.platform.simd_degree() { let mid = count / 2; let (left_in, right_in) = input.split_at(mid * BLOCK_LEN); let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN); let control_r = control.plus_chunks(mid as u64); J::join( || self.simulate_parent_shader::<J>(mid, left_in, left_out, control), || self.simulate_parent_shader::<J>(count - mid, right_in, right_out, &control_r), left_in.len(), right_in.len(), ); } else if count > 0 { let mut parents = ArrayVec::<[&[u8; BLOCK_LEN]; MAX_SIMD_DEGREE]>::new(); for parent in input.chunks_exact(BLOCK_LEN) { parents.push(array_ref!(parent, 0, BLOCK_LEN)); } self.chunk_state.platform.hash_many( &parents, control.key(), 0, IncrementCounter::No, control.flags() | PARENT, 0, 0, output, ); } } #[doc(hidden)] #[cfg(target_endian = "big")] pub fn swap_endian<J: Join>(buffer: &mut [u8]) { debug_assert!(buffer.len().is_power_of_two(), "invalid buffer size"); debug_assert_eq!(buffer.len() % OUT_LEN, 0, "invalid buffer size"); if buffer.len() > OUT_LEN { let (left, right) = buffer.split_at_mut(buffer.len() / 2); let left_len = left.len(); let right_len = right.len(); J::join( || Self::swap_endian::<J>(left), || Self::swap_endian::<J>(right), left_len, right_len, ); } else { for buf in buffer.chunks_exact_mut(4) { buf.swap(0, 3); buf.swap(1, 2); } } } #[doc(hidden)] #[inline(always)] #[cfg(target_endian = "little")] pub fn swap_endian<J: Join>(_buffer: &mut [u8]) {} } impl Deref for GpuHasher { type Target = Hasher; #[inline] fn deref(&self) -> &Self::Target { &self.inner } } impl DerefMut for GpuHasher { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } impl From<GpuHasher> for Hasher { #[inline] fn from(hasher: GpuHasher) -> Hasher { hasher.inner } } /// SPIR-V shader modules. pub mod shaders { /// Shader module for one level of the BLAKE3 tree. pub mod blake3 { /// Returns the SPIR-V code for the chunk shader module. #[cfg(target_endian = "big")] pub fn chunk_shader() -> &'static [u8]
/// Returns the SPIR-V code for the chunk shader module. #[cfg(target_endian = "little")] pub fn chunk_shader() -> &'static [u8] { include_bytes!("shaders/blake3-chunk-le.spv") } /// Returns the SPIR-V code for the parent shader module. pub fn parent_shader() -> &'static [u8] { include_bytes!("shaders/blake3-parent.spv") } /// The local workgroup size. pub const WORKGROUP_SIZE: usize = 128; /// The descriptor binding for the input buffer. pub const INPUT_BUFFER_BINDING: u32 = 0; /// The descriptor binding for the output buffer. pub const OUTPUT_BUFFER_BINDING: u32 = 1; /// The size of the control uniform. pub const CONTROL_UNIFORM_SIZE: usize = 11 * 4; } } #[cfg(test)] #[cfg(feature = "std")] mod tests { use super::*; fn selftest_seq(len: usize) -> Vec<u8> { let seed = len as u32; let mut out = Vec::with_capacity(len); let mut a = seed.wrapping_mul(0xDEAD4BAD); let mut b = 1; for _ in 0..len { let t = a.wrapping_add(b); a = b; b = t; out.push((t >> 24) as u8); } out } #[cfg(not(feature = "rayon"))] type Join = join::SerialJoin; #[cfg(feature = "rayon")] type Join = join::RayonJoin; #[test] fn simulate_shader_one_level_once() { let len = CHUNK_LEN * 128; let input = selftest_seq(len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>(128, &input, &mut buffer, &hasher.gpu_control(0)); GpuHasher::swap_endian::<Join>(&mut buffer); hasher.update_from_gpu::<Join>(128, &mut buffer); assert_eq!(hasher.finalize(), expected); } #[test] fn simulate_shader_one_level_twice() { let len = CHUNK_LEN * 128; let input = selftest_seq(2 * len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>( 128, &input[..len], &mut buffer, &hasher.gpu_control(0), ); GpuHasher::swap_endian::<Join>(&mut buffer); hasher.update_from_gpu::<Join>(128, &mut buffer); hasher.simulate_chunk_shader::<Join>( 128, &input[len..], &mut buffer, &hasher.gpu_control(128), ); GpuHasher::swap_endian::<Join>(&mut buffer); hasher.update_from_gpu::<Join>(128, &mut buffer); assert_eq!(hasher.finalize(), expected); } #[test] fn simulate_shader_two_levels_once() { let len = 2 * CHUNK_LEN * 128; let input = selftest_seq(len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer1 = vec![0; 2 * OUT_LEN * 128]; let mut buffer2 = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>(2 * 128, &input, &mut buffer1, &hasher.gpu_control(0)); hasher.simulate_parent_shader::<Join>(128, &buffer1, &mut buffer2, &hasher.gpu_control(0)); GpuHasher::swap_endian::<Join>(&mut buffer2); hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2); assert_eq!(hasher.finalize(), expected); } #[test] fn simulate_shader_two_levels_twice() { let len = 2 * CHUNK_LEN * 128; let input = selftest_seq(2 * len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer1 = vec![0; 2 * OUT_LEN * 128]; let mut buffer2 = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>( 2 * 128, &input[..len], &mut buffer1, &hasher.gpu_control(0), ); hasher.simulate_parent_shader::<Join>(128, &buffer1, &mut buffer2, &hasher.gpu_control(0)); GpuHasher::swap_endian::<Join>(&mut buffer2); hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2); hasher.simulate_chunk_shader::<Join>( 2 * 
128, &input[len..], &mut buffer1, &hasher.gpu_control(2 * 128), ); hasher.simulate_parent_shader::<Join>( 128, &buffer1, &mut buffer2, &hasher.gpu_control(2 * 128), ); GpuHasher::swap_endian::<Join>(&mut buffer2); hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2); assert_eq!(hasher.finalize(), expected); } }
{ include_bytes!("shaders/blake3-chunk-be.spv") }
identifier_body
mod.rs
//! GPU acceleration for BLAKE3. //! //! This module allows accelerating a [`Hasher`] through SPIR-V shaders. //! //! [`Hasher`]: ../struct.Hasher.html use super::*; use core::mem; use core::ops::{Deref, DerefMut}; use core::slice; /// Control uniform for the BLAKE3 shader. /// /// This uniform contains the information necessary for a BLAKE3 shader to /// correctly hash one level of the BLAKE3 tree structure. #[repr(C)] #[derive(Clone)] pub struct GpuControl { k: [u32; 8], t: [u32; 2], d: u32, } impl GpuControl { fn new(key: &CVWords, chunk_counter: u64, flags: u8) -> Self { Self { k: *key, t: [counter_low(chunk_counter), counter_high(chunk_counter)], d: flags.into(), } } fn plus_chunks(&self, chunks: u64) -> Self { let t = self.chunk_counter() + chunks; Self { k: self.k, t: [counter_low(t), counter_high(t)], d: self.d, } } #[inline] fn key(&self) -> &CVWords { &self.k } #[inline] fn chunk_counter(&self) -> u64 { self.t[0] as u64 | (self.t[1] as u64) << 32 } #[inline] fn
(&self) -> u8 { self.d as u8 } /// Returns the bytes to be copied to the control uniform in the GPU. /// /// The contents of the returned slice are opaque and should be interpreted /// only by the shader. #[inline] pub fn as_bytes(&self) -> &[u8] { // According to the specification, the host and the device must have // the same endianness, so no endian conversion is necessary even on // big-endian hosts. debug_assert_eq!( mem::size_of_val(self), shaders::blake3::CONTROL_UNIFORM_SIZE, "must not have padding" ); unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of_val(self)) } } } // Variant of compress_subtree_wide which takes parents as input. fn compress_parents_wide<J: Join>( input: &[u8], key: &CVWords, flags: u8, platform: Platform, out: &mut [u8], ) -> usize { debug_assert!(input.len().is_power_of_two()); // Note that the single block case does *not* bump the SIMD degree up to 2 // when it is 1. This allows Rayon the option of multi-threading even the // 2-block case, which can help performance on smaller platforms. if input.len() <= platform.simd_degree() * BLOCK_LEN { return compress_parents_parallel(input, key, flags, platform, out); } // With more than simd_degree blocks, we need to recurse. Start by dividing // the input into left and right subtrees. (Note that this is only optimal // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree // of 3 or something, we'll need a more complicated strategy.) debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2"); let (left, right) = input.split_at(input.len() / 2); // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to // account for the special case of returning 2 outputs when the SIMD degree // is 1. let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN]; let degree = if left.len() == BLOCK_LEN { // The "simd_degree=1 and we're at the leaf nodes" case. debug_assert_eq!(platform.simd_degree(), 1); 1 } else { cmp::max(platform.simd_degree(), 2) }; let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN); // Recurse! This uses multiple threads if the "rayon" feature is enabled. let (left_n, right_n) = J::join( || compress_parents_wide::<J>(left, key, flags, platform, left_out), || compress_parents_wide::<J>(right, key, flags, platform, right_out), left.len(), right.len(), ); // The special case again. If simd_degree=1, then we'll have left_n=1 and // right_n=1. Rather than compressing them into a single output, return // them directly, to make sure we always have at least two outputs. debug_assert_eq!(left_n, degree); debug_assert!(right_n >= 1 && right_n <= left_n); if left_n == 1 { out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]); return 2; } // Otherwise, do one layer of parent node compression. let num_children = left_n + right_n; compress_parents_parallel( &cv_array[..num_children * OUT_LEN], key, flags, platform, out, ) } // Variant of compress_subtree_to_parent_node which takes parents as input. fn compress_parents_to_parent_node<J: Join>( input: &[u8], key: &CVWords, flags: u8, platform: Platform, ) -> [u8; BLOCK_LEN] { debug_assert!(input.len() > BLOCK_LEN); let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN]; let mut num_cvs = compress_parents_wide::<J>(input, &key, flags, platform, &mut cv_array); debug_assert!(num_cvs >= 2); // If MAX_SIMD_DEGREE is greater than 2 and there's enough input, // compress_parents_wide() returns more than 2 chaining values. Condense // them into 2 by forming parent nodes repeatedly. 
let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2]; while num_cvs > 2 { let cv_slice = &cv_array[..num_cvs * OUT_LEN]; num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array); cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]); } *array_ref!(cv_array, 0, 2 * OUT_LEN) } /// GPU-accelerated Hasher. /// /// This is a wrapper around a [`Hasher`] which also allows exporting the key /// and flags to be used by a GPU shader, and importing the shader's result. /// /// This wrapper should be used with care, since incorrect use can lead to a /// wrong hash output. It also allows extracting the key from the state, which /// would otherwise not be allowed in safe code. /// /// This wrapper can be freely converted to its inner [`Hasher`], through the /// `Deref`, `DerefMut`, and `Into` traits. Prefer to use the inner [`Hasher`] /// wherever the extra functionality from this wrapper is not needed. /// /// [`Hasher`]: ../struct.Hasher.html #[derive(Clone, Debug, Default)] pub struct GpuHasher { inner: Hasher, } impl GpuHasher { /// Wrapper for [`Hasher::new`](../struct.Hasher.html#method.new). #[inline] pub fn new() -> Self { Self { inner: Hasher::new(), } } /// Wrapper for [`Hasher::new_keyed`](../struct.Hasher.html#method.new_keyed). #[inline] pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self { Self { inner: Hasher::new_keyed(key), } } /// Wrapper for [`Hasher::new_derive_key`](../struct.Hasher.html#method.new_derive_key). #[inline] pub fn new_derive_key(context: &str) -> Self { Self { inner: Hasher::new_derive_key(context), } } /// Obtain the [`GpuControl`](struct.GpuControl.html) to hash full chunks starting with `chunk_counter` /// or parent nodes. pub fn gpu_control(&self, chunk_counter: u64) -> GpuControl { GpuControl::new(&self.key, chunk_counter, self.chunk_state.flags) } /// GPU-accelerated version of [`update_with_join`]. /// /// Unlike [`update_with_join`], this method receives the parents computed /// by one or more applications of the BLAKE3 shader. /// /// This method has several restrictions. The size of the shader input must /// be a power of two, it must be naturally aligned within the hash input, /// and the hasher state must not have any leftover bytes in its internal /// buffers. The simplest way to follow these invariants is to use this /// method, with the same chunk count and buffer size, for all of the input /// except for a variable-sized tail, which can use [`update_with_join`] or /// [`update`]. /// /// Note: the chunk counter is implicit in this method, but it must be the /// same as the chunk counter in the [`GpuControl`] passed to the shader, /// otherwise it will lead to a wrong hash output. /// /// Note: on a big-endian host, this method will swap the endianness of the /// shader output in-place. /// /// [`update`]: #method.update /// [`update_with_join`]: #method.update_with_join /// [`GpuControl`]: struct.GpuControl.html pub fn update_from_gpu<J: Join>(&mut self, chunk_count: u64, parents: &mut [u8]) -> &mut Self { assert_eq!(self.chunk_state.len(), 0, "leftover buffered bytes"); let chunk_counter = self.chunk_state.chunk_counter; // These three checks make sure the increment of t0 in the shader did not overflow. 
assert!(chunk_count.is_power_of_two(), "bad chunk count"); assert!(chunk_count <= (1 << 32), "chunk count overflow"); assert_eq!(chunk_counter % chunk_count, 0, "misaligned hash"); assert_eq!(parents.len() % OUT_LEN, 0, "invalid hash size"); let parent_count = (parents.len() / OUT_LEN) as u64; assert_eq!(chunk_count % parent_count, 0, "invalid child count"); // The lazy merge of the CV stack needs at least 2 inputs. // And compress_parents_to_parent_node needs at least 2 blocks. assert!(parent_count > 2, "invalid parent count"); // The shader inputs and outputs are 32-bit words, which are in native byte order. // The chunk shader byte swaps its input, but neither shader byte swaps its output. // Since the rest of the code assumes little endian, byte swap the buffer here. Self::swap_endian::<J>(parents); let cv_pair = compress_parents_to_parent_node::<J>( parents, &self.key, self.chunk_state.flags, self.chunk_state.platform, ); let left_cv = array_ref!(cv_pair, 0, 32); let right_cv = array_ref!(cv_pair, 32, 32); // Push the two CVs we received into the CV stack in order. Because // the stack merges lazily, this guarantees we aren't merging the // root. self.push_cv(left_cv, chunk_counter); self.push_cv(right_cv, chunk_counter + (chunk_count / 2)); self.chunk_state.chunk_counter += chunk_count; self } // CPU simulation of the BLAKE3 chunk shader. // // This can be used to test the real shader. // // Note: unlike the real shader, this simulation always uses little-endian // inputs and outputs. #[doc(hidden)] pub fn simulate_chunk_shader<J: Join>( &self, count: usize, input: &[u8], output: &mut [u8], control: &GpuControl, ) { assert_eq!(input.len(), count * CHUNK_LEN, "invalid input size"); assert_eq!(output.len(), count * OUT_LEN, "invalid output size"); if count > self.chunk_state.platform.simd_degree() { let mid = count / 2; let (left_in, right_in) = input.split_at(mid * CHUNK_LEN); let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN); let control_r = control.plus_chunks(mid as u64); J::join( || self.simulate_chunk_shader::<J>(mid, left_in, left_out, control), || self.simulate_chunk_shader::<J>(count - mid, right_in, right_out, &control_r), left_in.len(), right_in.len(), ); } else if count > 0 { let mut chunks = ArrayVec::<[&[u8; CHUNK_LEN]; MAX_SIMD_DEGREE]>::new(); for chunk in input.chunks_exact(CHUNK_LEN) { chunks.push(array_ref!(chunk, 0, CHUNK_LEN)); } self.chunk_state.platform.hash_many( &chunks, control.key(), control.chunk_counter(), IncrementCounter::Yes, control.flags(), CHUNK_START, CHUNK_END, output, ); } } // CPU simulation of the BLAKE3 parent shader. // // This can be used to test the real shader. // // Note: unlike the real shader, this simulation always uses little-endian // inputs and outputs. 
#[doc(hidden)] pub fn simulate_parent_shader<J: Join>( &self, count: usize, input: &[u8], output: &mut [u8], control: &GpuControl, ) { assert_eq!(input.len(), count * BLOCK_LEN, "invalid input size"); assert_eq!(output.len(), count * OUT_LEN, "invalid output size"); if count > self.chunk_state.platform.simd_degree() { let mid = count / 2; let (left_in, right_in) = input.split_at(mid * BLOCK_LEN); let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN); let control_r = control.plus_chunks(mid as u64); J::join( || self.simulate_parent_shader::<J>(mid, left_in, left_out, control), || self.simulate_parent_shader::<J>(count - mid, right_in, right_out, &control_r), left_in.len(), right_in.len(), ); } else if count > 0 { let mut parents = ArrayVec::<[&[u8; BLOCK_LEN]; MAX_SIMD_DEGREE]>::new(); for parent in input.chunks_exact(BLOCK_LEN) { parents.push(array_ref!(parent, 0, BLOCK_LEN)); } self.chunk_state.platform.hash_many( &parents, control.key(), 0, IncrementCounter::No, control.flags() | PARENT, 0, 0, output, ); } } #[doc(hidden)] #[cfg(target_endian = "big")] pub fn swap_endian<J: Join>(buffer: &mut [u8]) { debug_assert!(buffer.len().is_power_of_two(), "invalid buffer size"); debug_assert_eq!(buffer.len() % OUT_LEN, 0, "invalid buffer size"); if buffer.len() > OUT_LEN { let (left, right) = buffer.split_at_mut(buffer.len() / 2); let left_len = left.len(); let right_len = right.len(); J::join( || Self::swap_endian::<J>(left), || Self::swap_endian::<J>(right), left_len, right_len, ); } else { for buf in buffer.chunks_exact_mut(4) { buf.swap(0, 3); buf.swap(1, 2); } } } #[doc(hidden)] #[inline(always)] #[cfg(target_endian = "little")] pub fn swap_endian<J: Join>(_buffer: &mut [u8]) {} } impl Deref for GpuHasher { type Target = Hasher; #[inline] fn deref(&self) -> &Self::Target { &self.inner } } impl DerefMut for GpuHasher { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } impl From<GpuHasher> for Hasher { #[inline] fn from(hasher: GpuHasher) -> Hasher { hasher.inner } } /// SPIR-V shader modules. pub mod shaders { /// Shader module for one level of the BLAKE3 tree. pub mod blake3 { /// Returns the SPIR-V code for the chunk shader module. #[cfg(target_endian = "big")] pub fn chunk_shader() -> &'static [u8] { include_bytes!("shaders/blake3-chunk-be.spv") } /// Returns the SPIR-V code for the chunk shader module. #[cfg(target_endian = "little")] pub fn chunk_shader() -> &'static [u8] { include_bytes!("shaders/blake3-chunk-le.spv") } /// Returns the SPIR-V code for the parent shader module. pub fn parent_shader() -> &'static [u8] { include_bytes!("shaders/blake3-parent.spv") } /// The local workgroup size. pub const WORKGROUP_SIZE: usize = 128; /// The descriptor binding for the input buffer. pub const INPUT_BUFFER_BINDING: u32 = 0; /// The descriptor binding for the output buffer. pub const OUTPUT_BUFFER_BINDING: u32 = 1; /// The size of the control uniform. 
pub const CONTROL_UNIFORM_SIZE: usize = 11 * 4; } } #[cfg(test)] #[cfg(feature = "std")] mod tests { use super::*; fn selftest_seq(len: usize) -> Vec<u8> { let seed = len as u32; let mut out = Vec::with_capacity(len); let mut a = seed.wrapping_mul(0xDEAD4BAD); let mut b = 1; for _ in 0..len { let t = a.wrapping_add(b); a = b; b = t; out.push((t >> 24) as u8); } out } #[cfg(not(feature = "rayon"))] type Join = join::SerialJoin; #[cfg(feature = "rayon")] type Join = join::RayonJoin; #[test] fn simulate_shader_one_level_once() { let len = CHUNK_LEN * 128; let input = selftest_seq(len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>(128, &input, &mut buffer, &hasher.gpu_control(0)); GpuHasher::swap_endian::<Join>(&mut buffer); hasher.update_from_gpu::<Join>(128, &mut buffer); assert_eq!(hasher.finalize(), expected); } #[test] fn simulate_shader_one_level_twice() { let len = CHUNK_LEN * 128; let input = selftest_seq(2 * len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>( 128, &input[..len], &mut buffer, &hasher.gpu_control(0), ); GpuHasher::swap_endian::<Join>(&mut buffer); hasher.update_from_gpu::<Join>(128, &mut buffer); hasher.simulate_chunk_shader::<Join>( 128, &input[len..], &mut buffer, &hasher.gpu_control(128), ); GpuHasher::swap_endian::<Join>(&mut buffer); hasher.update_from_gpu::<Join>(128, &mut buffer); assert_eq!(hasher.finalize(), expected); } #[test] fn simulate_shader_two_levels_once() { let len = 2 * CHUNK_LEN * 128; let input = selftest_seq(len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer1 = vec![0; 2 * OUT_LEN * 128]; let mut buffer2 = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>(2 * 128, &input, &mut buffer1, &hasher.gpu_control(0)); hasher.simulate_parent_shader::<Join>(128, &buffer1, &mut buffer2, &hasher.gpu_control(0)); GpuHasher::swap_endian::<Join>(&mut buffer2); hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2); assert_eq!(hasher.finalize(), expected); } #[test] fn simulate_shader_two_levels_twice() { let len = 2 * CHUNK_LEN * 128; let input = selftest_seq(2 * len); let expected = Hasher::new().update_with_join::<Join>(&input).finalize(); let mut hasher = GpuHasher::new(); let mut buffer1 = vec![0; 2 * OUT_LEN * 128]; let mut buffer2 = vec![0; OUT_LEN * 128]; hasher.simulate_chunk_shader::<Join>( 2 * 128, &input[..len], &mut buffer1, &hasher.gpu_control(0), ); hasher.simulate_parent_shader::<Join>(128, &buffer1, &mut buffer2, &hasher.gpu_control(0)); GpuHasher::swap_endian::<Join>(&mut buffer2); hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2); hasher.simulate_chunk_shader::<Join>( 2 * 128, &input[len..], &mut buffer1, &hasher.gpu_control(2 * 128), ); hasher.simulate_parent_shader::<Join>( 128, &buffer1, &mut buffer2, &hasher.gpu_control(2 * 128), ); GpuHasher::swap_endian::<Join>(&mut buffer2); hasher.update_from_gpu::<Join>(2 * 128, &mut buffer2); assert_eq!(hasher.finalize(), expected); } }
flags
identifier_name
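Illustrative aside on the mod.rs record above: the GpuControl struct packs eight 32-bit key words, the split 64-bit chunk counter, and the flags word, which is exactly the 11 * 4 = 44 bytes declared as CONTROL_UNIFORM_SIZE. The Python sketch below is not part of the dataset rows; the helper name pack_gpu_control is made up, and it assumes a little-endian host (the real uniform is copied in native byte order).

import struct

def pack_gpu_control(key_words, chunk_counter, flags):
    # Mirror the k/t/d layout of GpuControl: 8 key words, the counter split
    # into low/high 32-bit halves, then the flags word, all as u32 values.
    assert len(key_words) == 8
    counter_low = chunk_counter & 0xFFFFFFFF
    counter_high = (chunk_counter >> 32) & 0xFFFFFFFF
    buf = struct.pack("<11I", *key_words, counter_low, counter_high, flags)
    assert len(buf) == 11 * 4  # CONTROL_UNIFORM_SIZE
    return buf

# Example: dummy key words, chunk counter 128, no flags -> 44 bytes.
print(len(pack_gpu_control(list(range(8)), 128, 0)))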
transfer_leads.py
import json from simple_salesforce import Salesforce import os import boto3 import datetime def lambda_handler(event, context): #gathers JSON file from S3 that was posted from Chrome River SFDC via the transfer_leads_trigger lambda function bucket = event['Records'][0]['s3']['bucket']['name'] key = event['Records'][0]['s3']['object']['key'] s3=boto3.resource('s3') obj = s3.Object(event['Records'][0]['s3']['bucket']['name'],event['Records'][0]['s3']['object']['key']) body = obj.get()['Body'].read() input_body = json.loads(body) idList = input_body.get('Idlist') #gathers leads via SOQL through simple Salesforce library lead_list = _get_lead_list(idList) #standardizes picklist field values and creates value for Chrome River Transfer Notes field standardized_list = add_notes_and_standardize(lead_list) #sends to Certify SFDC instance result_dict = send_to_certify(standardized_list) print(result_dict) #posts notification to Slack upon failure to insert to Certify SFDC if(result_dict[0].get('success') == False): message = f"LEAD TRANSFER TO CERTIFY FAILURE \n" message += f"failed lead insert for the following IDs: \n" for num in range(len(lead_list)): idval = lead_list[num].get("Id") message += idval if num != (len(idList) - 1): message += f", " else: message += "\n" message += f"Returned error log from Salesforce: \n" message += result_dict[0].get('errors')[0].get('message') _publish_alert(message) else: #deletes JSON file within S3 s3 = boto3.client('s3') s3.delete_object(Bucket=bucket,Key=key) return { 'statusCode': 200, 'body': json.dumps('Transfer complete') } def _get_lead_list(idList): query_string = "SELECT ID,FirstName,LastName,Company,Phone,MobilePhone,Email,Fax,LinkedIn_Profile__c,Title,Status,Street,State,City,PostalCode,Country,NumberOfEmployees,Industry,LeadSource,Website,Recent_Conversion__c,Recent_Conversion_Date__c,(SELECT Subject,Type FROM Tasks WHERE Type = 'Form Submission'),(SELECT Campaign_Name__c,Status FROM CampaignMembers) FROM Lead WHERE " id_query_string = "" for num in range(len(idList)): id_query_string += "(ID = '" + idList[num] + "')" if num != (len(idList) - 1): id_query_string += " OR " query_string += id_query_string sf = Salesforce(username=os.environ['cr_sf_username'], password=os.environ['cr_sf_password'], security_token=os.environ['cr_sf_token'],domain=os.environ['cr_sf_host']) sf_data = sf.query_all(query_string) return sf_data['records'] def create_new_dict(lead_dict): new_dict = {} new_dict['FirstName'] = lead_dict.get('FirstName') new_dict['LastName'] = lead_dict.get('LastName') new_dict['Company'] = lead_dict.get('Company') new_dict['Title'] = lead_dict.get('Title') new_dict['Phone'] = lead_dict.get('Phone') new_dict['Email'] = lead_dict.get('Email') new_dict['Fax'] = lead_dict.get('Fax') new_dict['Linkedin_Profile__c'] = lead_dict.get('LinkedIn_Profile__c') new_dict['Street'] = lead_dict.get('Street') new_dict['State'] = lead_dict.get('State') new_dict['City'] = lead_dict.get('City') new_dict['PostalCode'] = lead_dict.get('PostalCode') new_dict['Country'] = lead_dict.get('Country') new_dict['Website'] = lead_dict.get('Website') new_dict['NumberOfEmployees'] = lead_dict.get('NumberOfEmployees') new_dict['Industry'] = lead_dict.get('Industry') new_dict['LeadSource'] = lead_dict.get('LeadSource') new_dict['Chrome_River_Transfer_Notes__c'] = lead_dict.get('Chrome_River_Transfer_Notes__c') new_dict['Employee_Range__c'] = lead_dict.get('Employee_Range__c') new_dict['Chrome_River_MQL__c'] = lead_dict.get('Chrome_River_MQL__c') print(new_dict) return new_dict 
def send_to_certify(lead_list): sf = Salesforce(username=os.environ['cert_sf_username'], password=os.environ['cert_sf_password'], security_token=os.environ['cert_sf_token'],domain=os.environ['cert_sf_host']) return sf.bulk.Lead.insert(lead_list,batch_size=200) def add_notes_and_standardize(lead_list): new_dict_array = [] for lead in lead_list: lead.__setitem__('Chrome_River_Transfer_Notes__c', generate_cr_notes_field(lead)) lead.__setitem__('Employee_Range__c',standardize_employee_range(lead)) lead.__setitem__('Chrome_River_MQL__c',mql_verify(lead)) lead['LeadSource'] = 'Chrome River Transfer' lead['Industry'] = standardize_industry(lead) if(lead.get('Country') != None): lead['Country'] = standardize_country(lead) if(lead.get('State') != None): lead['State'] = standardize_state(lead) new_dict_array.append(create_new_dict(lead)) return new_dict_array def mql_verify(lead_dict): mql_status = False if (lead_dict.get('Recent_Conversion__c') != None): mql_status = True print(mql_status) return mql_status def generate_cr_notes_field(lead_dict): note_text = '' if(lead_dict.get('LeadSource') != None): note_text += 'LeadSource: ' + lead_dict.get('LeadSource') + ' | ' if(lead_dict.get('Recent_Conversion__c') != None): note_text += 'Recent Conversion: ' + lead_dict.get('Recent_Conversion__c') + ' |' if(lead_dict.get('Tasks') != None): note_text += generate_task_summary(lead_dict.get('Tasks').get('records')) if(lead_dict.get('CampaignMembers') != None): note_text += generate_campaign_summary(lead_dict.get('CampaignMembers').get('records')) return note_text def generate_task_summary(task_list): task_text = 'Tasks: ' for task in task_list: task_text += ' ( ' + task.get('Subject') + ' ) ' task_text += '| ' return task_text def generate_campaign_summary(campaign_mem_list): campaign_mem_text = 'Campaigns: ' for campaign in campaign_mem_list: campaign_mem_text += ' ( ' + campaign.get('Campaign_Name__c') + ' Status: ' + campaign.get('Status') + ' ) ' campaign_mem_text += '| ' return campaign_mem_text def standardize_employee_range(lead_dict): e_count = lead_dict.get('NumberOfEmployees') e_range = '' if(e_count < 26): e_range = '1-25' elif(e_count > 26): e_range = '26-200' return e_range def
(lead_dict): cr_industry = lead_dict.get('Industry') cert_industry = lead_dict.get('Industry') if(cr_industry == 'Accounting'): cert_industry = 'Business Services' elif(cr_industry == 'Advertising'): cert_industry = 'Business Services' elif(cr_industry == 'Apparel'): cert_industry = 'Manufacturing' elif(cr_industry == 'Architecture'): cert_industry = 'Business Services' elif(cr_industry == 'Banking'): cert_industry = 'Finance' elif(cr_industry == 'Biotechnology'): cert_industry = 'Healthcare' elif(cr_industry == 'Chemicals'): cert_industry = 'Manufacturing' elif(cr_industry == 'Communications'): cert_industry = 'Telecommunications' elif(cr_industry == 'Consulting'): cert_industry = 'Business Services' elif(cr_industry == 'Electronics'): cert_industry = 'Manufacturing' elif(cr_industry == 'Energy'): cert_industry = 'Energy, Utilities & Waste Treatment' elif(cr_industry == 'Engineering'): cert_industry = 'Business Services' elif(cr_industry == 'Entertainment'): cert_industry = 'Consumer Services' elif(cr_industry == 'Environmental'): cert_industry = 'Energy, Utilities & Waste Treatment' elif(cr_industry == 'Food & Beverage'): cert_industry = 'Consumer Services' elif(cr_industry == 'Machinery'): cert_industry = 'Industrial' elif(cr_industry == 'Media'): cert_industry = 'Media & Internet' elif(cr_industry == 'Not For Profit'): cert_industry = 'Organizations' elif(cr_industry == 'Other'): cert_industry = 'Industrial' elif(cr_industry == 'Professional Service'): cert_industry = 'Business Services' elif(cr_industry == 'Public Relations'): cert_industry = 'Business Services' elif(cr_industry == 'Recreation'): cert_industry = 'Consumer Services' elif(cr_industry == 'Shipping'): cert_industry = 'Transportation' elif(cr_industry == 'Sports'): cert_industry = 'Media & Internet' elif(cr_industry == 'Technology'): cert_industry = 'Software' elif(cr_industry == 'Telecom'): cert_industry = 'Telecommunications' elif(cr_industry == 'Travel'): cert_industry = 'Consumer Services' elif(cr_industry == 'Utilities'): cert_industry = 'Energy, Utilities & Waste Treatment' else: cert_industry = 'Industrial' return cert_industry def standardize_country(lead_dict): cr_country = lead_dict.get('Country') cert_country = lead_dict.get('Country') if(cr_country == 'Bolivia'): cert_country = 'Bolivia, Plurinational State of' elif(cr_country == 'Iran'): cert_country = 'Iran, Islamic Republic of' elif(cr_country == 'North Korea'): cert_country = 'Korea, Democratic People\'s Republic of' elif(cr_country == 'South Korea'): cert_country = 'Korea, Republic of' elif(cr_country == 'Laos'): cert_country = 'Lao People\'s Democratic Republic' elif(cr_country == 'Moldova'): cert_country = 'Moldova, Republic of' elif(cr_country == 'Marshall Islands'): cert_country = 'Saint Martin (French part)' elif(cr_country == 'Macedonia'): cert_country = 'Greece' elif(cr_country == 'Russia'): cert_country = 'Russian Federation' elif(cr_country == 'Saint Helena'): cert_country = 'Saint Helena, Ascension and Tristan da Cunha' elif(cr_country == 'Tanzania'): cert_country = 'Tanzania, United Republic of' elif(cr_country == 'Vatican City State'): cert_country = 'Holy See (Vatican City State)' elif(cr_country == 'Venezuela'): cert_country = 'Venezuela, Bolivarian Republic of' elif(cr_country == 'Viet nam'): cert_country = 'Vietnam' return cert_country def standardize_state(lead_dict): cert_country = lead_dict.get('Country') cr_state = lead_dict.get('State') cert_state = lead_dict.get('State') if(cert_country == 'Australia'): if(cr_state == 'Brisbane'): 
cert_state = 'Queensland' if(cert_country == 'China'): if(cr_state == 'Chinese Taipei'): cert_state = 'Taiwan' if(cert_country == 'United Kingdom'): cert_state = None return cert_state def _publish_alert(alert_message): data = {'message':alert_message} json_data = json.dumps(data) sns = boto3.client('sns') sns.publish( TopicArn='arn:aws:sns:us-east-1:374175877904:hamster_alerts', Message=str(json_data))
standardize_industry
identifier_name
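Illustrative aside on the transfer_leads.py record above: standardize_industry maps Chrome River industry picklist values to Certify values through a long if/elif chain with a default of 'Industrial'. A table-driven variant is often easier to maintain; the sketch below is not the dataset code and only reproduces a handful of the mappings shown above, but it keeps the same fallback behaviour via dict.get.

# Subset of the Chrome River -> Certify industry mappings from the record above.
CR_TO_CERT_INDUSTRY = {
    'Accounting': 'Business Services',
    'Banking': 'Finance',
    'Biotechnology': 'Healthcare',
    'Energy': 'Energy, Utilities & Waste Treatment',
    'Media': 'Media & Internet',
    'Technology': 'Software',
    'Telecom': 'Telecommunications',
}

def standardize_industry_table(lead_dict):
    # Anything without an explicit mapping falls back to 'Industrial',
    # matching the else branch of the original elif chain.
    return CR_TO_CERT_INDUSTRY.get(lead_dict.get('Industry'), 'Industrial')

print(standardize_industry_table({'Industry': 'Banking'}))    # Finance
print(standardize_industry_table({'Industry': 'Floristry'}))  # Industrial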
transfer_leads.py
import json from simple_salesforce import Salesforce import os import boto3 import datetime def lambda_handler(event, context): #gathers JSON file from S3 that was posted from Chrome River SFDC via the transfer_leads_trigger lambda function bucket = event['Records'][0]['s3']['bucket']['name'] key = event['Records'][0]['s3']['object']['key'] s3=boto3.resource('s3') obj = s3.Object(event['Records'][0]['s3']['bucket']['name'],event['Records'][0]['s3']['object']['key']) body = obj.get()['Body'].read() input_body = json.loads(body) idList = input_body.get('Idlist') #gathers leads via SOQL through simple Salesforce library lead_list = _get_lead_list(idList) #standardizes picklist field values and creates value for Chrome River Transfer Notes field standardized_list = add_notes_and_standardize(lead_list) #sends to Certify SFDC instance result_dict = send_to_certify(standardized_list) print(result_dict) #posts notification to Slack upon failure to insert to Certify SFDC if(result_dict[0].get('success') == False): message = f"LEAD TRANSFER TO CERTIFY FAILURE \n" message += f"failed lead insert for the following IDs: \n" for num in range(len(lead_list)): idval = lead_list[num].get("Id") message += idval if num != (len(idList) - 1): message += f", " else: message += "\n" message += f"Returned error log from Salesforce: \n" message += result_dict[0].get('errors')[0].get('message') _publish_alert(message) else: #deletes JSON file within S3 s3 = boto3.client('s3') s3.delete_object(Bucket=bucket,Key=key) return { 'statusCode': 200, 'body': json.dumps('Transfer complete') } def _get_lead_list(idList): query_string = "SELECT ID,FirstName,LastName,Company,Phone,MobilePhone,Email,Fax,LinkedIn_Profile__c,Title,Status,Street,State,City,PostalCode,Country,NumberOfEmployees,Industry,LeadSource,Website,Recent_Conversion__c,Recent_Conversion_Date__c,(SELECT Subject,Type FROM Tasks WHERE Type = 'Form Submission'),(SELECT Campaign_Name__c,Status FROM CampaignMembers) FROM Lead WHERE " id_query_string = "" for num in range(len(idList)): id_query_string += "(ID = '" + idList[num] + "')" if num != (len(idList) - 1): id_query_string += " OR " query_string += id_query_string sf = Salesforce(username=os.environ['cr_sf_username'], password=os.environ['cr_sf_password'], security_token=os.environ['cr_sf_token'],domain=os.environ['cr_sf_host']) sf_data = sf.query_all(query_string) return sf_data['records'] def create_new_dict(lead_dict): new_dict = {} new_dict['FirstName'] = lead_dict.get('FirstName') new_dict['LastName'] = lead_dict.get('LastName') new_dict['Company'] = lead_dict.get('Company') new_dict['Title'] = lead_dict.get('Title') new_dict['Phone'] = lead_dict.get('Phone') new_dict['Email'] = lead_dict.get('Email') new_dict['Fax'] = lead_dict.get('Fax') new_dict['Linkedin_Profile__c'] = lead_dict.get('LinkedIn_Profile__c') new_dict['Street'] = lead_dict.get('Street') new_dict['State'] = lead_dict.get('State') new_dict['City'] = lead_dict.get('City') new_dict['PostalCode'] = lead_dict.get('PostalCode') new_dict['Country'] = lead_dict.get('Country') new_dict['Website'] = lead_dict.get('Website') new_dict['NumberOfEmployees'] = lead_dict.get('NumberOfEmployees') new_dict['Industry'] = lead_dict.get('Industry') new_dict['LeadSource'] = lead_dict.get('LeadSource') new_dict['Chrome_River_Transfer_Notes__c'] = lead_dict.get('Chrome_River_Transfer_Notes__c') new_dict['Employee_Range__c'] = lead_dict.get('Employee_Range__c') new_dict['Chrome_River_MQL__c'] = lead_dict.get('Chrome_River_MQL__c') print(new_dict) return new_dict 
def send_to_certify(lead_list): sf = Salesforce(username=os.environ['cert_sf_username'], password=os.environ['cert_sf_password'], security_token=os.environ['cert_sf_token'],domain=os.environ['cert_sf_host']) return sf.bulk.Lead.insert(lead_list,batch_size=200) def add_notes_and_standardize(lead_list): new_dict_array = [] for lead in lead_list: lead.__setitem__('Chrome_River_Transfer_Notes__c', generate_cr_notes_field(lead)) lead.__setitem__('Employee_Range__c',standardize_employee_range(lead)) lead.__setitem__('Chrome_River_MQL__c',mql_verify(lead)) lead['LeadSource'] = 'Chrome River Transfer' lead['Industry'] = standardize_industry(lead) if(lead.get('Country') != None): lead['Country'] = standardize_country(lead) if(lead.get('State') != None): lead['State'] = standardize_state(lead) new_dict_array.append(create_new_dict(lead)) return new_dict_array def mql_verify(lead_dict): mql_status = False if (lead_dict.get('Recent_Conversion__c') != None): mql_status = True print(mql_status) return mql_status def generate_cr_notes_field(lead_dict): note_text = '' if(lead_dict.get('LeadSource') != None): note_text += 'LeadSource: ' + lead_dict.get('LeadSource') + ' | ' if(lead_dict.get('Recent_Conversion__c') != None): note_text += 'Recent Conversion: ' + lead_dict.get('Recent_Conversion__c') + ' |' if(lead_dict.get('Tasks') != None): note_text += generate_task_summary(lead_dict.get('Tasks').get('records')) if(lead_dict.get('CampaignMembers') != None): note_text += generate_campaign_summary(lead_dict.get('CampaignMembers').get('records')) return note_text def generate_task_summary(task_list): task_text = 'Tasks: ' for task in task_list: task_text += ' ( ' + task.get('Subject') + ' ) ' task_text += '| ' return task_text def generate_campaign_summary(campaign_mem_list): campaign_mem_text = 'Campaigns: ' for campaign in campaign_mem_list: campaign_mem_text += ' ( ' + campaign.get('Campaign_Name__c') + ' Status: ' + campaign.get('Status') + ' ) ' campaign_mem_text += '| ' return campaign_mem_text def standardize_employee_range(lead_dict): e_count = lead_dict.get('NumberOfEmployees') e_range = '' if(e_count < 26): e_range = '1-25' elif(e_count > 26): e_range = '26-200' return e_range def standardize_industry(lead_dict): cr_industry = lead_dict.get('Industry') cert_industry = lead_dict.get('Industry') if(cr_industry == 'Accounting'): cert_industry = 'Business Services' elif(cr_industry == 'Advertising'): cert_industry = 'Business Services' elif(cr_industry == 'Apparel'): cert_industry = 'Manufacturing' elif(cr_industry == 'Architecture'): cert_industry = 'Business Services' elif(cr_industry == 'Banking'): cert_industry = 'Finance' elif(cr_industry == 'Biotechnology'): cert_industry = 'Healthcare' elif(cr_industry == 'Chemicals'): cert_industry = 'Manufacturing' elif(cr_industry == 'Communications'): cert_industry = 'Telecommunications' elif(cr_industry == 'Consulting'): cert_industry = 'Business Services' elif(cr_industry == 'Electronics'): cert_industry = 'Manufacturing' elif(cr_industry == 'Energy'): cert_industry = 'Energy, Utilities & Waste Treatment' elif(cr_industry == 'Engineering'): cert_industry = 'Business Services' elif(cr_industry == 'Entertainment'): cert_industry = 'Consumer Services' elif(cr_industry == 'Environmental'): cert_industry = 'Energy, Utilities & Waste Treatment' elif(cr_industry == 'Food & Beverage'): cert_industry = 'Consumer Services' elif(cr_industry == 'Machinery'): cert_industry = 'Industrial' elif(cr_industry == 'Media'): cert_industry = 'Media & Internet' elif(cr_industry 
== 'Not For Profit'): cert_industry = 'Organizations' elif(cr_industry == 'Other'): cert_industry = 'Industrial' elif(cr_industry == 'Professional Service'): cert_industry = 'Business Services' elif(cr_industry == 'Public Relations'): cert_industry = 'Business Services' elif(cr_industry == 'Recreation'): cert_industry = 'Consumer Services' elif(cr_industry == 'Shipping'): cert_industry = 'Transportation' elif(cr_industry == 'Sports'): cert_industry = 'Media & Internet' elif(cr_industry == 'Technology'): cert_industry = 'Software' elif(cr_industry == 'Telecom'): cert_industry = 'Telecommunications' elif(cr_industry == 'Travel'): cert_industry = 'Consumer Services' elif(cr_industry == 'Utilities'): cert_industry = 'Energy, Utilities & Waste Treatment' else: cert_industry = 'Industrial' return cert_industry def standardize_country(lead_dict): cr_country = lead_dict.get('Country') cert_country = lead_dict.get('Country') if(cr_country == 'Bolivia'): cert_country = 'Bolivia, Plurinational State of' elif(cr_country == 'Iran'): cert_country = 'Iran, Islamic Republic of' elif(cr_country == 'North Korea'): cert_country = 'Korea, Democratic People\'s Republic of' elif(cr_country == 'South Korea'): cert_country = 'Korea, Republic of' elif(cr_country == 'Laos'): cert_country = 'Lao People\'s Democratic Republic' elif(cr_country == 'Moldova'): cert_country = 'Moldova, Republic of' elif(cr_country == 'Marshall Islands'): cert_country = 'Saint Martin (French part)' elif(cr_country == 'Macedonia'): cert_country = 'Greece' elif(cr_country == 'Russia'): cert_country = 'Russian Federation' elif(cr_country == 'Saint Helena'): cert_country = 'Saint Helena, Ascension and Tristan da Cunha' elif(cr_country == 'Tanzania'): cert_country = 'Tanzania, United Republic of' elif(cr_country == 'Vatican City State'): cert_country = 'Holy See (Vatican City State)' elif(cr_country == 'Venezuela'): cert_country = 'Venezuela, Bolivarian Republic of' elif(cr_country == 'Viet nam'): cert_country = 'Vietnam' return cert_country def standardize_state(lead_dict): cert_country = lead_dict.get('Country') cr_state = lead_dict.get('State') cert_state = lead_dict.get('State') if(cert_country == 'Australia'): if(cr_state == 'Brisbane'): cert_state = 'Queensland' if(cert_country == 'China'): if(cr_state == 'Chinese Taipei'): cert_state = 'Taiwan' if(cert_country == 'United Kingdom'):
return cert_state def _publish_alert(alert_message): data = {'message':alert_message} json_data = json.dumps(data) sns = boto3.client('sns') sns.publish( TopicArn='arn:aws:sns:us-east-1:374175877904:hamster_alerts', Message=str(json_data))
cert_state = None
conditional_block
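One detail worth flagging in the standardize_employee_range function repeated in these records: the paired e_count < 26 and e_count > 26 checks map a count of exactly 26 to an empty string, and a lead with no NumberOfEmployees value raises a TypeError on the first comparison. The sketch below is only an illustration of a closed-boundary variant under a hypothetical name, not the code used in the dataset.

def standardize_employee_range_closed(lead_dict):
    # Illustrative variant: covers the 26 boundary and a missing count.
    e_count = lead_dict.get('NumberOfEmployees')
    if e_count is None:
        return ''  # no data, leave the range blank
    if e_count <= 25:
        return '1-25'
    return '26-200'  # 26 and above, including the exact boundary

print(standardize_employee_range_closed({'NumberOfEmployees': 26}))  # 26-200
print(standardize_employee_range_closed({}))                         # ''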
transfer_leads.py
import json from simple_salesforce import Salesforce import os import boto3 import datetime def lambda_handler(event, context): #gathers JSON file from S3 that was posted from Chrome River SFDC via the transfer_leads_trigger lambda function bucket = event['Records'][0]['s3']['bucket']['name'] key = event['Records'][0]['s3']['object']['key'] s3=boto3.resource('s3') obj = s3.Object(event['Records'][0]['s3']['bucket']['name'],event['Records'][0]['s3']['object']['key']) body = obj.get()['Body'].read() input_body = json.loads(body) idList = input_body.get('Idlist') #gathers leads via SOQL through simple Salesforce library lead_list = _get_lead_list(idList) #standardizes picklist field values and creates value for Chrome River Transfer Notes field standardized_list = add_notes_and_standardize(lead_list) #sends to Certify SFDC instance result_dict = send_to_certify(standardized_list) print(result_dict) #posts notification to Slack upon failure to insert to Certify SFDC if(result_dict[0].get('success') == False): message = f"LEAD TRANSFER TO CERTIFY FAILURE \n" message += f"failed lead insert for the following IDs: \n" for num in range(len(lead_list)): idval = lead_list[num].get("Id") message += idval if num != (len(idList) - 1): message += f", " else: message += "\n" message += f"Returned error log from Salesforce: \n" message += result_dict[0].get('errors')[0].get('message') _publish_alert(message) else: #deletes JSON file within S3 s3 = boto3.client('s3') s3.delete_object(Bucket=bucket,Key=key) return { 'statusCode': 200, 'body': json.dumps('Transfer complete') } def _get_lead_list(idList): query_string = "SELECT ID,FirstName,LastName,Company,Phone,MobilePhone,Email,Fax,LinkedIn_Profile__c,Title,Status,Street,State,City,PostalCode,Country,NumberOfEmployees,Industry,LeadSource,Website,Recent_Conversion__c,Recent_Conversion_Date__c,(SELECT Subject,Type FROM Tasks WHERE Type = 'Form Submission'),(SELECT Campaign_Name__c,Status FROM CampaignMembers) FROM Lead WHERE " id_query_string = "" for num in range(len(idList)): id_query_string += "(ID = '" + idList[num] + "')" if num != (len(idList) - 1): id_query_string += " OR " query_string += id_query_string sf = Salesforce(username=os.environ['cr_sf_username'], password=os.environ['cr_sf_password'], security_token=os.environ['cr_sf_token'],domain=os.environ['cr_sf_host']) sf_data = sf.query_all(query_string) return sf_data['records'] def create_new_dict(lead_dict): new_dict = {} new_dict['FirstName'] = lead_dict.get('FirstName') new_dict['LastName'] = lead_dict.get('LastName') new_dict['Company'] = lead_dict.get('Company') new_dict['Title'] = lead_dict.get('Title') new_dict['Phone'] = lead_dict.get('Phone') new_dict['Email'] = lead_dict.get('Email') new_dict['Fax'] = lead_dict.get('Fax') new_dict['Linkedin_Profile__c'] = lead_dict.get('LinkedIn_Profile__c') new_dict['Street'] = lead_dict.get('Street') new_dict['State'] = lead_dict.get('State') new_dict['City'] = lead_dict.get('City') new_dict['PostalCode'] = lead_dict.get('PostalCode') new_dict['Country'] = lead_dict.get('Country') new_dict['Website'] = lead_dict.get('Website') new_dict['NumberOfEmployees'] = lead_dict.get('NumberOfEmployees') new_dict['Industry'] = lead_dict.get('Industry') new_dict['LeadSource'] = lead_dict.get('LeadSource') new_dict['Chrome_River_Transfer_Notes__c'] = lead_dict.get('Chrome_River_Transfer_Notes__c') new_dict['Employee_Range__c'] = lead_dict.get('Employee_Range__c') new_dict['Chrome_River_MQL__c'] = lead_dict.get('Chrome_River_MQL__c') print(new_dict) return new_dict 
def send_to_certify(lead_list): sf = Salesforce(username=os.environ['cert_sf_username'], password=os.environ['cert_sf_password'], security_token=os.environ['cert_sf_token'],domain=os.environ['cert_sf_host']) return sf.bulk.Lead.insert(lead_list,batch_size=200) def add_notes_and_standardize(lead_list): new_dict_array = [] for lead in lead_list: lead.__setitem__('Chrome_River_Transfer_Notes__c', generate_cr_notes_field(lead)) lead.__setitem__('Employee_Range__c',standardize_employee_range(lead)) lead.__setitem__('Chrome_River_MQL__c',mql_verify(lead)) lead['LeadSource'] = 'Chrome River Transfer' lead['Industry'] = standardize_industry(lead) if(lead.get('Country') != None): lead['Country'] = standardize_country(lead) if(lead.get('State') != None): lead['State'] = standardize_state(lead) new_dict_array.append(create_new_dict(lead)) return new_dict_array def mql_verify(lead_dict): mql_status = False if (lead_dict.get('Recent_Conversion__c') != None): mql_status = True print(mql_status) return mql_status def generate_cr_notes_field(lead_dict): note_text = '' if(lead_dict.get('LeadSource') != None): note_text += 'LeadSource: ' + lead_dict.get('LeadSource') + ' | ' if(lead_dict.get('Recent_Conversion__c') != None): note_text += 'Recent Conversion: ' + lead_dict.get('Recent_Conversion__c') + ' |' if(lead_dict.get('Tasks') != None): note_text += generate_task_summary(lead_dict.get('Tasks').get('records')) if(lead_dict.get('CampaignMembers') != None): note_text += generate_campaign_summary(lead_dict.get('CampaignMembers').get('records')) return note_text def generate_task_summary(task_list): task_text = 'Tasks: ' for task in task_list: task_text += ' ( ' + task.get('Subject') + ' ) ' task_text += '| ' return task_text def generate_campaign_summary(campaign_mem_list): campaign_mem_text = 'Campaigns: ' for campaign in campaign_mem_list: campaign_mem_text += ' ( ' + campaign.get('Campaign_Name__c') + ' Status: ' + campaign.get('Status') + ' ) ' campaign_mem_text += '| ' return campaign_mem_text def standardize_employee_range(lead_dict): e_count = lead_dict.get('NumberOfEmployees') e_range = '' if(e_count < 26): e_range = '1-25' elif(e_count > 26): e_range = '26-200' return e_range def standardize_industry(lead_dict): cr_industry = lead_dict.get('Industry') cert_industry = lead_dict.get('Industry') if(cr_industry == 'Accounting'): cert_industry = 'Business Services' elif(cr_industry == 'Advertising'): cert_industry = 'Business Services' elif(cr_industry == 'Apparel'): cert_industry = 'Manufacturing' elif(cr_industry == 'Architecture'): cert_industry = 'Business Services' elif(cr_industry == 'Banking'): cert_industry = 'Finance' elif(cr_industry == 'Biotechnology'): cert_industry = 'Healthcare' elif(cr_industry == 'Chemicals'): cert_industry = 'Manufacturing' elif(cr_industry == 'Communications'): cert_industry = 'Telecommunications' elif(cr_industry == 'Consulting'): cert_industry = 'Business Services' elif(cr_industry == 'Electronics'): cert_industry = 'Manufacturing' elif(cr_industry == 'Energy'): cert_industry = 'Energy, Utilities & Waste Treatment' elif(cr_industry == 'Engineering'): cert_industry = 'Business Services' elif(cr_industry == 'Entertainment'): cert_industry = 'Consumer Services' elif(cr_industry == 'Environmental'): cert_industry = 'Energy, Utilities & Waste Treatment' elif(cr_industry == 'Food & Beverage'): cert_industry = 'Consumer Services' elif(cr_industry == 'Machinery'): cert_industry = 'Industrial' elif(cr_industry == 'Media'): cert_industry = 'Media & Internet' elif(cr_industry 
== 'Not For Profit'): cert_industry = 'Organizations' elif(cr_industry == 'Other'): cert_industry = 'Industrial' elif(cr_industry == 'Professional Service'): cert_industry = 'Business Services' elif(cr_industry == 'Public Relations'): cert_industry = 'Business Services' elif(cr_industry == 'Recreation'): cert_industry = 'Consumer Services' elif(cr_industry == 'Shipping'): cert_industry = 'Transportation' elif(cr_industry == 'Sports'): cert_industry = 'Media & Internet' elif(cr_industry == 'Technology'): cert_industry = 'Software' elif(cr_industry == 'Telecom'): cert_industry = 'Telecommunications' elif(cr_industry == 'Travel'): cert_industry = 'Consumer Services' elif(cr_industry == 'Utilities'): cert_industry = 'Energy, Utilities & Waste Treatment' else: cert_industry = 'Industrial' return cert_industry def standardize_country(lead_dict):
def standardize_state(lead_dict): cert_country = lead_dict.get('Country') cr_state = lead_dict.get('State') cert_state = lead_dict.get('State') if(cert_country == 'Australia'): if(cr_state == 'Brisbane'): cert_state = 'Queensland' if(cert_country == 'China'): if(cr_state == 'Chinese Taipei'): cert_state = 'Taiwan' if(cert_country == 'United Kingdom'): cert_state = None return cert_state def _publish_alert(alert_message): data = {'message':alert_message} json_data = json.dumps(data) sns = boto3.client('sns') sns.publish( TopicArn='arn:aws:sns:us-east-1:374175877904:hamster_alerts', Message=str(json_data))
cr_country = lead_dict.get('Country') cert_country = lead_dict.get('Country') if(cr_country == 'Bolivia'): cert_country = 'Bolivia, Plurinational State of' elif(cr_country == 'Iran'): cert_country = 'Iran, Islamic Republic of' elif(cr_country == 'North Korea'): cert_country = 'Korea, Democratic People\'s Republic of' elif(cr_country == 'South Korea'): cert_country = 'Korea, Republic of' elif(cr_country == 'Laos'): cert_country = 'Lao People\'s Democratic Republic' elif(cr_country == 'Moldova'): cert_country = 'Moldova, Republic of' elif(cr_country == 'Marshall Islands'): cert_country = 'Saint Martin (French part)' elif(cr_country == 'Macedonia'): cert_country = 'Greece' elif(cr_country == 'Russia'): cert_country = 'Russian Federation' elif(cr_country == 'Saint Helena'): cert_country = 'Saint Helena, Ascension and Tristan da Cunha' elif(cr_country == 'Tanzania'): cert_country = 'Tanzania, United Republic of' elif(cr_country == 'Vatican City State'): cert_country = 'Holy See (Vatican City State)' elif(cr_country == 'Venezuela'): cert_country = 'Venezuela, Bolivarian Republic of' elif(cr_country == 'Viet nam'): cert_country = 'Vietnam' return cert_country
identifier_body
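The Chrome_River_Transfer_Notes__c value assembled in these records is a pipe-delimited summary of lead source, recent conversion, task subjects, and campaign memberships. The standalone sketch below is illustrative only: it re-implements the formatting locally with made-up sample values rather than importing the dataset code, to show roughly what the resulting string looks like.

def notes_preview(lead_source, conversion, task_subjects, campaigns):
    # Rebuild the pipe-delimited notes string in the same shape as the
    # generate_* helpers above (local re-implementation for illustration).
    parts = []
    if lead_source:
        parts.append('LeadSource: ' + lead_source + ' | ')
    if conversion:
        parts.append('Recent Conversion: ' + conversion + ' |')
    if task_subjects:
        parts.append('Tasks: ' + ''.join(' ( %s ) ' % s for s in task_subjects) + '| ')
    if campaigns:
        parts.append('Campaigns: ' + ''.join(' ( %s Status: %s ) ' % c for c in campaigns) + '| ')
    return ''.join(parts)

print(notes_preview('Webinar', 'Demo Request',
                    ['Contact Us Form'], [('Q3 Webinar', 'Responded')]))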
transfer_leads.py
import json from simple_salesforce import Salesforce import os import boto3 import datetime def lambda_handler(event, context): #gathers JSON file from S3 that was posted from Chrome River SFDC via the transfer_leads_trigger lambda function bucket = event['Records'][0]['s3']['bucket']['name'] key = event['Records'][0]['s3']['object']['key'] s3=boto3.resource('s3') obj = s3.Object(event['Records'][0]['s3']['bucket']['name'],event['Records'][0]['s3']['object']['key']) body = obj.get()['Body'].read() input_body = json.loads(body) idList = input_body.get('Idlist') #gathers leads via SOQL through simple Salesforce library lead_list = _get_lead_list(idList) #standardizes picklist field values and creates value for Chrome River Transfer Notes field standardized_list = add_notes_and_standardize(lead_list) #sends to Certify SFDC instance result_dict = send_to_certify(standardized_list) print(result_dict) #posts notification to Slack upon failure to insert to Certify SFDC if(result_dict[0].get('success') == False): message = f"LEAD TRANSFER TO CERTIFY FAILURE \n" message += f"failed lead insert for the following IDs: \n" for num in range(len(lead_list)): idval = lead_list[num].get("Id") message += idval if num != (len(idList) - 1): message += f", " else: message += "\n" message += f"Returned error log from Salesforce: \n" message += result_dict[0].get('errors')[0].get('message') _publish_alert(message) else: #deletes JSON file within S3 s3 = boto3.client('s3') s3.delete_object(Bucket=bucket,Key=key) return { 'statusCode': 200, 'body': json.dumps('Transfer complete') } def _get_lead_list(idList): query_string = "SELECT ID,FirstName,LastName,Company,Phone,MobilePhone,Email,Fax,LinkedIn_Profile__c,Title,Status,Street,State,City,PostalCode,Country,NumberOfEmployees,Industry,LeadSource,Website,Recent_Conversion__c,Recent_Conversion_Date__c,(SELECT Subject,Type FROM Tasks WHERE Type = 'Form Submission'),(SELECT Campaign_Name__c,Status FROM CampaignMembers) FROM Lead WHERE " id_query_string = "" for num in range(len(idList)): id_query_string += "(ID = '" + idList[num] + "')" if num != (len(idList) - 1): id_query_string += " OR " query_string += id_query_string sf = Salesforce(username=os.environ['cr_sf_username'], password=os.environ['cr_sf_password'], security_token=os.environ['cr_sf_token'],domain=os.environ['cr_sf_host']) sf_data = sf.query_all(query_string) return sf_data['records'] def create_new_dict(lead_dict): new_dict = {} new_dict['FirstName'] = lead_dict.get('FirstName') new_dict['LastName'] = lead_dict.get('LastName') new_dict['Company'] = lead_dict.get('Company') new_dict['Title'] = lead_dict.get('Title') new_dict['Phone'] = lead_dict.get('Phone') new_dict['Email'] = lead_dict.get('Email') new_dict['Fax'] = lead_dict.get('Fax') new_dict['Linkedin_Profile__c'] = lead_dict.get('LinkedIn_Profile__c') new_dict['Street'] = lead_dict.get('Street') new_dict['State'] = lead_dict.get('State') new_dict['City'] = lead_dict.get('City') new_dict['PostalCode'] = lead_dict.get('PostalCode') new_dict['Country'] = lead_dict.get('Country') new_dict['Website'] = lead_dict.get('Website') new_dict['NumberOfEmployees'] = lead_dict.get('NumberOfEmployees') new_dict['Industry'] = lead_dict.get('Industry') new_dict['LeadSource'] = lead_dict.get('LeadSource') new_dict['Chrome_River_Transfer_Notes__c'] = lead_dict.get('Chrome_River_Transfer_Notes__c') new_dict['Employee_Range__c'] = lead_dict.get('Employee_Range__c') new_dict['Chrome_River_MQL__c'] = lead_dict.get('Chrome_River_MQL__c') print(new_dict) return new_dict 
def send_to_certify(lead_list): sf = Salesforce(username=os.environ['cert_sf_username'], password=os.environ['cert_sf_password'], security_token=os.environ['cert_sf_token'],domain=os.environ['cert_sf_host']) return sf.bulk.Lead.insert(lead_list,batch_size=200) def add_notes_and_standardize(lead_list): new_dict_array = [] for lead in lead_list: lead.__setitem__('Chrome_River_Transfer_Notes__c', generate_cr_notes_field(lead)) lead.__setitem__('Employee_Range__c',standardize_employee_range(lead)) lead.__setitem__('Chrome_River_MQL__c',mql_verify(lead)) lead['LeadSource'] = 'Chrome River Transfer' lead['Industry'] = standardize_industry(lead) if(lead.get('Country') != None): lead['Country'] = standardize_country(lead) if(lead.get('State') != None): lead['State'] = standardize_state(lead) new_dict_array.append(create_new_dict(lead)) return new_dict_array def mql_verify(lead_dict): mql_status = False if (lead_dict.get('Recent_Conversion__c') != None): mql_status = True print(mql_status) return mql_status def generate_cr_notes_field(lead_dict): note_text = '' if(lead_dict.get('LeadSource') != None): note_text += 'LeadSource: ' + lead_dict.get('LeadSource') + ' | ' if(lead_dict.get('Recent_Conversion__c') != None): note_text += 'Recent Conversion: ' + lead_dict.get('Recent_Conversion__c') + ' |' if(lead_dict.get('Tasks') != None): note_text += generate_task_summary(lead_dict.get('Tasks').get('records')) if(lead_dict.get('CampaignMembers') != None): note_text += generate_campaign_summary(lead_dict.get('CampaignMembers').get('records')) return note_text def generate_task_summary(task_list): task_text = 'Tasks: ' for task in task_list: task_text += ' ( ' + task.get('Subject') + ' ) ' task_text += '| ' return task_text def generate_campaign_summary(campaign_mem_list): campaign_mem_text = 'Campaigns: ' for campaign in campaign_mem_list: campaign_mem_text += ' ( ' + campaign.get('Campaign_Name__c') + ' Status: ' + campaign.get('Status') + ' ) ' campaign_mem_text += '| ' return campaign_mem_text def standardize_employee_range(lead_dict): e_count = lead_dict.get('NumberOfEmployees') e_range = '' if(e_count < 26): e_range = '1-25' elif(e_count > 26): e_range = '26-200' return e_range def standardize_industry(lead_dict): cr_industry = lead_dict.get('Industry') cert_industry = lead_dict.get('Industry') if(cr_industry == 'Accounting'): cert_industry = 'Business Services' elif(cr_industry == 'Advertising'): cert_industry = 'Business Services' elif(cr_industry == 'Apparel'): cert_industry = 'Manufacturing' elif(cr_industry == 'Architecture'): cert_industry = 'Business Services' elif(cr_industry == 'Banking'): cert_industry = 'Finance' elif(cr_industry == 'Biotechnology'): cert_industry = 'Healthcare' elif(cr_industry == 'Chemicals'): cert_industry = 'Manufacturing' elif(cr_industry == 'Communications'): cert_industry = 'Telecommunications' elif(cr_industry == 'Consulting'): cert_industry = 'Business Services' elif(cr_industry == 'Electronics'): cert_industry = 'Manufacturing' elif(cr_industry == 'Energy'): cert_industry = 'Energy, Utilities & Waste Treatment' elif(cr_industry == 'Engineering'): cert_industry = 'Business Services' elif(cr_industry == 'Entertainment'): cert_industry = 'Consumer Services' elif(cr_industry == 'Environmental'): cert_industry = 'Energy, Utilities & Waste Treatment' elif(cr_industry == 'Food & Beverage'): cert_industry = 'Consumer Services' elif(cr_industry == 'Machinery'): cert_industry = 'Industrial' elif(cr_industry == 'Media'): cert_industry = 'Media & Internet' elif(cr_industry 
== 'Not For Profit'): cert_industry = 'Organizations' elif(cr_industry == 'Other'): cert_industry = 'Industrial' elif(cr_industry == 'Professional Service'): cert_industry = 'Business Services' elif(cr_industry == 'Public Relations'): cert_industry = 'Business Services' elif(cr_industry == 'Recreation'): cert_industry = 'Consumer Services' elif(cr_industry == 'Shipping'): cert_industry = 'Transportation' elif(cr_industry == 'Sports'): cert_industry = 'Media & Internet' elif(cr_industry == 'Technology'): cert_industry = 'Software' elif(cr_industry == 'Telecom'): cert_industry = 'Telecommunications' elif(cr_industry == 'Travel'): cert_industry = 'Consumer Services' elif(cr_industry == 'Utilities'): cert_industry = 'Energy, Utilities & Waste Treatment' else: cert_industry = 'Industrial' return cert_industry def standardize_country(lead_dict): cr_country = lead_dict.get('Country') cert_country = lead_dict.get('Country') if(cr_country == 'Bolivia'): cert_country = 'Bolivia, Plurinational State of' elif(cr_country == 'Iran'): cert_country = 'Iran, Islamic Republic of' elif(cr_country == 'North Korea'): cert_country = 'Korea, Democratic People\'s Republic of' elif(cr_country == 'South Korea'): cert_country = 'Korea, Republic of' elif(cr_country == 'Laos'): cert_country = 'Lao People\'s Democratic Republic' elif(cr_country == 'Moldova'): cert_country = 'Moldova, Republic of' elif(cr_country == 'Marshall Islands'):
cert_country = 'Saint Martin (French part)' elif(cr_country == 'Macedonia'): cert_country = 'Greece' elif(cr_country == 'Russia'): cert_country = 'Russian Federation' elif(cr_country == 'Saint Helena'): cert_country = 'Saint Helena, Ascension and Tristan da Cunha' elif(cr_country == 'Tanzania'): cert_country = 'Tanzania, United Republic of' elif(cr_country == 'Vatican City State'): cert_country = 'Holy See (Vatican City State)' elif(cr_country == 'Venezuela'): cert_country = 'Venezuela, Bolivarian Republic of' elif(cr_country == 'Viet nam'): cert_country = 'Vietnam' return cert_country def standardize_state(lead_dict): cert_country = lead_dict.get('Country') cr_state = lead_dict.get('State') cert_state = lead_dict.get('State') if(cert_country == 'Australia'): if(cr_state == 'Brisbane'): cert_state = 'Queensland' if(cert_country == 'China'): if(cr_state == 'Chinese Taipei'): cert_state = 'Taiwan' if(cert_country == 'United Kingdom'): cert_state = None return cert_state def _publish_alert(alert_message): data = {'message':alert_message} json_data = json.dumps(data) sns = boto3.client('sns') sns.publish( TopicArn='arn:aws:sns:us-east-1:374175877904:hamster_alerts', Message=str(json_data))
random_line_split
caclient.go
/* Copyright: Cognition Foundry. All Rights Reserved. License: Apache License Version 2.0 */ package gohfc import ( "bytes" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "encoding/pem" "fmt" "io/ioutil" "net/http" ) // CAClient is common interface for Certificate authority services. type CAClient interface { // Enroll enrolls user and returns ECert,CSR used for certificate and error Enroll(enrollmentId, password string) (*Identity, []byte, error) // Register registers new user in fabric-ca server. Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) // Revoke revokes ECert in fabric-ca server. Revoke(identity *Identity, req *CARevocationRequest) (*CAResponse, error) // ReEnroll create new certificate from old (valid) one. ReEnroll(identity *Identity) (*Identity, error) } // RegistrationRequest holds all data needed for new registration of new user in Certificate Authority type CARegistrationRequest struct { // EnrolmentId is unique name that identifies identity EnrolmentId string `json:"id"` // Type defines type of this identity (user,client, auditor etc...) Type string `json:"type"` // Secret is password that will be used for enrollment. If not provided random password will be generated Secret string `json:"secret,omitempty"` // MaxEnrollments define maximum number of times that identity can enroll. If not provided or is 0 there is no limit MaxEnrollments int `json:"max_enrollments,omitempty"` // Affiliation associates identity with particular organisation. // for example org1.department1 makes this identity part of organisation `org1` and department `department1` Affiliation string `json:"affiliation"` // Attrs are attributes associated with this identity Attrs []*CARegistrationRequestAttr `json:"attrs"` } // CARegistrationRequestAttr holds user attribute used for registration // for example user may have attr `accountType` with value `premium` // this attributes can be accessed in chainCode and build business logic on top of them type CARegistrationRequestAttr struct { Name string `json:"name"` Value string `json:"value"` } // CARevocationRequest holds data needed to revoke certificate in fabric-ca // If AKI and Serial are provided this will revoke specific certificate. // If EnrolmentID is provided all certificated for this EnrollmentID will be revoked and all his/hers future attempts // to enroll will fail. type CARevocationRequest struct { // EnrollmentId of the identity whose certificates should be revoked // If this field is omitted, then Serial and AKI must be specified. EnrollmentId string `json:"id,omitempty"` // Serial number of the certificate to be revoked // If this is omitted, then EnrollmentId must be specified Serial string `json:"serial,omitempty"` // AKI (Authority Key Identifier) of the certificate to be revoked AKI string `json:"aki,omitempty"` // Reason is the reason for revocation. See https://godoc.org/golang.org/x/crypto/ocsp for // valid values. The default value is 0 (ocsp.Unspecified). 
Reason int `json:"reason,omitempty"` } // CAResponse represents response message from fabric-ca server type CAResponse struct { Success bool `json:"success"` Result CARegisterCredentialResponse `json:"result"` Errors []CAResponseErr `json:"errors"` Messages []string `json:"messages"` } // CARegisterCredentialResponse credentials from fabric-ca server registration request type CARegisterCredentialResponse struct { Secret string `json:"secret"` } // CAResponseErr represents error message from fabric-ca server type CAResponseErr struct { Code int `json:"code"` Message string `json:"message"` } // certificateRequest holds certificate request that must be signed by fabric-ca type CertificateRequest struct { CR string `json:"certificate_request"` } // FabricCAClientImpl is client implementation for fabric-ca server type FabricCAClientImpl struct { // Uri is access point for fabric-ca server. Port number and scheme must be provided. // for example http://127.0.0.1:7054 Url string // SkipTLSVerification define how connection must handle invalid TLC certificates. // if true, all verifications are skipped. This value is overwritten by Transport property, if provided SkipTLSVerification bool // Crypto is CryptSuite implementation used to sign request for fabric-ca server Crypto CryptoSuite // Transport define transport rules for communication with fabric-ca server. If nil, default Go setting will be used // It is responsibility of the user to provide proper TLS/certificate setting in TLS communication. Transport *http.Transport } // enrollmentResponse is response from fabric-ca server for enrolment that contains created Ecert type enrollmentResponse struct { Success bool `json:"success"` Result enrollmentResponseResult `json:"result"` Errors []CAResponseErr `json:"errors"` Messages []string `json:"messages"` } type enrollmentResponseResult struct { Cert string ServerInfo enrollmentResponseServerInfo } type enrollmentResponseServerInfo struct { CAName string CAChain string } // Register registers new user in fabric-ca server. In registration request attributes, affiliation and // max enrolments must be set. On success, password will be in CAResponse.Result.Credential. // If password is not provided, random secret will be generated. // It is responsibility of the SDK user to ensure passwords are with big entropy. // Certificate parameter is certificate for user that makes registration and this user MUST have the role for // registering new users. 
func (f *FabricCAClientImpl) Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) { if req.EnrolmentId == "" { return nil, ErrEnrolmentMissing } if req.Affiliation == "" { return nil, ErrAffiliationMissing } if req.Type == "" { return nil, ErrTypeMissing } if identity == nil { return nil, ErrCertificateEmpty } reqJson, err := json.Marshal(req) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/register", f.Url) httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson)) httpReq.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, reqJson) if err != nil { return nil, err } httpReq.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(httpReq) if err != nil { return nil, err } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) result := new(CAResponse) if err := json.Unmarshal(body, result); err != nil { return nil, err } return result, nil } // Enroll execute enrollment request for registered user in fabric-ca server. // On success new Identity with ECert is returned func (f *FabricCAClientImpl) Enroll(enrollmentId, password string) (*Identity, []byte, error) { if len(enrollmentId) < 1 { return nil, nil, ErrEnrollmentIdMissing } // create new cert and send it to CA for signing key, err := f.Crypto.GenerateKey() if err != nil { return nil, nil, err } csr, err := f.Crypto.CreateCertificateRequest(enrollmentId, key) if err != nil { return nil, nil, err } url := fmt.Sprintf("%s/api/v1/enroll", f.Url) crm, err := json.Marshal(CertificateRequest{CR: string(csr)}) if err != nil { return nil, nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm)) req.Header.Set("Content-Type", "application/json") req.SetBasicAuth(enrollmentId, password) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(req) if err != nil { return nil, nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, nil, err } enrResp := new(enrollmentResponse) if err := json.Unmarshal(body, enrResp); err != nil { return nil, nil, err } if !enrResp.Success { return nil, nil, ErrEnrollment } rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert) if err != nil { return nil, nil, err } a, _ := pem.Decode(rawCert) cert, err := x509.ParseCertificate(a.Bytes) if err != nil { return nil, nil, err } return &Identity{Certificate: cert, PrivateKey: key}, csr, nil } // Revoke revokes ECert in fabric-ca server. // Note that this request will revoke certificate ONLY in fabric-ca server. Peers (for now) do not know // about this certificate revocation. // It is responsibility of the SDK user to update peers and set this certificate in every peer revocation list. 
func (f *FabricCAClientImpl) Revoke(identity *Identity, request *CARevocationRequest) (*CAResponse, error) { reqJson, err := json.Marshal(request) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/revoke", f.Url) httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson)) httpReq.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, reqJson) if err != nil { return nil, err } httpReq.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(httpReq) if err != nil { return nil, err } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) result := new(CAResponse) if err := json.Unmarshal(body, result); err != nil { return nil, err } return result, nil } // ReEnroll create new certificate from old one. Useful when certificate is about to expire. Attributes are preserved. func (f *FabricCAClientImpl) ReEnroll(identity *Identity) (*Identity, error) { if identity == nil || identity.EnrollmentId() == "" { return nil, ErrCertificateEmpty } // create new cert and send it to CA for signing key, err := f.Crypto.GenerateKey() if err != nil { return nil, err } csr, err := f.Crypto.CreateCertificateRequest(identity.EnrollmentId(), key) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/reenroll", f.Url) crm, err := json.Marshal(CertificateRequest{CR: string(csr)}) if err != nil { return nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm)) req.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, crm) if err != nil { return nil, err } req.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(req) if err != nil { return nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } enrResp := new(enrollmentResponse) if err := json.Unmarshal(body, enrResp); err != nil { return nil, err } if !enrResp.Success { return nil, ErrEnrollment } rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert) if err != nil { return nil, err } a, _ := pem.Decode(rawCert) cert, err := x509.ParseCertificate(a.Bytes) if err != nil { return nil, err } return &Identity{Certificate: cert, PrivateKey: key}, nil } // createAuthToken creates http authorization header token to verify the request. // it is composed by base64 encoded Cert concatenated by base64 encoded request signed with Cert private key func (f *FabricCAClientImpl)
(identity *Identity, request []byte) (string, error) { encPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: identity.Certificate.Raw}) encCert := base64.StdEncoding.EncodeToString(encPem) body := base64.StdEncoding.EncodeToString(request) sigString := body + "." + encCert sig, err := f.Crypto.Sign([]byte(sigString), identity.PrivateKey) if err != nil { return "", err } return fmt.Sprintf("%s.%s", encCert, base64.StdEncoding.EncodeToString(sig)), nil } // NewFabricCAClient creates new FabricCAClientImpl func NewCAClient(path string, transport *http.Transport) (CAClient, error) { config,err:=NewCAConfig(path) if err!=nil{ return nil,err } var crypto CryptoSuite switch config.CryptoConfig.Family { case "ecdsa": crypto, err = NewECCryptSuiteFromConfig(config.CryptoConfig) if err != nil { return nil, err } default: return nil, ErrInvalidAlgorithmFamily } return &FabricCAClientImpl{SkipTLSVerification: config.SkipTLSValidation, Url: config.Uri, Crypto: crypto, Transport: transport}, nil }
createAuthToken
identifier_name
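The Register and Enroll methods above form the usual bootstrap sequence: enroll a pre-registered admin, then use that Identity to register further users. The sketch below is illustrative only; the import path, config path, admin credentials, affiliation and attribute values are all assumptions rather than values taken from this code.

package main

import (
	"fmt"
	"log"

	"github.com/CognitionFoundry/gohfc" // assumed import path
)

func main() {
	// NewCAClient reads the CA endpoint and crypto family from a config file (path assumed).
	ca, err := gohfc.NewCAClient("./ca-config.yaml", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Enroll the pre-registered admin to obtain an Identity allowed to register others.
	admin, _, err := ca.Enroll("admin", "adminpw") // credentials are placeholders
	if err != nil {
		log.Fatal(err)
	}

	// Register a new user; EnrolmentId, Type and Affiliation are mandatory (see Register above).
	resp, err := ca.Register(admin, &gohfc.CARegistrationRequest{
		EnrolmentId: "appUser1",
		Type:        "user",
		Affiliation: "org1.department1",
		Attrs: []*gohfc.CARegistrationRequestAttr{
			{Name: "accountType", Value: "premium"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// No Secret was supplied, so the CA-generated password comes back in the result.
	fmt.Println("generated secret:", resp.Result.Secret)
}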
caclient.go
/* Copyright: Cognition Foundry. All Rights Reserved. License: Apache License Version 2.0 */ package gohfc import ( "bytes" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "encoding/pem" "fmt" "io/ioutil" "net/http" ) // CAClient is common interface for Certificate authority services. type CAClient interface { // Enroll enrolls user and returns ECert,CSR used for certificate and error Enroll(enrollmentId, password string) (*Identity, []byte, error) // Register registers new user in fabric-ca server. Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) // Revoke revokes ECert in fabric-ca server. Revoke(identity *Identity, req *CARevocationRequest) (*CAResponse, error) // ReEnroll create new certificate from old (valid) one. ReEnroll(identity *Identity) (*Identity, error) } // RegistrationRequest holds all data needed for new registration of new user in Certificate Authority type CARegistrationRequest struct { // EnrolmentId is unique name that identifies identity EnrolmentId string `json:"id"` // Type defines type of this identity (user,client, auditor etc...) Type string `json:"type"` // Secret is password that will be used for enrollment. If not provided random password will be generated Secret string `json:"secret,omitempty"` // MaxEnrollments define maximum number of times that identity can enroll. If not provided or is 0 there is no limit MaxEnrollments int `json:"max_enrollments,omitempty"` // Affiliation associates identity with particular organisation. // for example org1.department1 makes this identity part of organisation `org1` and department `department1` Affiliation string `json:"affiliation"` // Attrs are attributes associated with this identity Attrs []*CARegistrationRequestAttr `json:"attrs"` } // CARegistrationRequestAttr holds user attribute used for registration // for example user may have attr `accountType` with value `premium` // this attributes can be accessed in chainCode and build business logic on top of them type CARegistrationRequestAttr struct { Name string `json:"name"` Value string `json:"value"` } // CARevocationRequest holds data needed to revoke certificate in fabric-ca // If AKI and Serial are provided this will revoke specific certificate. // If EnrolmentID is provided all certificated for this EnrollmentID will be revoked and all his/hers future attempts // to enroll will fail. type CARevocationRequest struct { // EnrollmentId of the identity whose certificates should be revoked // If this field is omitted, then Serial and AKI must be specified. EnrollmentId string `json:"id,omitempty"` // Serial number of the certificate to be revoked // If this is omitted, then EnrollmentId must be specified Serial string `json:"serial,omitempty"` // AKI (Authority Key Identifier) of the certificate to be revoked AKI string `json:"aki,omitempty"` // Reason is the reason for revocation. See https://godoc.org/golang.org/x/crypto/ocsp for // valid values. The default value is 0 (ocsp.Unspecified). 
Reason int `json:"reason,omitempty"` } // CAResponse represents response message from fabric-ca server type CAResponse struct { Success bool `json:"success"` Result CARegisterCredentialResponse `json:"result"` Errors []CAResponseErr `json:"errors"` Messages []string `json:"messages"` } // CARegisterCredentialResponse credentials from fabric-ca server registration request type CARegisterCredentialResponse struct { Secret string `json:"secret"` } // CAResponseErr represents error message from fabric-ca server type CAResponseErr struct { Code int `json:"code"` Message string `json:"message"` } // certificateRequest holds certificate request that must be signed by fabric-ca type CertificateRequest struct { CR string `json:"certificate_request"` } // FabricCAClientImpl is client implementation for fabric-ca server type FabricCAClientImpl struct { // Uri is access point for fabric-ca server. Port number and scheme must be provided. // for example http://127.0.0.1:7054 Url string // SkipTLSVerification define how connection must handle invalid TLC certificates. // if true, all verifications are skipped. This value is overwritten by Transport property, if provided SkipTLSVerification bool // Crypto is CryptSuite implementation used to sign request for fabric-ca server Crypto CryptoSuite // Transport define transport rules for communication with fabric-ca server. If nil, default Go setting will be used // It is responsibility of the user to provide proper TLS/certificate setting in TLS communication. Transport *http.Transport } // enrollmentResponse is response from fabric-ca server for enrolment that contains created Ecert type enrollmentResponse struct { Success bool `json:"success"` Result enrollmentResponseResult `json:"result"` Errors []CAResponseErr `json:"errors"` Messages []string `json:"messages"` } type enrollmentResponseResult struct { Cert string ServerInfo enrollmentResponseServerInfo } type enrollmentResponseServerInfo struct { CAName string CAChain string } // Register registers new user in fabric-ca server. In registration request attributes, affiliation and // max enrolments must be set. On success, password will be in CAResponse.Result.Credential. // If password is not provided, random secret will be generated. // It is responsibility of the SDK user to ensure passwords are with big entropy. // Certificate parameter is certificate for user that makes registration and this user MUST have the role for // registering new users. 
func (f *FabricCAClientImpl) Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) { if req.EnrolmentId == "" { return nil, ErrEnrolmentMissing } if req.Affiliation == "" { return nil, ErrAffiliationMissing } if req.Type == "" { return nil, ErrTypeMissing } if identity == nil { return nil, ErrCertificateEmpty } reqJson, err := json.Marshal(req) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/register", f.Url) httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson)) httpReq.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, reqJson) if err != nil { return nil, err } httpReq.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(httpReq) if err != nil { return nil, err } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) result := new(CAResponse) if err := json.Unmarshal(body, result); err != nil
return result, nil } // Enroll execute enrollment request for registered user in fabric-ca server. // On success new Identity with ECert is returned func (f *FabricCAClientImpl) Enroll(enrollmentId, password string) (*Identity, []byte, error) { if len(enrollmentId) < 1 { return nil, nil, ErrEnrollmentIdMissing } // create new cert and send it to CA for signing key, err := f.Crypto.GenerateKey() if err != nil { return nil, nil, err } csr, err := f.Crypto.CreateCertificateRequest(enrollmentId, key) if err != nil { return nil, nil, err } url := fmt.Sprintf("%s/api/v1/enroll", f.Url) crm, err := json.Marshal(CertificateRequest{CR: string(csr)}) if err != nil { return nil, nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm)) req.Header.Set("Content-Type", "application/json") req.SetBasicAuth(enrollmentId, password) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(req) if err != nil { return nil, nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, nil, err } enrResp := new(enrollmentResponse) if err := json.Unmarshal(body, enrResp); err != nil { return nil, nil, err } if !enrResp.Success { return nil, nil, ErrEnrollment } rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert) if err != nil { return nil, nil, err } a, _ := pem.Decode(rawCert) cert, err := x509.ParseCertificate(a.Bytes) if err != nil { return nil, nil, err } return &Identity{Certificate: cert, PrivateKey: key}, csr, nil } // Revoke revokes ECert in fabric-ca server. // Note that this request will revoke certificate ONLY in fabric-ca server. Peers (for now) do not know // about this certificate revocation. // It is responsibility of the SDK user to update peers and set this certificate in every peer revocation list. func (f *FabricCAClientImpl) Revoke(identity *Identity, request *CARevocationRequest) (*CAResponse, error) { reqJson, err := json.Marshal(request) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/revoke", f.Url) httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson)) httpReq.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, reqJson) if err != nil { return nil, err } httpReq.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(httpReq) if err != nil { return nil, err } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) result := new(CAResponse) if err := json.Unmarshal(body, result); err != nil { return nil, err } return result, nil } // ReEnroll create new certificate from old one. Useful when certificate is about to expire. Attributes are preserved. 
func (f *FabricCAClientImpl) ReEnroll(identity *Identity) (*Identity, error) { if identity == nil || identity.EnrollmentId() == "" { return nil, ErrCertificateEmpty } // create new cert and send it to CA for signing key, err := f.Crypto.GenerateKey() if err != nil { return nil, err } csr, err := f.Crypto.CreateCertificateRequest(identity.EnrollmentId(), key) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/reenroll", f.Url) crm, err := json.Marshal(CertificateRequest{CR: string(csr)}) if err != nil { return nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm)) req.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, crm) if err != nil { return nil, err } req.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(req) if err != nil { return nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } enrResp := new(enrollmentResponse) if err := json.Unmarshal(body, enrResp); err != nil { return nil, err } if !enrResp.Success { return nil, ErrEnrollment } rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert) if err != nil { return nil, err } a, _ := pem.Decode(rawCert) cert, err := x509.ParseCertificate(a.Bytes) if err != nil { return nil, err } return &Identity{Certificate: cert, PrivateKey: key}, nil } // createAuthToken creates http authorization header token to verify the request. // it is composed by base64 encoded Cert concatenated by base64 encoded request signed with Cert private key func (f *FabricCAClientImpl) createAuthToken(identity *Identity, request []byte) (string, error) { encPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: identity.Certificate.Raw}) encCert := base64.StdEncoding.EncodeToString(encPem) body := base64.StdEncoding.EncodeToString(request) sigString := body + "." + encCert sig, err := f.Crypto.Sign([]byte(sigString), identity.PrivateKey) if err != nil { return "", err } return fmt.Sprintf("%s.%s", encCert, base64.StdEncoding.EncodeToString(sig)), nil } // NewFabricCAClient creates new FabricCAClientImpl func NewCAClient(path string, transport *http.Transport) (CAClient, error) { config,err:=NewCAConfig(path) if err!=nil{ return nil,err } var crypto CryptoSuite switch config.CryptoConfig.Family { case "ecdsa": crypto, err = NewECCryptSuiteFromConfig(config.CryptoConfig) if err != nil { return nil, err } default: return nil, ErrInvalidAlgorithmFamily } return &FabricCAClientImpl{SkipTLSVerification: config.SkipTLSValidation, Url: config.Uri, Crypto: crypto, Transport: transport}, nil }
{ return nil, err }
conditional_block
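The CARevocationRequest comment above distinguishes two revocation forms: Serial plus AKI revokes one specific certificate, while EnrollmentId revokes every certificate of that identity and blocks its future enrollments. A minimal sketch of both calls, assuming the same import path as in the earlier sketch and an admin Identity that holds the revoker role:

package caexample

import "github.com/CognitionFoundry/gohfc" // assumed import path

// revokeOneCert revokes a single certificate by serial number and authority key id.
// Reason codes follow golang.org/x/crypto/ocsp (1 = key compromise).
func revokeOneCert(ca gohfc.CAClient, admin *gohfc.Identity, serial, aki string) error {
	_, err := ca.Revoke(admin, &gohfc.CARevocationRequest{
		Serial: serial,
		AKI:    aki,
		Reason: 1,
	})
	return err
}

// revokeAllForIdentity revokes every certificate issued to an enrollment id;
// as noted above, future enrollment attempts for that id will then fail.
func revokeAllForIdentity(ca gohfc.CAClient, admin *gohfc.Identity, enrollmentId string) error {
	_, err := ca.Revoke(admin, &gohfc.CARevocationRequest{
		EnrollmentId: enrollmentId,
		Reason:       5, // cessation of operation
	})
	return err
}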
caclient.go
/* Copyright: Cognition Foundry. All Rights Reserved. License: Apache License Version 2.0 */ package gohfc import ( "bytes" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "encoding/pem" "fmt" "io/ioutil" "net/http" ) // CAClient is common interface for Certificate authority services. type CAClient interface { // Enroll enrolls user and returns ECert,CSR used for certificate and error Enroll(enrollmentId, password string) (*Identity, []byte, error) // Register registers new user in fabric-ca server. Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) // Revoke revokes ECert in fabric-ca server. Revoke(identity *Identity, req *CARevocationRequest) (*CAResponse, error) // ReEnroll create new certificate from old (valid) one. ReEnroll(identity *Identity) (*Identity, error) } // RegistrationRequest holds all data needed for new registration of new user in Certificate Authority type CARegistrationRequest struct { // EnrolmentId is unique name that identifies identity EnrolmentId string `json:"id"` // Type defines type of this identity (user,client, auditor etc...) Type string `json:"type"` // Secret is password that will be used for enrollment. If not provided random password will be generated Secret string `json:"secret,omitempty"` // MaxEnrollments define maximum number of times that identity can enroll. If not provided or is 0 there is no limit MaxEnrollments int `json:"max_enrollments,omitempty"` // Affiliation associates identity with particular organisation. // for example org1.department1 makes this identity part of organisation `org1` and department `department1` Affiliation string `json:"affiliation"` // Attrs are attributes associated with this identity Attrs []*CARegistrationRequestAttr `json:"attrs"` } // CARegistrationRequestAttr holds user attribute used for registration // for example user may have attr `accountType` with value `premium` // this attributes can be accessed in chainCode and build business logic on top of them type CARegistrationRequestAttr struct { Name string `json:"name"` Value string `json:"value"` } // CARevocationRequest holds data needed to revoke certificate in fabric-ca // If AKI and Serial are provided this will revoke specific certificate. // If EnrolmentID is provided all certificated for this EnrollmentID will be revoked and all his/hers future attempts // to enroll will fail. type CARevocationRequest struct { // EnrollmentId of the identity whose certificates should be revoked // If this field is omitted, then Serial and AKI must be specified. EnrollmentId string `json:"id,omitempty"` // Serial number of the certificate to be revoked // If this is omitted, then EnrollmentId must be specified Serial string `json:"serial,omitempty"` // AKI (Authority Key Identifier) of the certificate to be revoked AKI string `json:"aki,omitempty"` // Reason is the reason for revocation. See https://godoc.org/golang.org/x/crypto/ocsp for // valid values. The default value is 0 (ocsp.Unspecified). 
Reason int `json:"reason,omitempty"` } // CAResponse represents response message from fabric-ca server type CAResponse struct { Success bool `json:"success"` Result CARegisterCredentialResponse `json:"result"` Errors []CAResponseErr `json:"errors"` Messages []string `json:"messages"` } // CARegisterCredentialResponse credentials from fabric-ca server registration request type CARegisterCredentialResponse struct { Secret string `json:"secret"` } // CAResponseErr represents error message from fabric-ca server type CAResponseErr struct { Code int `json:"code"` Message string `json:"message"` } // certificateRequest holds certificate request that must be signed by fabric-ca type CertificateRequest struct { CR string `json:"certificate_request"` } // FabricCAClientImpl is client implementation for fabric-ca server type FabricCAClientImpl struct { // Uri is access point for fabric-ca server. Port number and scheme must be provided. // for example http://127.0.0.1:7054 Url string // SkipTLSVerification define how connection must handle invalid TLC certificates. // if true, all verifications are skipped. This value is overwritten by Transport property, if provided SkipTLSVerification bool // Crypto is CryptSuite implementation used to sign request for fabric-ca server Crypto CryptoSuite // Transport define transport rules for communication with fabric-ca server. If nil, default Go setting will be used // It is responsibility of the user to provide proper TLS/certificate setting in TLS communication. Transport *http.Transport } // enrollmentResponse is response from fabric-ca server for enrolment that contains created Ecert type enrollmentResponse struct { Success bool `json:"success"` Result enrollmentResponseResult `json:"result"` Errors []CAResponseErr `json:"errors"` Messages []string `json:"messages"` } type enrollmentResponseResult struct { Cert string ServerInfo enrollmentResponseServerInfo } type enrollmentResponseServerInfo struct { CAName string CAChain string } // Register registers new user in fabric-ca server. In registration request attributes, affiliation and // max enrolments must be set. On success, password will be in CAResponse.Result.Credential. // If password is not provided, random secret will be generated. // It is responsibility of the SDK user to ensure passwords are with big entropy. // Certificate parameter is certificate for user that makes registration and this user MUST have the role for // registering new users. 
func (f *FabricCAClientImpl) Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) { if req.EnrolmentId == "" { return nil, ErrEnrolmentMissing } if req.Affiliation == "" { return nil, ErrAffiliationMissing } if req.Type == "" { return nil, ErrTypeMissing } if identity == nil { return nil, ErrCertificateEmpty } reqJson, err := json.Marshal(req) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/register", f.Url) httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson)) httpReq.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, reqJson) if err != nil { return nil, err } httpReq.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(httpReq) if err != nil { return nil, err } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) result := new(CAResponse) if err := json.Unmarshal(body, result); err != nil { return nil, err } return result, nil } // Enroll execute enrollment request for registered user in fabric-ca server. // On success new Identity with ECert is returned func (f *FabricCAClientImpl) Enroll(enrollmentId, password string) (*Identity, []byte, error) { if len(enrollmentId) < 1 { return nil, nil, ErrEnrollmentIdMissing } // create new cert and send it to CA for signing key, err := f.Crypto.GenerateKey() if err != nil { return nil, nil, err } csr, err := f.Crypto.CreateCertificateRequest(enrollmentId, key) if err != nil { return nil, nil, err } url := fmt.Sprintf("%s/api/v1/enroll", f.Url) crm, err := json.Marshal(CertificateRequest{CR: string(csr)}) if err != nil { return nil, nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm)) req.Header.Set("Content-Type", "application/json") req.SetBasicAuth(enrollmentId, password) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(req) if err != nil { return nil, nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, nil, err } enrResp := new(enrollmentResponse) if err := json.Unmarshal(body, enrResp); err != nil { return nil, nil, err } if !enrResp.Success { return nil, nil, ErrEnrollment } rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert) if err != nil { return nil, nil, err } a, _ := pem.Decode(rawCert) cert, err := x509.ParseCertificate(a.Bytes) if err != nil { return nil, nil, err } return &Identity{Certificate: cert, PrivateKey: key}, csr, nil } // Revoke revokes ECert in fabric-ca server. // Note that this request will revoke certificate ONLY in fabric-ca server. Peers (for now) do not know // about this certificate revocation. // It is responsibility of the SDK user to update peers and set this certificate in every peer revocation list. 
func (f *FabricCAClientImpl) Revoke(identity *Identity, request *CARevocationRequest) (*CAResponse, error) { reqJson, err := json.Marshal(request) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/revoke", f.Url) httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson)) httpReq.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, reqJson) if err != nil { return nil, err } httpReq.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(httpReq) if err != nil { return nil, err } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) result := new(CAResponse) if err := json.Unmarshal(body, result); err != nil { return nil, err } return result, nil } // ReEnroll create new certificate from old one. Useful when certificate is about to expire. Attributes are preserved. func (f *FabricCAClientImpl) ReEnroll(identity *Identity) (*Identity, error) { if identity == nil || identity.EnrollmentId() == "" { return nil, ErrCertificateEmpty } // create new cert and send it to CA for signing key, err := f.Crypto.GenerateKey() if err != nil { return nil, err } csr, err := f.Crypto.CreateCertificateRequest(identity.EnrollmentId(), key) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/reenroll", f.Url) crm, err := json.Marshal(CertificateRequest{CR: string(csr)}) if err != nil { return nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm)) req.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, crm) if err != nil { return nil, err } req.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req) if err != nil { return nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } enrResp := new(enrollmentResponse) if err := json.Unmarshal(body, enrResp); err != nil { return nil, err } if !enrResp.Success { return nil, ErrEnrollment } rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert) if err != nil { return nil, err } a, _ := pem.Decode(rawCert) cert, err := x509.ParseCertificate(a.Bytes) if err != nil { return nil, err } return &Identity{Certificate: cert, PrivateKey: key}, nil } // createAuthToken creates http authorization header token to verify the request. // it is composed by base64 encoded Cert concatenated by base64 encoded request signed with Cert private key func (f *FabricCAClientImpl) createAuthToken(identity *Identity, request []byte) (string, error) { encPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: identity.Certificate.Raw}) encCert := base64.StdEncoding.EncodeToString(encPem) body := base64.StdEncoding.EncodeToString(request) sigString := body + "." + encCert sig, err := f.Crypto.Sign([]byte(sigString), identity.PrivateKey) if err != nil { return "", err } return fmt.Sprintf("%s.%s", encCert, base64.StdEncoding.EncodeToString(sig)), nil } // NewFabricCAClient creates new FabricCAClientImpl func NewCAClient(path string, transport *http.Transport) (CAClient, error) { config,err:=NewCAConfig(path) if err!=nil{ return nil,err } var crypto CryptoSuite switch config.CryptoConfig.Family { case "ecdsa": crypto, err = NewECCryptSuiteFromConfig(config.CryptoConfig) if err != nil { return nil, err } default: return nil, ErrInvalidAlgorithmFamily } return &FabricCAClientImpl{SkipTLSVerification: config.SkipTLSValidation, Url: config.Uri, Crypto: crypto, Transport: transport}, nil }
random_line_split
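Register, Enroll and Revoke above all fall back to an InsecureSkipVerify transport when FabricCAClientImpl.Transport is nil, and the struct comment leaves proper TLS configuration to the caller. A sketch of supplying a verifying transport instead; the certificate and config paths are placeholders:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"net/http"
	"os"

	"github.com/CognitionFoundry/gohfc" // assumed import path
)

func main() {
	// Trust the fabric-ca TLS certificate explicitly instead of skipping verification.
	pemBytes, err := os.ReadFile("ca-tls-cert.pem") // path is a placeholder
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		log.Fatal("no certificates found in PEM file")
	}

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{RootCAs: pool, MinVersion: tls.VersionTLS12},
	}

	// Passing a transport overrides the SkipTLSVerification fallback shown above.
	ca, err := gohfc.NewCAClient("./ca-config.yaml", tr)
	if err != nil {
		log.Fatal(err)
	}
	_ = ca
}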
caclient.go
/* Copyright: Cognition Foundry. All Rights Reserved. License: Apache License Version 2.0 */ package gohfc import ( "bytes" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "encoding/pem" "fmt" "io/ioutil" "net/http" ) // CAClient is common interface for Certificate authority services. type CAClient interface { // Enroll enrolls user and returns ECert,CSR used for certificate and error Enroll(enrollmentId, password string) (*Identity, []byte, error) // Register registers new user in fabric-ca server. Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) // Revoke revokes ECert in fabric-ca server. Revoke(identity *Identity, req *CARevocationRequest) (*CAResponse, error) // ReEnroll create new certificate from old (valid) one. ReEnroll(identity *Identity) (*Identity, error) } // RegistrationRequest holds all data needed for new registration of new user in Certificate Authority type CARegistrationRequest struct { // EnrolmentId is unique name that identifies identity EnrolmentId string `json:"id"` // Type defines type of this identity (user,client, auditor etc...) Type string `json:"type"` // Secret is password that will be used for enrollment. If not provided random password will be generated Secret string `json:"secret,omitempty"` // MaxEnrollments define maximum number of times that identity can enroll. If not provided or is 0 there is no limit MaxEnrollments int `json:"max_enrollments,omitempty"` // Affiliation associates identity with particular organisation. // for example org1.department1 makes this identity part of organisation `org1` and department `department1` Affiliation string `json:"affiliation"` // Attrs are attributes associated with this identity Attrs []*CARegistrationRequestAttr `json:"attrs"` } // CARegistrationRequestAttr holds user attribute used for registration // for example user may have attr `accountType` with value `premium` // this attributes can be accessed in chainCode and build business logic on top of them type CARegistrationRequestAttr struct { Name string `json:"name"` Value string `json:"value"` } // CARevocationRequest holds data needed to revoke certificate in fabric-ca // If AKI and Serial are provided this will revoke specific certificate. // If EnrolmentID is provided all certificated for this EnrollmentID will be revoked and all his/hers future attempts // to enroll will fail. type CARevocationRequest struct { // EnrollmentId of the identity whose certificates should be revoked // If this field is omitted, then Serial and AKI must be specified. EnrollmentId string `json:"id,omitempty"` // Serial number of the certificate to be revoked // If this is omitted, then EnrollmentId must be specified Serial string `json:"serial,omitempty"` // AKI (Authority Key Identifier) of the certificate to be revoked AKI string `json:"aki,omitempty"` // Reason is the reason for revocation. See https://godoc.org/golang.org/x/crypto/ocsp for // valid values. The default value is 0 (ocsp.Unspecified). 
Reason int `json:"reason,omitempty"` } // CAResponse represents response message from fabric-ca server type CAResponse struct { Success bool `json:"success"` Result CARegisterCredentialResponse `json:"result"` Errors []CAResponseErr `json:"errors"` Messages []string `json:"messages"` } // CARegisterCredentialResponse credentials from fabric-ca server registration request type CARegisterCredentialResponse struct { Secret string `json:"secret"` } // CAResponseErr represents error message from fabric-ca server type CAResponseErr struct { Code int `json:"code"` Message string `json:"message"` } // certificateRequest holds certificate request that must be signed by fabric-ca type CertificateRequest struct { CR string `json:"certificate_request"` } // FabricCAClientImpl is client implementation for fabric-ca server type FabricCAClientImpl struct { // Uri is access point for fabric-ca server. Port number and scheme must be provided. // for example http://127.0.0.1:7054 Url string // SkipTLSVerification define how connection must handle invalid TLC certificates. // if true, all verifications are skipped. This value is overwritten by Transport property, if provided SkipTLSVerification bool // Crypto is CryptSuite implementation used to sign request for fabric-ca server Crypto CryptoSuite // Transport define transport rules for communication with fabric-ca server. If nil, default Go setting will be used // It is responsibility of the user to provide proper TLS/certificate setting in TLS communication. Transport *http.Transport } // enrollmentResponse is response from fabric-ca server for enrolment that contains created Ecert type enrollmentResponse struct { Success bool `json:"success"` Result enrollmentResponseResult `json:"result"` Errors []CAResponseErr `json:"errors"` Messages []string `json:"messages"` } type enrollmentResponseResult struct { Cert string ServerInfo enrollmentResponseServerInfo } type enrollmentResponseServerInfo struct { CAName string CAChain string } // Register registers new user in fabric-ca server. In registration request attributes, affiliation and // max enrolments must be set. On success, password will be in CAResponse.Result.Credential. // If password is not provided, random secret will be generated. // It is responsibility of the SDK user to ensure passwords are with big entropy. // Certificate parameter is certificate for user that makes registration and this user MUST have the role for // registering new users. 
func (f *FabricCAClientImpl) Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) { if req.EnrolmentId == "" { return nil, ErrEnrolmentMissing } if req.Affiliation == "" { return nil, ErrAffiliationMissing } if req.Type == "" { return nil, ErrTypeMissing } if identity == nil { return nil, ErrCertificateEmpty } reqJson, err := json.Marshal(req) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/register", f.Url) httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson)) httpReq.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, reqJson) if err != nil { return nil, err } httpReq.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(httpReq) if err != nil { return nil, err } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) result := new(CAResponse) if err := json.Unmarshal(body, result); err != nil { return nil, err } return result, nil } // Enroll execute enrollment request for registered user in fabric-ca server. // On success new Identity with ECert is returned func (f *FabricCAClientImpl) Enroll(enrollmentId, password string) (*Identity, []byte, error) { if len(enrollmentId) < 1 { return nil, nil, ErrEnrollmentIdMissing } // create new cert and send it to CA for signing key, err := f.Crypto.GenerateKey() if err != nil { return nil, nil, err } csr, err := f.Crypto.CreateCertificateRequest(enrollmentId, key) if err != nil { return nil, nil, err } url := fmt.Sprintf("%s/api/v1/enroll", f.Url) crm, err := json.Marshal(CertificateRequest{CR: string(csr)}) if err != nil { return nil, nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm)) req.Header.Set("Content-Type", "application/json") req.SetBasicAuth(enrollmentId, password) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(req) if err != nil { return nil, nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, nil, err } enrResp := new(enrollmentResponse) if err := json.Unmarshal(body, enrResp); err != nil { return nil, nil, err } if !enrResp.Success { return nil, nil, ErrEnrollment } rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert) if err != nil { return nil, nil, err } a, _ := pem.Decode(rawCert) cert, err := x509.ParseCertificate(a.Bytes) if err != nil { return nil, nil, err } return &Identity{Certificate: cert, PrivateKey: key}, csr, nil } // Revoke revokes ECert in fabric-ca server. // Note that this request will revoke certificate ONLY in fabric-ca server. Peers (for now) do not know // about this certificate revocation. // It is responsibility of the SDK user to update peers and set this certificate in every peer revocation list. 
func (f *FabricCAClientImpl) Revoke(identity *Identity, request *CARevocationRequest) (*CAResponse, error) { reqJson, err := json.Marshal(request) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/revoke", f.Url) httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson)) httpReq.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, reqJson) if err != nil { return nil, err } httpReq.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(httpReq) if err != nil { return nil, err } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) result := new(CAResponse) if err := json.Unmarshal(body, result); err != nil { return nil, err } return result, nil } // ReEnroll create new certificate from old one. Useful when certificate is about to expire. Attributes are preserved. func (f *FabricCAClientImpl) ReEnroll(identity *Identity) (*Identity, error) { if identity == nil || identity.EnrollmentId() == "" { return nil, ErrCertificateEmpty } // create new cert and send it to CA for signing key, err := f.Crypto.GenerateKey() if err != nil { return nil, err } csr, err := f.Crypto.CreateCertificateRequest(identity.EnrollmentId(), key) if err != nil { return nil, err } url := fmt.Sprintf("%s/api/v1/reenroll", f.Url) crm, err := json.Marshal(CertificateRequest{CR: string(csr)}) if err != nil { return nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm)) req.Header.Set("Content-Type", "application/json") token, err := f.createAuthToken(identity, crm) if err != nil { return nil, err } req.Header.Set("authorization", token) var tr *http.Transport if f.Transport == nil { tr = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification}, } } else { tr = f.Transport } httpClient := &http.Client{Transport: tr} resp, err := httpClient.Do(req) if err != nil { return nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } enrResp := new(enrollmentResponse) if err := json.Unmarshal(body, enrResp); err != nil { return nil, err } if !enrResp.Success { return nil, ErrEnrollment } rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert) if err != nil { return nil, err } a, _ := pem.Decode(rawCert) cert, err := x509.ParseCertificate(a.Bytes) if err != nil { return nil, err } return &Identity{Certificate: cert, PrivateKey: key}, nil } // createAuthToken creates http authorization header token to verify the request. // it is composed by base64 encoded Cert concatenated by base64 encoded request signed with Cert private key func (f *FabricCAClientImpl) createAuthToken(identity *Identity, request []byte) (string, error)
// NewCAClient creates a new FabricCAClientImpl from the CA configuration file at path.
func NewCAClient(path string, transport *http.Transport) (CAClient, error) {
	config, err := NewCAConfig(path)
	if err != nil {
		return nil, err
	}
	var crypto CryptoSuite
	switch config.CryptoConfig.Family {
	case "ecdsa":
		crypto, err = NewECCryptSuiteFromConfig(config.CryptoConfig)
		if err != nil {
			return nil, err
		}
	default:
		return nil, ErrInvalidAlgorithmFamily
	}
	return &FabricCAClientImpl{SkipTLSVerification: config.SkipTLSValidation,
		Url: config.Uri, Crypto: crypto, Transport: transport}, nil
}
{
	encPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: identity.Certificate.Raw})
	encCert := base64.StdEncoding.EncodeToString(encPem)
	body := base64.StdEncoding.EncodeToString(request)
	sigString := body + "." + encCert

	sig, err := f.Crypto.Sign([]byte(sigString), identity.PrivateKey)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s.%s", encCert, base64.StdEncoding.EncodeToString(sig)), nil
}
identifier_body
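The ReEnroll comment above notes it is intended for certificates that are about to expire: the enrollment id is kept while a fresh key and ECert are issued. Below is a small, hypothetical rotation helper built on that method; the 30-day window is an arbitrary choice, not a fabric-ca rule, and the import path is assumed as before.

package caexample

import (
	"log"
	"time"

	"github.com/CognitionFoundry/gohfc" // assumed import path
)

// rotateIfExpiring re-enrolls an identity when its certificate is close to expiry.
func rotateIfExpiring(ca gohfc.CAClient, id *gohfc.Identity) (*gohfc.Identity, error) {
	if time.Until(id.Certificate.NotAfter) > 30*24*time.Hour {
		return id, nil // still comfortably valid, keep the current certificate
	}
	fresh, err := ca.ReEnroll(id) // same enrollment id, new key and ECert
	if err != nil {
		return nil, err
	}
	log.Printf("re-enrolled %s, new cert valid until %s",
		fresh.EnrollmentId(), fresh.Certificate.NotAfter)
	return fresh, nil
}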
machine_amd64.go
// Copyright 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build amd64 // +build amd64 package kvm import ( "fmt" "math/big" "reflect" "runtime" "runtime/debug" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/cpuid" "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/platform" ktime "gvisor.dev/gvisor/pkg/sentry/time" ) // initArchState initializes architecture-specific state. func (m *machine) initArchState() error { // Set the legacy TSS address. This address is covered by the reserved // range (up to 4GB). In fact, this is a main reason it exists. if _, _, errno := unix.RawSyscall( unix.SYS_IOCTL, uintptr(m.fd), _KVM_SET_TSS_ADDR, uintptr(reservedMemory-(3*hostarch.PageSize))); errno != 0 { return errno } // Initialize all vCPUs to minimize kvm ioctl-s allowed by seccomp filters. m.mu.Lock() for i := 0; i < m.maxVCPUs; i++ { m.createVCPU(i) } m.mu.Unlock() c := m.Get() defer m.Put(c) // Enable CPUID faulting, if possible. Note that this also serves as a // basic platform sanity tests, since we will enter guest mode for the // first time here. The recovery is necessary, since if we fail to read // the platform info register, we will retry to host mode and // ultimately need to handle a segmentation fault. old := debug.SetPanicOnFault(true) defer func() { recover() debug.SetPanicOnFault(old) }() bluepill(c) ring0.SetCPUIDFaulting(true) return nil } type vCPUArchState struct { // PCIDs is the set of PCIDs for this vCPU. // // This starts above fixedKernelPCID. PCIDs *pagetables.PCIDs } const ( // fixedKernelPCID is a fixed kernel PCID used for the kernel page // tables. We must start allocating user PCIDs above this in order to // avoid any conflict (see below). fixedKernelPCID = 1 // poolPCIDs is the number of PCIDs to record in the database. As this // grows, assignment can take longer, since it is a simple linear scan. // Beyond a relatively small number, there are likely few perform // benefits, since the TLB has likely long since lost any translations // from more than a few PCIDs past. poolPCIDs = 8 ) // initArchState initializes architecture-specific state. func (c *vCPU)
() error { var ( kernelSystemRegs systemRegs kernelUserRegs userRegs ) // Set base control registers. kernelSystemRegs.CR0 = c.CR0() kernelSystemRegs.CR4 = c.CR4() kernelSystemRegs.EFER = c.EFER() // Set the IDT & GDT in the registers. kernelSystemRegs.IDT.base, kernelSystemRegs.IDT.limit = c.IDT() kernelSystemRegs.GDT.base, kernelSystemRegs.GDT.limit = c.GDT() kernelSystemRegs.CS.Load(&ring0.KernelCodeSegment, ring0.Kcode) kernelSystemRegs.DS.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.ES.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.SS.Load(&ring0.KernelDataSegment, ring0.Kdata) kernelSystemRegs.FS.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.GS.Load(&ring0.UserDataSegment, ring0.Udata) tssBase, tssLimit, tss := c.TSS() kernelSystemRegs.TR.Load(tss, ring0.Tss) kernelSystemRegs.TR.base = tssBase kernelSystemRegs.TR.limit = uint32(tssLimit) // Point to kernel page tables, with no initial PCID. kernelSystemRegs.CR3 = c.machine.kernel.PageTables.CR3(false, 0) // Initialize the PCID database. if hasGuestPCID { // Note that NewPCIDs may return a nil table here, in which // case we simply don't use PCID support (see below). In // practice, this should not happen, however. c.PCIDs = pagetables.NewPCIDs(fixedKernelPCID+1, poolPCIDs) } // Set the CPUID; this is required before setting system registers, // since KVM will reject several CR4 bits if the CPUID does not // indicate the support is available. if err := c.setCPUID(); err != nil { return err } // Set the entrypoint for the kernel. kernelUserRegs.RIP = uint64(ring0.AddrOfStart()) kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer()) kernelUserRegs.RSP = c.StackTop() kernelUserRegs.RFLAGS = ring0.KernelFlagsSet // Set the system registers. if err := c.setSystemRegisters(&kernelSystemRegs); err != nil { return err } // Set the user registers. if errno := c.setUserRegisters(&kernelUserRegs); errno != 0 { return fmt.Errorf("error setting user registers: %v", errno) } // Set the time offset to the host native time. return c.setSystemTime() } // bitsForScaling returns the bits available for storing the fraction component // of the TSC scaling ratio. // It is set using getBitsForScaling when the KVM platform is initialized. var bitsForScaling int64 // getBitsForScaling returns the bits available for storing the fraction component // of the TSC scaling ratio. This allows us to replicate the (bad) math done by // the kernel below in scaledTSC, and ensure we can compute an exact zero // offset in setSystemTime. // // These constants correspond to kvm_tsc_scaling_ratio_frac_bits. func getBitsForScaling() int64 { fs := cpuid.HostFeatureSet() if fs.Intel() { return 48 // See vmx.c (kvm sources). } else if fs.AMD() { return 32 // See svm.c (svm sources). } else { return 63 // Unknown: theoretical maximum. } } // scaledTSC returns the host TSC scaled by the given frequency. // // This assumes a current frequency of 1. We require only the unitless ratio of // rawFreq to some current frequency. See setSystemTime for context. // // The kernel math guarantees that all bits of the multiplication and division // will be correctly preserved and applied. However, it is not possible to // actually store the ratio correctly. So we need to use the same schema in // order to calculate the scaled frequency and get the same result. // // We can assume that the current frequency is (1), so we are calculating a // strict inverse of this value. This simplifies this function considerably. 
// // Roughly, the returned value "scaledTSC" will have: // scaledTSC/hostTSC == 1/rawFreq // //go:nosplit func scaledTSC(rawFreq uintptr) int64 { scale := int64(1 << bitsForScaling) ratio := big.NewInt(scale / int64(rawFreq)) ratio.Mul(ratio, big.NewInt(int64(ktime.Rdtsc()))) ratio.Div(ratio, big.NewInt(scale)) return ratio.Int64() } // setSystemTime sets the vCPU to the system time. func (c *vCPU) setSystemTime() error { // Attempt to set the offset directly. This is supported as of Linux 5.16, // or commit 828ca89628bfcb1b8f27535025f69dd00eb55207. if err := c.setTSCOffset(); err == nil { return err } // If tsc scaling is not supported, fallback to legacy mode. if !c.machine.tscControl { return c.setSystemTimeLegacy() } // First, scale down the clock frequency to the lowest value allowed by // the API itself. How low we can go depends on the underlying // hardware, but it is typically ~1/2^48 for Intel, ~1/2^32 for AMD. // Even the lower bound here will take a 4GHz frequency down to 1Hz, // meaning that everything should be able to handle a Khz setting of 1 // with bits to spare. // // Note that reducing the clock does not typically require special // capabilities as it is emulated in KVM. We don't actually use this // capability, but it means that this method should be robust to // different hardware configurations. rawFreq, err := c.getTSCFreq() if err != nil { return c.setSystemTimeLegacy() } if err := c.setTSCFreq(1); err != nil { return c.setSystemTimeLegacy() } // Always restore the original frequency. defer func() { if err := c.setTSCFreq(rawFreq); err != nil { panic(err.Error()) } }() // Attempt to set the system time in this compressed world. The // calculation for offset normally looks like: // // offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc()); // // So as long as the kvm_scale_tsc component is constant before and // after the call to set the TSC value (and it is passes as the // target_tsc), we will compute an offset value of zero. // // This is effectively cheating to make our "setSystemTime" call so // unbelievably, incredibly fast that we do it "instantly" and all the // calculations result in an offset of zero. lastTSC := scaledTSC(rawFreq) for { if err := c.setTSC(uint64(lastTSC)); err != nil { return err } nextTSC := scaledTSC(rawFreq) if lastTSC == nextTSC { return nil } lastTSC = nextTSC // Try again. } } // nonCanonical generates a canonical address return. // //go:nosplit func nonCanonical(addr uint64, signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) { *info = linux.SignalInfo{ Signo: signal, Code: linux.SI_KERNEL, } info.SetAddr(addr) // Include address. return hostarch.NoAccess, platform.ErrContextSignal } // fault generates an appropriate fault return. // //go:nosplit func (c *vCPU) fault(signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) { bluepill(c) // Probably no-op, but may not be. faultAddr := ring0.ReadCR2() code, user := c.ErrorCode() if !user { // The last fault serviced by this CPU was not a user // fault, so we can't reliably trust the faultAddr or // the code provided here. We need to re-execute. return hostarch.NoAccess, platform.ErrContextInterrupt } // Reset the pointed SignalInfo. *info = linux.SignalInfo{Signo: signal} info.SetAddr(uint64(faultAddr)) accessType := hostarch.AccessType{} if signal == int32(unix.SIGSEGV) { accessType = hostarch.AccessType{ Read: code&(1<<1) == 0, Write: code&(1<<1) != 0, Execute: code&(1<<4) != 0, } } if !accessType.Write && !accessType.Execute { info.Code = 1 // SEGV_MAPERR. 
} else { info.Code = 2 // SEGV_ACCERR. } return accessType, platform.ErrContextSignal } //go:nosplit //go:noinline func loadByte(ptr *byte) byte { return *ptr } // SwitchToUser unpacks architectural-details. func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *linux.SignalInfo) (hostarch.AccessType, error) { // Check for canonical addresses. if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) { return nonCanonical(regs.Rip, int32(unix.SIGSEGV), info) } else if !ring0.IsCanonical(regs.Rsp) { return nonCanonical(regs.Rsp, int32(unix.SIGBUS), info) } else if !ring0.IsCanonical(regs.Fs_base) { return nonCanonical(regs.Fs_base, int32(unix.SIGBUS), info) } else if !ring0.IsCanonical(regs.Gs_base) { return nonCanonical(regs.Gs_base, int32(unix.SIGBUS), info) } // Assign PCIDs. if c.PCIDs != nil { var requireFlushPCID bool // Force a flush? switchOpts.UserPCID, requireFlushPCID = c.PCIDs.Assign(switchOpts.PageTables) switchOpts.KernelPCID = fixedKernelPCID switchOpts.Flush = switchOpts.Flush || requireFlushPCID } // See below. var vector ring0.Vector // Past this point, stack growth can cause system calls (and a break // from guest mode). So we need to ensure that between the bluepill // call here and the switch call immediately below, no additional // allocations occur. entersyscall() bluepill(c) vector = c.CPU.SwitchToUser(switchOpts) exitsyscall() switch vector { case ring0.Syscall, ring0.SyscallInt80: // Fast path: system call executed. return hostarch.NoAccess, nil case ring0.PageFault: return c.fault(int32(unix.SIGSEGV), info) case ring0.Debug, ring0.Breakpoint: *info = linux.SignalInfo{ Signo: int32(unix.SIGTRAP), Code: 1, // TRAP_BRKPT (breakpoint). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.GeneralProtectionFault, ring0.SegmentNotPresent, ring0.BoundRangeExceeded, ring0.InvalidTSS, ring0.StackSegmentFault: *info = linux.SignalInfo{ Signo: int32(unix.SIGSEGV), Code: linux.SI_KERNEL, } info.SetAddr(switchOpts.Registers.Rip) // Include address. if vector == ring0.GeneralProtectionFault { // When CPUID faulting is enabled, we will generate a #GP(0) when // userspace executes a CPUID instruction. This is handled above, // because we need to be able to map and read user memory. return hostarch.AccessType{}, tryCPUIDError{} } return hostarch.AccessType{}, platform.ErrContextSignal case ring0.InvalidOpcode: *info = linux.SignalInfo{ Signo: int32(unix.SIGILL), Code: 1, // ILL_ILLOPC (illegal opcode). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.DivideByZero: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 1, // FPE_INTDIV (divide by zero). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Overflow: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 2, // FPE_INTOVF (integer overflow). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.X87FloatingPointException, ring0.SIMDFloatingPointException: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 7, // FPE_FLTINV (invalid operation). } info.SetAddr(switchOpts.Registers.Rip) // Include address. 
return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Vector(bounce): // ring0.VirtualizationException return hostarch.NoAccess, platform.ErrContextInterrupt case ring0.AlignmentCheck: *info = linux.SignalInfo{ Signo: int32(unix.SIGBUS), Code: 2, // BUS_ADRERR (physical address does not exist). } return hostarch.NoAccess, platform.ErrContextSignal case ring0.NMI: // An NMI is generated only when a fault is not servicable by // KVM itself, so we think some mapping is writeable but it's // really not. This could happen, e.g. if some file is // truncated (and would generate a SIGBUS) and we map it // directly into the instance. return c.fault(int32(unix.SIGBUS), info) case ring0.DeviceNotAvailable, ring0.DoubleFault, ring0.CoprocessorSegmentOverrun, ring0.MachineCheck, ring0.SecurityException: fallthrough default: panic(fmt.Sprintf("unexpected vector: 0x%x", vector)) } } func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) { // Map all the executable regions so that all the entry functions // are mapped in the upper half. if err := applyVirtualRegions(func(vr virtualRegion) { if excludeVirtualRegion(vr) || vr.filename == "[vsyscall]" { return } if vr.accessType.Execute { r := vr.region physical, length, ok := translateToPhysical(r.virtual) if !ok || length < r.length { panic("impossible translation") } pageTable.Map( hostarch.Addr(ring0.KernelStartAddress|r.virtual), r.length, pagetables.MapOpts{AccessType: hostarch.Execute, Global: true}, physical) } }); err != nil { panic(fmt.Sprintf("error parsing /proc/self/maps: %v", err)) } for start, end := range m.kernel.EntryRegions() { regionLen := end - start physical, length, ok := translateToPhysical(start) if !ok || length < regionLen { panic("impossible translation") } pageTable.Map( hostarch.Addr(ring0.KernelStartAddress|start), regionLen, pagetables.MapOpts{AccessType: hostarch.ReadWrite, Global: true}, physical) } } // getMaxVCPU get max vCPU number func (m *machine) getMaxVCPU() { maxVCPUs, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS) if errno != 0 { m.maxVCPUs = _KVM_NR_VCPUS } else { m.maxVCPUs = int(maxVCPUs) } // The goal here is to avoid vCPU contentions for reasonable workloads. // But "reasonable" isn't defined well in this case. Let's say that CPU // overcommit with factor 2 is still acceptable. We allocate a set of // vCPU for each goruntime processor (P) and two sets of vCPUs to run // user code. rCPUs := runtime.GOMAXPROCS(0) if 3*rCPUs < m.maxVCPUs { m.maxVCPUs = 3 * rCPUs } } func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion { return physicalRegions }
initArchState
identifier_name
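The machine_amd64.go record above documents how scaledTSC replicates KVM's fixed-point TSC scaling (48 fraction bits on Intel, 32 on AMD per kvm_tsc_scaling_ratio_frac_bits) so that setSystemTime can compute an exact zero offset. The following standalone Go sketch illustrates that arithmetic under the assumption that the kernel stores the ratio as floor(scale/freq) with scale = 1 << fracBits; the function and constant names are illustrative and are not part of gVisor.

package main

import (
	"fmt"
	"math/big"
)

// scaled mimics the kernel-style fixed-point scaling: the ratio is a
// value with fracBits fractional bits, so the scaled TSC is
// (tsc * floor(scale/freq)) / scale with scale = 1 << fracBits. big.Int
// keeps the wide intermediate product exact, as scaledTSC does above.
func scaled(tsc, freq uint64, fracBits uint) uint64 {
	scale := new(big.Int).Lsh(big.NewInt(1), fracBits)
	ratio := new(big.Int).Div(scale, new(big.Int).SetUint64(freq))
	prod := new(big.Int).Mul(ratio, new(big.Int).SetUint64(tsc))
	prod.Div(prod, scale)
	return prod.Uint64()
}

func main() {
	// A host TSC value scaled against a 3 GHz raw frequency; the
	// truncation in floor(scale/freq) is why the same schema must be
	// reused on both sides when computing the offset.
	const hostFreq = 3_000_000_000
	fmt.Println(scaled(123_456_789_012, hostFreq, 48)) // Intel-style fraction bits
	fmt.Println(scaled(123_456_789_012, hostFreq, 32)) // AMD-style fraction bits
}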
machine_amd64.go
// Copyright 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build amd64 // +build amd64 package kvm import ( "fmt" "math/big" "reflect" "runtime" "runtime/debug" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/cpuid" "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/platform" ktime "gvisor.dev/gvisor/pkg/sentry/time" ) // initArchState initializes architecture-specific state. func (m *machine) initArchState() error { // Set the legacy TSS address. This address is covered by the reserved // range (up to 4GB). In fact, this is a main reason it exists. if _, _, errno := unix.RawSyscall( unix.SYS_IOCTL, uintptr(m.fd), _KVM_SET_TSS_ADDR, uintptr(reservedMemory-(3*hostarch.PageSize))); errno != 0 { return errno } // Initialize all vCPUs to minimize kvm ioctl-s allowed by seccomp filters. m.mu.Lock() for i := 0; i < m.maxVCPUs; i++ { m.createVCPU(i) } m.mu.Unlock() c := m.Get() defer m.Put(c) // Enable CPUID faulting, if possible. Note that this also serves as a // basic platform sanity tests, since we will enter guest mode for the // first time here. The recovery is necessary, since if we fail to read // the platform info register, we will retry to host mode and // ultimately need to handle a segmentation fault. old := debug.SetPanicOnFault(true) defer func() { recover() debug.SetPanicOnFault(old) }() bluepill(c) ring0.SetCPUIDFaulting(true) return nil } type vCPUArchState struct { // PCIDs is the set of PCIDs for this vCPU. // // This starts above fixedKernelPCID. PCIDs *pagetables.PCIDs } const ( // fixedKernelPCID is a fixed kernel PCID used for the kernel page // tables. We must start allocating user PCIDs above this in order to // avoid any conflict (see below). fixedKernelPCID = 1 // poolPCIDs is the number of PCIDs to record in the database. As this // grows, assignment can take longer, since it is a simple linear scan. // Beyond a relatively small number, there are likely few perform // benefits, since the TLB has likely long since lost any translations // from more than a few PCIDs past. poolPCIDs = 8 ) // initArchState initializes architecture-specific state. func (c *vCPU) initArchState() error { var ( kernelSystemRegs systemRegs kernelUserRegs userRegs ) // Set base control registers. kernelSystemRegs.CR0 = c.CR0() kernelSystemRegs.CR4 = c.CR4() kernelSystemRegs.EFER = c.EFER() // Set the IDT & GDT in the registers. 
kernelSystemRegs.IDT.base, kernelSystemRegs.IDT.limit = c.IDT() kernelSystemRegs.GDT.base, kernelSystemRegs.GDT.limit = c.GDT() kernelSystemRegs.CS.Load(&ring0.KernelCodeSegment, ring0.Kcode) kernelSystemRegs.DS.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.ES.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.SS.Load(&ring0.KernelDataSegment, ring0.Kdata) kernelSystemRegs.FS.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.GS.Load(&ring0.UserDataSegment, ring0.Udata) tssBase, tssLimit, tss := c.TSS() kernelSystemRegs.TR.Load(tss, ring0.Tss) kernelSystemRegs.TR.base = tssBase kernelSystemRegs.TR.limit = uint32(tssLimit) // Point to kernel page tables, with no initial PCID. kernelSystemRegs.CR3 = c.machine.kernel.PageTables.CR3(false, 0) // Initialize the PCID database. if hasGuestPCID { // Note that NewPCIDs may return a nil table here, in which // case we simply don't use PCID support (see below). In // practice, this should not happen, however. c.PCIDs = pagetables.NewPCIDs(fixedKernelPCID+1, poolPCIDs) } // Set the CPUID; this is required before setting system registers, // since KVM will reject several CR4 bits if the CPUID does not // indicate the support is available. if err := c.setCPUID(); err != nil { return err } // Set the entrypoint for the kernel. kernelUserRegs.RIP = uint64(ring0.AddrOfStart()) kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer()) kernelUserRegs.RSP = c.StackTop() kernelUserRegs.RFLAGS = ring0.KernelFlagsSet // Set the system registers. if err := c.setSystemRegisters(&kernelSystemRegs); err != nil { return err } // Set the user registers. if errno := c.setUserRegisters(&kernelUserRegs); errno != 0 { return fmt.Errorf("error setting user registers: %v", errno) } // Set the time offset to the host native time. return c.setSystemTime() } // bitsForScaling returns the bits available for storing the fraction component // of the TSC scaling ratio. // It is set using getBitsForScaling when the KVM platform is initialized. var bitsForScaling int64 // getBitsForScaling returns the bits available for storing the fraction component // of the TSC scaling ratio. This allows us to replicate the (bad) math done by // the kernel below in scaledTSC, and ensure we can compute an exact zero // offset in setSystemTime. // // These constants correspond to kvm_tsc_scaling_ratio_frac_bits. func getBitsForScaling() int64 { fs := cpuid.HostFeatureSet() if fs.Intel() { return 48 // See vmx.c (kvm sources). } else if fs.AMD() { return 32 // See svm.c (svm sources). } else { return 63 // Unknown: theoretical maximum. } } // scaledTSC returns the host TSC scaled by the given frequency. // // This assumes a current frequency of 1. We require only the unitless ratio of // rawFreq to some current frequency. See setSystemTime for context. // // The kernel math guarantees that all bits of the multiplication and division // will be correctly preserved and applied. However, it is not possible to // actually store the ratio correctly. So we need to use the same schema in // order to calculate the scaled frequency and get the same result. // // We can assume that the current frequency is (1), so we are calculating a // strict inverse of this value. This simplifies this function considerably. 
// // Roughly, the returned value "scaledTSC" will have: // scaledTSC/hostTSC == 1/rawFreq // //go:nosplit func scaledTSC(rawFreq uintptr) int64 { scale := int64(1 << bitsForScaling) ratio := big.NewInt(scale / int64(rawFreq)) ratio.Mul(ratio, big.NewInt(int64(ktime.Rdtsc()))) ratio.Div(ratio, big.NewInt(scale)) return ratio.Int64() } // setSystemTime sets the vCPU to the system time. func (c *vCPU) setSystemTime() error { // Attempt to set the offset directly. This is supported as of Linux 5.16, // or commit 828ca89628bfcb1b8f27535025f69dd00eb55207. if err := c.setTSCOffset(); err == nil
// If tsc scaling is not supported, fallback to legacy mode. if !c.machine.tscControl { return c.setSystemTimeLegacy() } // First, scale down the clock frequency to the lowest value allowed by // the API itself. How low we can go depends on the underlying // hardware, but it is typically ~1/2^48 for Intel, ~1/2^32 for AMD. // Even the lower bound here will take a 4GHz frequency down to 1Hz, // meaning that everything should be able to handle a Khz setting of 1 // with bits to spare. // // Note that reducing the clock does not typically require special // capabilities as it is emulated in KVM. We don't actually use this // capability, but it means that this method should be robust to // different hardware configurations. rawFreq, err := c.getTSCFreq() if err != nil { return c.setSystemTimeLegacy() } if err := c.setTSCFreq(1); err != nil { return c.setSystemTimeLegacy() } // Always restore the original frequency. defer func() { if err := c.setTSCFreq(rawFreq); err != nil { panic(err.Error()) } }() // Attempt to set the system time in this compressed world. The // calculation for offset normally looks like: // // offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc()); // // So as long as the kvm_scale_tsc component is constant before and // after the call to set the TSC value (and it is passes as the // target_tsc), we will compute an offset value of zero. // // This is effectively cheating to make our "setSystemTime" call so // unbelievably, incredibly fast that we do it "instantly" and all the // calculations result in an offset of zero. lastTSC := scaledTSC(rawFreq) for { if err := c.setTSC(uint64(lastTSC)); err != nil { return err } nextTSC := scaledTSC(rawFreq) if lastTSC == nextTSC { return nil } lastTSC = nextTSC // Try again. } } // nonCanonical generates a canonical address return. // //go:nosplit func nonCanonical(addr uint64, signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) { *info = linux.SignalInfo{ Signo: signal, Code: linux.SI_KERNEL, } info.SetAddr(addr) // Include address. return hostarch.NoAccess, platform.ErrContextSignal } // fault generates an appropriate fault return. // //go:nosplit func (c *vCPU) fault(signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) { bluepill(c) // Probably no-op, but may not be. faultAddr := ring0.ReadCR2() code, user := c.ErrorCode() if !user { // The last fault serviced by this CPU was not a user // fault, so we can't reliably trust the faultAddr or // the code provided here. We need to re-execute. return hostarch.NoAccess, platform.ErrContextInterrupt } // Reset the pointed SignalInfo. *info = linux.SignalInfo{Signo: signal} info.SetAddr(uint64(faultAddr)) accessType := hostarch.AccessType{} if signal == int32(unix.SIGSEGV) { accessType = hostarch.AccessType{ Read: code&(1<<1) == 0, Write: code&(1<<1) != 0, Execute: code&(1<<4) != 0, } } if !accessType.Write && !accessType.Execute { info.Code = 1 // SEGV_MAPERR. } else { info.Code = 2 // SEGV_ACCERR. } return accessType, platform.ErrContextSignal } //go:nosplit //go:noinline func loadByte(ptr *byte) byte { return *ptr } // SwitchToUser unpacks architectural-details. func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *linux.SignalInfo) (hostarch.AccessType, error) { // Check for canonical addresses. 
if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) { return nonCanonical(regs.Rip, int32(unix.SIGSEGV), info) } else if !ring0.IsCanonical(regs.Rsp) { return nonCanonical(regs.Rsp, int32(unix.SIGBUS), info) } else if !ring0.IsCanonical(regs.Fs_base) { return nonCanonical(regs.Fs_base, int32(unix.SIGBUS), info) } else if !ring0.IsCanonical(regs.Gs_base) { return nonCanonical(regs.Gs_base, int32(unix.SIGBUS), info) } // Assign PCIDs. if c.PCIDs != nil { var requireFlushPCID bool // Force a flush? switchOpts.UserPCID, requireFlushPCID = c.PCIDs.Assign(switchOpts.PageTables) switchOpts.KernelPCID = fixedKernelPCID switchOpts.Flush = switchOpts.Flush || requireFlushPCID } // See below. var vector ring0.Vector // Past this point, stack growth can cause system calls (and a break // from guest mode). So we need to ensure that between the bluepill // call here and the switch call immediately below, no additional // allocations occur. entersyscall() bluepill(c) vector = c.CPU.SwitchToUser(switchOpts) exitsyscall() switch vector { case ring0.Syscall, ring0.SyscallInt80: // Fast path: system call executed. return hostarch.NoAccess, nil case ring0.PageFault: return c.fault(int32(unix.SIGSEGV), info) case ring0.Debug, ring0.Breakpoint: *info = linux.SignalInfo{ Signo: int32(unix.SIGTRAP), Code: 1, // TRAP_BRKPT (breakpoint). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.GeneralProtectionFault, ring0.SegmentNotPresent, ring0.BoundRangeExceeded, ring0.InvalidTSS, ring0.StackSegmentFault: *info = linux.SignalInfo{ Signo: int32(unix.SIGSEGV), Code: linux.SI_KERNEL, } info.SetAddr(switchOpts.Registers.Rip) // Include address. if vector == ring0.GeneralProtectionFault { // When CPUID faulting is enabled, we will generate a #GP(0) when // userspace executes a CPUID instruction. This is handled above, // because we need to be able to map and read user memory. return hostarch.AccessType{}, tryCPUIDError{} } return hostarch.AccessType{}, platform.ErrContextSignal case ring0.InvalidOpcode: *info = linux.SignalInfo{ Signo: int32(unix.SIGILL), Code: 1, // ILL_ILLOPC (illegal opcode). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.DivideByZero: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 1, // FPE_INTDIV (divide by zero). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Overflow: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 2, // FPE_INTOVF (integer overflow). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.X87FloatingPointException, ring0.SIMDFloatingPointException: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 7, // FPE_FLTINV (invalid operation). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Vector(bounce): // ring0.VirtualizationException return hostarch.NoAccess, platform.ErrContextInterrupt case ring0.AlignmentCheck: *info = linux.SignalInfo{ Signo: int32(unix.SIGBUS), Code: 2, // BUS_ADRERR (physical address does not exist). } return hostarch.NoAccess, platform.ErrContextSignal case ring0.NMI: // An NMI is generated only when a fault is not servicable by // KVM itself, so we think some mapping is writeable but it's // really not. This could happen, e.g. 
if some file is // truncated (and would generate a SIGBUS) and we map it // directly into the instance. return c.fault(int32(unix.SIGBUS), info) case ring0.DeviceNotAvailable, ring0.DoubleFault, ring0.CoprocessorSegmentOverrun, ring0.MachineCheck, ring0.SecurityException: fallthrough default: panic(fmt.Sprintf("unexpected vector: 0x%x", vector)) } } func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) { // Map all the executable regions so that all the entry functions // are mapped in the upper half. if err := applyVirtualRegions(func(vr virtualRegion) { if excludeVirtualRegion(vr) || vr.filename == "[vsyscall]" { return } if vr.accessType.Execute { r := vr.region physical, length, ok := translateToPhysical(r.virtual) if !ok || length < r.length { panic("impossible translation") } pageTable.Map( hostarch.Addr(ring0.KernelStartAddress|r.virtual), r.length, pagetables.MapOpts{AccessType: hostarch.Execute, Global: true}, physical) } }); err != nil { panic(fmt.Sprintf("error parsing /proc/self/maps: %v", err)) } for start, end := range m.kernel.EntryRegions() { regionLen := end - start physical, length, ok := translateToPhysical(start) if !ok || length < regionLen { panic("impossible translation") } pageTable.Map( hostarch.Addr(ring0.KernelStartAddress|start), regionLen, pagetables.MapOpts{AccessType: hostarch.ReadWrite, Global: true}, physical) } } // getMaxVCPU get max vCPU number func (m *machine) getMaxVCPU() { maxVCPUs, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS) if errno != 0 { m.maxVCPUs = _KVM_NR_VCPUS } else { m.maxVCPUs = int(maxVCPUs) } // The goal here is to avoid vCPU contentions for reasonable workloads. // But "reasonable" isn't defined well in this case. Let's say that CPU // overcommit with factor 2 is still acceptable. We allocate a set of // vCPU for each goruntime processor (P) and two sets of vCPUs to run // user code. rCPUs := runtime.GOMAXPROCS(0) if 3*rCPUs < m.maxVCPUs { m.maxVCPUs = 3 * rCPUs } } func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion { return physicalRegions }
{ return err }
conditional_block
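The fault() helper in the record above turns the x86 page-fault error code into a hostarch.AccessType by inspecting bit 1 (write) and bit 4 (instruction fetch). The minimal sketch below reproduces just that bit decoding with hypothetical names, so the mapping from error-code bits to read/write/execute can be checked in isolation; it is a reading aid, not the platform code.

package main

import "fmt"

// access mirrors the shape of hostarch.AccessType for illustration only.
type access struct{ Read, Write, Execute bool }

// decodeFaultCode extracts the access type from an x86 page-fault error
// code the same way fault() does above: bit 1 set means the fault was a
// write, bit 4 set means it was an instruction fetch.
func decodeFaultCode(code uint64) access {
	return access{
		Read:    code&(1<<1) == 0,
		Write:   code&(1<<1) != 0,
		Execute: code&(1<<4) != 0,
	}
}

func main() {
	fmt.Printf("%+v\n", decodeFaultCode(0x04)) // user read of an unmapped page
	fmt.Printf("%+v\n", decodeFaultCode(0x06)) // user write
	fmt.Printf("%+v\n", decodeFaultCode(0x14)) // user instruction fetch
}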
machine_amd64.go
// Copyright 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build amd64 // +build amd64 package kvm import ( "fmt" "math/big" "reflect" "runtime" "runtime/debug" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/cpuid" "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/platform" ktime "gvisor.dev/gvisor/pkg/sentry/time" ) // initArchState initializes architecture-specific state. func (m *machine) initArchState() error { // Set the legacy TSS address. This address is covered by the reserved // range (up to 4GB). In fact, this is a main reason it exists. if _, _, errno := unix.RawSyscall( unix.SYS_IOCTL, uintptr(m.fd), _KVM_SET_TSS_ADDR, uintptr(reservedMemory-(3*hostarch.PageSize))); errno != 0 { return errno } // Initialize all vCPUs to minimize kvm ioctl-s allowed by seccomp filters. m.mu.Lock() for i := 0; i < m.maxVCPUs; i++ { m.createVCPU(i) } m.mu.Unlock() c := m.Get() defer m.Put(c) // Enable CPUID faulting, if possible. Note that this also serves as a // basic platform sanity tests, since we will enter guest mode for the // first time here. The recovery is necessary, since if we fail to read // the platform info register, we will retry to host mode and // ultimately need to handle a segmentation fault. old := debug.SetPanicOnFault(true) defer func() { recover() debug.SetPanicOnFault(old) }() bluepill(c) ring0.SetCPUIDFaulting(true) return nil } type vCPUArchState struct { // PCIDs is the set of PCIDs for this vCPU. // // This starts above fixedKernelPCID. PCIDs *pagetables.PCIDs } const ( // fixedKernelPCID is a fixed kernel PCID used for the kernel page // tables. We must start allocating user PCIDs above this in order to // avoid any conflict (see below). fixedKernelPCID = 1 // poolPCIDs is the number of PCIDs to record in the database. As this // grows, assignment can take longer, since it is a simple linear scan. // Beyond a relatively small number, there are likely few perform // benefits, since the TLB has likely long since lost any translations // from more than a few PCIDs past. poolPCIDs = 8 ) // initArchState initializes architecture-specific state. func (c *vCPU) initArchState() error { var ( kernelSystemRegs systemRegs kernelUserRegs userRegs ) // Set base control registers. kernelSystemRegs.CR0 = c.CR0() kernelSystemRegs.CR4 = c.CR4() kernelSystemRegs.EFER = c.EFER() // Set the IDT & GDT in the registers. 
kernelSystemRegs.IDT.base, kernelSystemRegs.IDT.limit = c.IDT() kernelSystemRegs.GDT.base, kernelSystemRegs.GDT.limit = c.GDT() kernelSystemRegs.CS.Load(&ring0.KernelCodeSegment, ring0.Kcode) kernelSystemRegs.DS.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.ES.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.SS.Load(&ring0.KernelDataSegment, ring0.Kdata) kernelSystemRegs.FS.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.GS.Load(&ring0.UserDataSegment, ring0.Udata) tssBase, tssLimit, tss := c.TSS() kernelSystemRegs.TR.Load(tss, ring0.Tss) kernelSystemRegs.TR.base = tssBase kernelSystemRegs.TR.limit = uint32(tssLimit) // Point to kernel page tables, with no initial PCID. kernelSystemRegs.CR3 = c.machine.kernel.PageTables.CR3(false, 0) // Initialize the PCID database. if hasGuestPCID { // Note that NewPCIDs may return a nil table here, in which // case we simply don't use PCID support (see below). In // practice, this should not happen, however. c.PCIDs = pagetables.NewPCIDs(fixedKernelPCID+1, poolPCIDs) } // Set the CPUID; this is required before setting system registers, // since KVM will reject several CR4 bits if the CPUID does not // indicate the support is available. if err := c.setCPUID(); err != nil { return err } // Set the entrypoint for the kernel. kernelUserRegs.RIP = uint64(ring0.AddrOfStart()) kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer()) kernelUserRegs.RSP = c.StackTop() kernelUserRegs.RFLAGS = ring0.KernelFlagsSet // Set the system registers. if err := c.setSystemRegisters(&kernelSystemRegs); err != nil { return err } // Set the user registers. if errno := c.setUserRegisters(&kernelUserRegs); errno != 0 { return fmt.Errorf("error setting user registers: %v", errno) } // Set the time offset to the host native time. return c.setSystemTime() } // bitsForScaling returns the bits available for storing the fraction component // of the TSC scaling ratio. // It is set using getBitsForScaling when the KVM platform is initialized. var bitsForScaling int64 // getBitsForScaling returns the bits available for storing the fraction component // of the TSC scaling ratio. This allows us to replicate the (bad) math done by // the kernel below in scaledTSC, and ensure we can compute an exact zero // offset in setSystemTime. // // These constants correspond to kvm_tsc_scaling_ratio_frac_bits. func getBitsForScaling() int64 { fs := cpuid.HostFeatureSet() if fs.Intel() { return 48 // See vmx.c (kvm sources). } else if fs.AMD() { return 32 // See svm.c (svm sources). } else { return 63 // Unknown: theoretical maximum. } } // scaledTSC returns the host TSC scaled by the given frequency. // // This assumes a current frequency of 1. We require only the unitless ratio of // rawFreq to some current frequency. See setSystemTime for context. // // The kernel math guarantees that all bits of the multiplication and division // will be correctly preserved and applied. However, it is not possible to // actually store the ratio correctly. So we need to use the same schema in // order to calculate the scaled frequency and get the same result. // // We can assume that the current frequency is (1), so we are calculating a // strict inverse of this value. This simplifies this function considerably. 
// // Roughly, the returned value "scaledTSC" will have: // scaledTSC/hostTSC == 1/rawFreq // //go:nosplit func scaledTSC(rawFreq uintptr) int64 { scale := int64(1 << bitsForScaling) ratio := big.NewInt(scale / int64(rawFreq)) ratio.Mul(ratio, big.NewInt(int64(ktime.Rdtsc()))) ratio.Div(ratio, big.NewInt(scale)) return ratio.Int64() } // setSystemTime sets the vCPU to the system time. func (c *vCPU) setSystemTime() error { // Attempt to set the offset directly. This is supported as of Linux 5.16, // or commit 828ca89628bfcb1b8f27535025f69dd00eb55207. if err := c.setTSCOffset(); err == nil { return err } // If tsc scaling is not supported, fallback to legacy mode. if !c.machine.tscControl { return c.setSystemTimeLegacy() } // First, scale down the clock frequency to the lowest value allowed by // the API itself. How low we can go depends on the underlying // hardware, but it is typically ~1/2^48 for Intel, ~1/2^32 for AMD. // Even the lower bound here will take a 4GHz frequency down to 1Hz, // meaning that everything should be able to handle a Khz setting of 1 // with bits to spare. // // Note that reducing the clock does not typically require special // capabilities as it is emulated in KVM. We don't actually use this // capability, but it means that this method should be robust to // different hardware configurations. rawFreq, err := c.getTSCFreq() if err != nil { return c.setSystemTimeLegacy() } if err := c.setTSCFreq(1); err != nil { return c.setSystemTimeLegacy() } // Always restore the original frequency. defer func() { if err := c.setTSCFreq(rawFreq); err != nil { panic(err.Error()) } }() // Attempt to set the system time in this compressed world. The // calculation for offset normally looks like: // // offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc()); // // So as long as the kvm_scale_tsc component is constant before and // after the call to set the TSC value (and it is passes as the // target_tsc), we will compute an offset value of zero. // // This is effectively cheating to make our "setSystemTime" call so // unbelievably, incredibly fast that we do it "instantly" and all the // calculations result in an offset of zero. lastTSC := scaledTSC(rawFreq) for { if err := c.setTSC(uint64(lastTSC)); err != nil { return err } nextTSC := scaledTSC(rawFreq) if lastTSC == nextTSC { return nil } lastTSC = nextTSC // Try again. } } // nonCanonical generates a canonical address return. // //go:nosplit func nonCanonical(addr uint64, signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) { *info = linux.SignalInfo{ Signo: signal, Code: linux.SI_KERNEL, } info.SetAddr(addr) // Include address. return hostarch.NoAccess, platform.ErrContextSignal } // fault generates an appropriate fault return. // //go:nosplit func (c *vCPU) fault(signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) { bluepill(c) // Probably no-op, but may not be. faultAddr := ring0.ReadCR2() code, user := c.ErrorCode() if !user { // The last fault serviced by this CPU was not a user // fault, so we can't reliably trust the faultAddr or // the code provided here. We need to re-execute. return hostarch.NoAccess, platform.ErrContextInterrupt } // Reset the pointed SignalInfo. *info = linux.SignalInfo{Signo: signal} info.SetAddr(uint64(faultAddr)) accessType := hostarch.AccessType{} if signal == int32(unix.SIGSEGV) { accessType = hostarch.AccessType{ Read: code&(1<<1) == 0, Write: code&(1<<1) != 0, Execute: code&(1<<4) != 0,
} else { info.Code = 2 // SEGV_ACCERR. } return accessType, platform.ErrContextSignal } //go:nosplit //go:noinline func loadByte(ptr *byte) byte { return *ptr } // SwitchToUser unpacks architectural-details. func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *linux.SignalInfo) (hostarch.AccessType, error) { // Check for canonical addresses. if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) { return nonCanonical(regs.Rip, int32(unix.SIGSEGV), info) } else if !ring0.IsCanonical(regs.Rsp) { return nonCanonical(regs.Rsp, int32(unix.SIGBUS), info) } else if !ring0.IsCanonical(regs.Fs_base) { return nonCanonical(regs.Fs_base, int32(unix.SIGBUS), info) } else if !ring0.IsCanonical(regs.Gs_base) { return nonCanonical(regs.Gs_base, int32(unix.SIGBUS), info) } // Assign PCIDs. if c.PCIDs != nil { var requireFlushPCID bool // Force a flush? switchOpts.UserPCID, requireFlushPCID = c.PCIDs.Assign(switchOpts.PageTables) switchOpts.KernelPCID = fixedKernelPCID switchOpts.Flush = switchOpts.Flush || requireFlushPCID } // See below. var vector ring0.Vector // Past this point, stack growth can cause system calls (and a break // from guest mode). So we need to ensure that between the bluepill // call here and the switch call immediately below, no additional // allocations occur. entersyscall() bluepill(c) vector = c.CPU.SwitchToUser(switchOpts) exitsyscall() switch vector { case ring0.Syscall, ring0.SyscallInt80: // Fast path: system call executed. return hostarch.NoAccess, nil case ring0.PageFault: return c.fault(int32(unix.SIGSEGV), info) case ring0.Debug, ring0.Breakpoint: *info = linux.SignalInfo{ Signo: int32(unix.SIGTRAP), Code: 1, // TRAP_BRKPT (breakpoint). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.GeneralProtectionFault, ring0.SegmentNotPresent, ring0.BoundRangeExceeded, ring0.InvalidTSS, ring0.StackSegmentFault: *info = linux.SignalInfo{ Signo: int32(unix.SIGSEGV), Code: linux.SI_KERNEL, } info.SetAddr(switchOpts.Registers.Rip) // Include address. if vector == ring0.GeneralProtectionFault { // When CPUID faulting is enabled, we will generate a #GP(0) when // userspace executes a CPUID instruction. This is handled above, // because we need to be able to map and read user memory. return hostarch.AccessType{}, tryCPUIDError{} } return hostarch.AccessType{}, platform.ErrContextSignal case ring0.InvalidOpcode: *info = linux.SignalInfo{ Signo: int32(unix.SIGILL), Code: 1, // ILL_ILLOPC (illegal opcode). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.DivideByZero: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 1, // FPE_INTDIV (divide by zero). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Overflow: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 2, // FPE_INTOVF (integer overflow). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.X87FloatingPointException, ring0.SIMDFloatingPointException: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 7, // FPE_FLTINV (invalid operation). } info.SetAddr(switchOpts.Registers.Rip) // Include address. 
return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Vector(bounce): // ring0.VirtualizationException return hostarch.NoAccess, platform.ErrContextInterrupt case ring0.AlignmentCheck: *info = linux.SignalInfo{ Signo: int32(unix.SIGBUS), Code: 2, // BUS_ADRERR (physical address does not exist). } return hostarch.NoAccess, platform.ErrContextSignal case ring0.NMI: // An NMI is generated only when a fault is not servicable by // KVM itself, so we think some mapping is writeable but it's // really not. This could happen, e.g. if some file is // truncated (and would generate a SIGBUS) and we map it // directly into the instance. return c.fault(int32(unix.SIGBUS), info) case ring0.DeviceNotAvailable, ring0.DoubleFault, ring0.CoprocessorSegmentOverrun, ring0.MachineCheck, ring0.SecurityException: fallthrough default: panic(fmt.Sprintf("unexpected vector: 0x%x", vector)) } } func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) { // Map all the executable regions so that all the entry functions // are mapped in the upper half. if err := applyVirtualRegions(func(vr virtualRegion) { if excludeVirtualRegion(vr) || vr.filename == "[vsyscall]" { return } if vr.accessType.Execute { r := vr.region physical, length, ok := translateToPhysical(r.virtual) if !ok || length < r.length { panic("impossible translation") } pageTable.Map( hostarch.Addr(ring0.KernelStartAddress|r.virtual), r.length, pagetables.MapOpts{AccessType: hostarch.Execute, Global: true}, physical) } }); err != nil { panic(fmt.Sprintf("error parsing /proc/self/maps: %v", err)) } for start, end := range m.kernel.EntryRegions() { regionLen := end - start physical, length, ok := translateToPhysical(start) if !ok || length < regionLen { panic("impossible translation") } pageTable.Map( hostarch.Addr(ring0.KernelStartAddress|start), regionLen, pagetables.MapOpts{AccessType: hostarch.ReadWrite, Global: true}, physical) } } // getMaxVCPU get max vCPU number func (m *machine) getMaxVCPU() { maxVCPUs, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS) if errno != 0 { m.maxVCPUs = _KVM_NR_VCPUS } else { m.maxVCPUs = int(maxVCPUs) } // The goal here is to avoid vCPU contentions for reasonable workloads. // But "reasonable" isn't defined well in this case. Let's say that CPU // overcommit with factor 2 is still acceptable. We allocate a set of // vCPU for each goruntime processor (P) and two sets of vCPUs to run // user code. rCPUs := runtime.GOMAXPROCS(0) if 3*rCPUs < m.maxVCPUs { m.maxVCPUs = 3 * rCPUs } } func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion { return physicalRegions }
} } if !accessType.Write && !accessType.Execute { info.Code = 1 // SEGV_MAPERR.
random_line_split
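getMaxVCPU in the record above asks KVM for its vCPU limit via KVM_CHECK_EXTENSION and then caps the result at three vCPUs per Go runtime processor to bound contention. The short sketch below restates that heuristic with illustrative names (defaultMaxVCPUs stands in for _KVM_NR_VCPUS); it is an assumption-laden reading aid, not the platform's actual implementation.

package main

import (
	"fmt"
	"runtime"
)

// defaultMaxVCPUs stands in for _KVM_NR_VCPUS, the fallback used when the
// KVM_CHECK_EXTENSION ioctl fails.
const defaultMaxVCPUs = 255

// chooseMaxVCPUs applies the same sizing rule as getMaxVCPU above: start
// from the KVM-reported limit (or the fallback) and cap at three vCPUs per
// runtime P so vCPU contention stays bounded for ordinary workloads.
func chooseMaxVCPUs(kvmReported int, ioctlFailed bool) int {
	n := kvmReported
	if ioctlFailed {
		n = defaultMaxVCPUs
	}
	if limit := 3 * runtime.GOMAXPROCS(0); limit < n {
		n = limit
	}
	return n
}

func main() {
	fmt.Println(chooseMaxVCPUs(1024, false)) // capped by 3 * GOMAXPROCS on most hosts
	fmt.Println(chooseMaxVCPUs(0, true))     // ioctl failure falls back to the default
}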
machine_amd64.go
// Copyright 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build amd64 // +build amd64 package kvm import ( "fmt" "math/big" "reflect" "runtime" "runtime/debug" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/cpuid" "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/platform" ktime "gvisor.dev/gvisor/pkg/sentry/time" ) // initArchState initializes architecture-specific state. func (m *machine) initArchState() error { // Set the legacy TSS address. This address is covered by the reserved // range (up to 4GB). In fact, this is a main reason it exists. if _, _, errno := unix.RawSyscall( unix.SYS_IOCTL, uintptr(m.fd), _KVM_SET_TSS_ADDR, uintptr(reservedMemory-(3*hostarch.PageSize))); errno != 0 { return errno } // Initialize all vCPUs to minimize kvm ioctl-s allowed by seccomp filters. m.mu.Lock() for i := 0; i < m.maxVCPUs; i++ { m.createVCPU(i) } m.mu.Unlock() c := m.Get() defer m.Put(c) // Enable CPUID faulting, if possible. Note that this also serves as a // basic platform sanity tests, since we will enter guest mode for the // first time here. The recovery is necessary, since if we fail to read // the platform info register, we will retry to host mode and // ultimately need to handle a segmentation fault. old := debug.SetPanicOnFault(true) defer func() { recover() debug.SetPanicOnFault(old) }() bluepill(c) ring0.SetCPUIDFaulting(true) return nil } type vCPUArchState struct { // PCIDs is the set of PCIDs for this vCPU. // // This starts above fixedKernelPCID. PCIDs *pagetables.PCIDs } const ( // fixedKernelPCID is a fixed kernel PCID used for the kernel page // tables. We must start allocating user PCIDs above this in order to // avoid any conflict (see below). fixedKernelPCID = 1 // poolPCIDs is the number of PCIDs to record in the database. As this // grows, assignment can take longer, since it is a simple linear scan. // Beyond a relatively small number, there are likely few perform // benefits, since the TLB has likely long since lost any translations // from more than a few PCIDs past. poolPCIDs = 8 ) // initArchState initializes architecture-specific state. func (c *vCPU) initArchState() error { var ( kernelSystemRegs systemRegs kernelUserRegs userRegs ) // Set base control registers. kernelSystemRegs.CR0 = c.CR0() kernelSystemRegs.CR4 = c.CR4() kernelSystemRegs.EFER = c.EFER() // Set the IDT & GDT in the registers. 
kernelSystemRegs.IDT.base, kernelSystemRegs.IDT.limit = c.IDT() kernelSystemRegs.GDT.base, kernelSystemRegs.GDT.limit = c.GDT() kernelSystemRegs.CS.Load(&ring0.KernelCodeSegment, ring0.Kcode) kernelSystemRegs.DS.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.ES.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.SS.Load(&ring0.KernelDataSegment, ring0.Kdata) kernelSystemRegs.FS.Load(&ring0.UserDataSegment, ring0.Udata) kernelSystemRegs.GS.Load(&ring0.UserDataSegment, ring0.Udata) tssBase, tssLimit, tss := c.TSS() kernelSystemRegs.TR.Load(tss, ring0.Tss) kernelSystemRegs.TR.base = tssBase kernelSystemRegs.TR.limit = uint32(tssLimit) // Point to kernel page tables, with no initial PCID. kernelSystemRegs.CR3 = c.machine.kernel.PageTables.CR3(false, 0) // Initialize the PCID database. if hasGuestPCID { // Note that NewPCIDs may return a nil table here, in which // case we simply don't use PCID support (see below). In // practice, this should not happen, however. c.PCIDs = pagetables.NewPCIDs(fixedKernelPCID+1, poolPCIDs) } // Set the CPUID; this is required before setting system registers, // since KVM will reject several CR4 bits if the CPUID does not // indicate the support is available. if err := c.setCPUID(); err != nil { return err } // Set the entrypoint for the kernel. kernelUserRegs.RIP = uint64(ring0.AddrOfStart()) kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer()) kernelUserRegs.RSP = c.StackTop() kernelUserRegs.RFLAGS = ring0.KernelFlagsSet // Set the system registers. if err := c.setSystemRegisters(&kernelSystemRegs); err != nil { return err } // Set the user registers. if errno := c.setUserRegisters(&kernelUserRegs); errno != 0 { return fmt.Errorf("error setting user registers: %v", errno) } // Set the time offset to the host native time. return c.setSystemTime() } // bitsForScaling returns the bits available for storing the fraction component // of the TSC scaling ratio. // It is set using getBitsForScaling when the KVM platform is initialized. var bitsForScaling int64 // getBitsForScaling returns the bits available for storing the fraction component // of the TSC scaling ratio. This allows us to replicate the (bad) math done by // the kernel below in scaledTSC, and ensure we can compute an exact zero // offset in setSystemTime. // // These constants correspond to kvm_tsc_scaling_ratio_frac_bits. func getBitsForScaling() int64 { fs := cpuid.HostFeatureSet() if fs.Intel() { return 48 // See vmx.c (kvm sources). } else if fs.AMD() { return 32 // See svm.c (svm sources). } else { return 63 // Unknown: theoretical maximum. } } // scaledTSC returns the host TSC scaled by the given frequency. // // This assumes a current frequency of 1. We require only the unitless ratio of // rawFreq to some current frequency. See setSystemTime for context. // // The kernel math guarantees that all bits of the multiplication and division // will be correctly preserved and applied. However, it is not possible to // actually store the ratio correctly. So we need to use the same schema in // order to calculate the scaled frequency and get the same result. // // We can assume that the current frequency is (1), so we are calculating a // strict inverse of this value. This simplifies this function considerably. 
// // Roughly, the returned value "scaledTSC" will have: // scaledTSC/hostTSC == 1/rawFreq // //go:nosplit func scaledTSC(rawFreq uintptr) int64 { scale := int64(1 << bitsForScaling) ratio := big.NewInt(scale / int64(rawFreq)) ratio.Mul(ratio, big.NewInt(int64(ktime.Rdtsc()))) ratio.Div(ratio, big.NewInt(scale)) return ratio.Int64() } // setSystemTime sets the vCPU to the system time. func (c *vCPU) setSystemTime() error { // Attempt to set the offset directly. This is supported as of Linux 5.16, // or commit 828ca89628bfcb1b8f27535025f69dd00eb55207. if err := c.setTSCOffset(); err == nil { return err } // If tsc scaling is not supported, fallback to legacy mode. if !c.machine.tscControl { return c.setSystemTimeLegacy() } // First, scale down the clock frequency to the lowest value allowed by // the API itself. How low we can go depends on the underlying // hardware, but it is typically ~1/2^48 for Intel, ~1/2^32 for AMD. // Even the lower bound here will take a 4GHz frequency down to 1Hz, // meaning that everything should be able to handle a Khz setting of 1 // with bits to spare. // // Note that reducing the clock does not typically require special // capabilities as it is emulated in KVM. We don't actually use this // capability, but it means that this method should be robust to // different hardware configurations. rawFreq, err := c.getTSCFreq() if err != nil { return c.setSystemTimeLegacy() } if err := c.setTSCFreq(1); err != nil { return c.setSystemTimeLegacy() } // Always restore the original frequency. defer func() { if err := c.setTSCFreq(rawFreq); err != nil { panic(err.Error()) } }() // Attempt to set the system time in this compressed world. The // calculation for offset normally looks like: // // offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc()); // // So as long as the kvm_scale_tsc component is constant before and // after the call to set the TSC value (and it is passes as the // target_tsc), we will compute an offset value of zero. // // This is effectively cheating to make our "setSystemTime" call so // unbelievably, incredibly fast that we do it "instantly" and all the // calculations result in an offset of zero. lastTSC := scaledTSC(rawFreq) for { if err := c.setTSC(uint64(lastTSC)); err != nil { return err } nextTSC := scaledTSC(rawFreq) if lastTSC == nextTSC { return nil } lastTSC = nextTSC // Try again. } } // nonCanonical generates a canonical address return. // //go:nosplit func nonCanonical(addr uint64, signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) { *info = linux.SignalInfo{ Signo: signal, Code: linux.SI_KERNEL, } info.SetAddr(addr) // Include address. return hostarch.NoAccess, platform.ErrContextSignal } // fault generates an appropriate fault return. // //go:nosplit func (c *vCPU) fault(signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) { bluepill(c) // Probably no-op, but may not be. faultAddr := ring0.ReadCR2() code, user := c.ErrorCode() if !user { // The last fault serviced by this CPU was not a user // fault, so we can't reliably trust the faultAddr or // the code provided here. We need to re-execute. return hostarch.NoAccess, platform.ErrContextInterrupt } // Reset the pointed SignalInfo. *info = linux.SignalInfo{Signo: signal} info.SetAddr(uint64(faultAddr)) accessType := hostarch.AccessType{} if signal == int32(unix.SIGSEGV) { accessType = hostarch.AccessType{ Read: code&(1<<1) == 0, Write: code&(1<<1) != 0, Execute: code&(1<<4) != 0, } } if !accessType.Write && !accessType.Execute { info.Code = 1 // SEGV_MAPERR. 
} else { info.Code = 2 // SEGV_ACCERR. } return accessType, platform.ErrContextSignal } //go:nosplit //go:noinline func loadByte(ptr *byte) byte { return *ptr } // SwitchToUser unpacks architectural-details. func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *linux.SignalInfo) (hostarch.AccessType, error)
func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) { // Map all the executable regions so that all the entry functions // are mapped in the upper half. if err := applyVirtualRegions(func(vr virtualRegion) { if excludeVirtualRegion(vr) || vr.filename == "[vsyscall]" { return } if vr.accessType.Execute { r := vr.region physical, length, ok := translateToPhysical(r.virtual) if !ok || length < r.length { panic("impossible translation") } pageTable.Map( hostarch.Addr(ring0.KernelStartAddress|r.virtual), r.length, pagetables.MapOpts{AccessType: hostarch.Execute, Global: true}, physical) } }); err != nil { panic(fmt.Sprintf("error parsing /proc/self/maps: %v", err)) } for start, end := range m.kernel.EntryRegions() { regionLen := end - start physical, length, ok := translateToPhysical(start) if !ok || length < regionLen { panic("impossible translation") } pageTable.Map( hostarch.Addr(ring0.KernelStartAddress|start), regionLen, pagetables.MapOpts{AccessType: hostarch.ReadWrite, Global: true}, physical) } } // getMaxVCPU get max vCPU number func (m *machine) getMaxVCPU() { maxVCPUs, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS) if errno != 0 { m.maxVCPUs = _KVM_NR_VCPUS } else { m.maxVCPUs = int(maxVCPUs) } // The goal here is to avoid vCPU contentions for reasonable workloads. // But "reasonable" isn't defined well in this case. Let's say that CPU // overcommit with factor 2 is still acceptable. We allocate a set of // vCPU for each goruntime processor (P) and two sets of vCPUs to run // user code. rCPUs := runtime.GOMAXPROCS(0) if 3*rCPUs < m.maxVCPUs { m.maxVCPUs = 3 * rCPUs } } func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion { return physicalRegions }
{ // Check for canonical addresses. if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) { return nonCanonical(regs.Rip, int32(unix.SIGSEGV), info) } else if !ring0.IsCanonical(regs.Rsp) { return nonCanonical(regs.Rsp, int32(unix.SIGBUS), info) } else if !ring0.IsCanonical(regs.Fs_base) { return nonCanonical(regs.Fs_base, int32(unix.SIGBUS), info) } else if !ring0.IsCanonical(regs.Gs_base) { return nonCanonical(regs.Gs_base, int32(unix.SIGBUS), info) } // Assign PCIDs. if c.PCIDs != nil { var requireFlushPCID bool // Force a flush? switchOpts.UserPCID, requireFlushPCID = c.PCIDs.Assign(switchOpts.PageTables) switchOpts.KernelPCID = fixedKernelPCID switchOpts.Flush = switchOpts.Flush || requireFlushPCID } // See below. var vector ring0.Vector // Past this point, stack growth can cause system calls (and a break // from guest mode). So we need to ensure that between the bluepill // call here and the switch call immediately below, no additional // allocations occur. entersyscall() bluepill(c) vector = c.CPU.SwitchToUser(switchOpts) exitsyscall() switch vector { case ring0.Syscall, ring0.SyscallInt80: // Fast path: system call executed. return hostarch.NoAccess, nil case ring0.PageFault: return c.fault(int32(unix.SIGSEGV), info) case ring0.Debug, ring0.Breakpoint: *info = linux.SignalInfo{ Signo: int32(unix.SIGTRAP), Code: 1, // TRAP_BRKPT (breakpoint). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.GeneralProtectionFault, ring0.SegmentNotPresent, ring0.BoundRangeExceeded, ring0.InvalidTSS, ring0.StackSegmentFault: *info = linux.SignalInfo{ Signo: int32(unix.SIGSEGV), Code: linux.SI_KERNEL, } info.SetAddr(switchOpts.Registers.Rip) // Include address. if vector == ring0.GeneralProtectionFault { // When CPUID faulting is enabled, we will generate a #GP(0) when // userspace executes a CPUID instruction. This is handled above, // because we need to be able to map and read user memory. return hostarch.AccessType{}, tryCPUIDError{} } return hostarch.AccessType{}, platform.ErrContextSignal case ring0.InvalidOpcode: *info = linux.SignalInfo{ Signo: int32(unix.SIGILL), Code: 1, // ILL_ILLOPC (illegal opcode). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.DivideByZero: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 1, // FPE_INTDIV (divide by zero). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Overflow: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 2, // FPE_INTOVF (integer overflow). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.X87FloatingPointException, ring0.SIMDFloatingPointException: *info = linux.SignalInfo{ Signo: int32(unix.SIGFPE), Code: 7, // FPE_FLTINV (invalid operation). } info.SetAddr(switchOpts.Registers.Rip) // Include address. return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Vector(bounce): // ring0.VirtualizationException return hostarch.NoAccess, platform.ErrContextInterrupt case ring0.AlignmentCheck: *info = linux.SignalInfo{ Signo: int32(unix.SIGBUS), Code: 2, // BUS_ADRERR (physical address does not exist). 
} return hostarch.NoAccess, platform.ErrContextSignal case ring0.NMI: // An NMI is generated only when a fault is not servicable by // KVM itself, so we think some mapping is writeable but it's // really not. This could happen, e.g. if some file is // truncated (and would generate a SIGBUS) and we map it // directly into the instance. return c.fault(int32(unix.SIGBUS), info) case ring0.DeviceNotAvailable, ring0.DoubleFault, ring0.CoprocessorSegmentOverrun, ring0.MachineCheck, ring0.SecurityException: fallthrough default: panic(fmt.Sprintf("unexpected vector: 0x%x", vector)) } }
identifier_body
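SwitchToUser in the record above rejects non-canonical RIP/RSP/FS_BASE/GS_BASE values before entering the guest. Assuming ring0.IsCanonical implements the usual x86-64 rule for 48-bit virtual addresses (bits 63..47 must all equal bit 47), the self-contained sketch below shows that check with a hypothetical helper name.

package main

import "fmt"

// isCanonical reports whether addr is canonical on x86-64 with 48-bit
// virtual addresses: bits 63..47 must all be copies of bit 47. This is
// assumed to match what ring0.IsCanonical checks in SwitchToUser above.
func isCanonical(addr uint64) bool {
	top := int64(addr) >> 47 // arithmetic shift sign-extends from bit 47
	return top == 0 || top == -1
}

func main() {
	fmt.Println(isCanonical(0x00007fffffffffff)) // true: top of the user half
	fmt.Println(isCanonical(0xffff800000000000)) // true: bottom of the kernel half
	fmt.Println(isCanonical(0x0000800000000000)) // false: inside the non-canonical hole
}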
main.py
''' 2D Coupled Burgers' system model === Distributed by: Notre Dame CICS (MIT Liscense) - Associated publication: url: http://www.sciencedirect.com/science/article/pii/S0021999119307612 doi: https://doi.org/10.1016/j.jcp.2019.109056 github: https://github.com/cics-nd/ar-pde-cnn === ''' from args import Parser from nn.denseEDcirc2d import DenseED from nn.bayesNN import BayesNN from nn.swag import SwagNN from nn.burger2DFiniteDifference import Burger2DIntegrate from utils.utils import mkdirs, toNumpy, toTuple from utils.burgerLoader2D import BurgerLoader from utils.post import plotPred, plotSamples from torch.optim.lr_scheduler import ExponentialLR import torch import torch.nn.functional as F import numpy as np import os, time def train(args, model, burgerInt, train_loader, optimizer, tsteps, tback, tstart, dt=0.1): ''' Trains the model Args: args (argparse): object with programs arguements model (PyTorch model): SWAG DenseED model to be tested burgerInt (BurgerIntegrate): 1D Burger system time integrator train_loader (dataloader): dataloader with training cases (use createTrainingLoader) optimizer (Pytorch Optm): optimzer tsteps (np.array): [mb] number of timesteps to predict for each mini-batch tback (np.array): [mb] number of timesteps to forward predict before back prop tstart (np.array): [mb] time-step to start updating model (kept at 0 for now) dt (float): current time-step size of the model (used to progressively increase time-step size) Returns: loss_total (float): negative log joint posterior mse_total (float): mean square error between the prediction and time-integrator ''' model.train() loss_total = 0 mse_total = 0 # Mini-batch loop for batch_idx, input in enumerate(train_loader): # input [b, 2, x, y] # Expand input to match model in channels dims = torch.ones(len(input.shape)) dims[1] = args.nic input = input.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) loss = 0 # Loop for number of timesteps optimizer.zero_grad() for i in range(tsteps[batch_idx]): uPred = model(input[:,-2*args.nic:,:]) if(i < tstart[batch_idx]): # Don't calculate residual, just predict forward input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred[:,0,:].unsqueeze(1).detach() input = torch.cat([input, input0], dim=1) else: # Calculate loss # Start with implicit time integration ustar = burgerInt.crankNicolson(uPred, input[:,-2:,:], dt) # Calc. 
loss based on posterior of the model log_joint = model.calc_neg_log_joint(uPred, ustar, len(train_loader)) loss = loss + log_joint loss_total = loss_total + loss.data.item() mse_total += F.mse_loss(uPred.detach(), ustar.detach()).item() # MSE for scheduler # Back-prop through two timesteps if((i+1)%tback[batch_idx] == 0): loss.backward() loss = 0 optimizer.step() optimizer.zero_grad() input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) else: input0 = uPred input = torch.cat([input, input0], dim=1) if(batch_idx % 10 == 1): print("Epoch {}, Mini-batch {}/{} ({}%) ".format(epoch, batch_idx, \ len(train_loader), int(100*batch_idx/len(train_loader)))) return loss_total/len(train_loader), mse_total/len(train_loader) def test(args, model, test_loader, tstep=100, test_every=2): ''' Tests the deterministic model Args: args (argparse): object with programs arguements model (PyTorch model): DenseED model to be tested test_loader (dataloader): dataloader with test cases (use createTestingLoader) tstep (int): number of timesteps to predict for test_every (int): Time-step interval to test (must match simulator), default = 2 Returns: u_out (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] predicted quantities u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator ''' model.eval() mb_size = int(len(test_loader.dataset)/len(test_loader)) u_out = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel) u_target = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel) for bidx, (input0, uTarget0) in enumerate(test_loader): # Expand input to match model in channels dims = torch.ones(len(input0.shape)) dims[1] = args.nic input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) u_out[bidx*mb_size:(bidx+1)*mb_size,0] = input0 u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every+1)].cpu() # Auto-regress for t_idx in range(tstep): uPred = model(input[:,-2*args.nic:,:,:]) if((t_idx+1)%test_every == 0):
input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) return u_out, u_target def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10, test_every=2): ''' Tests the samples of the Bayesian SWAG model Args: args (argparse): object with programs arguements model (PyTorch model): DenseED model to be tested test_loader (dataloader): dataloader with test cases (use createTestingLoader) tstep (int): number of timesteps to predict for n_samples (int): number of model samples to draw test_every (int): Time-step interval to test (must match simulator), default = 2 Returns: u_out (torch.Tensor): [d x nsamples x (tstep+1)//test_every x 2 x nel x nel] predicted quantities of each sample u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator ''' mb_size = int(len(test_loader.dataset)/len(test_loader)) u_out = torch.zeros(len(test_loader.dataset), n_samples, (tstep)//test_every + 1, 2, args.nel, args.nel) u_target = torch.zeros(len(test_loader.dataset), (tstep)//test_every + 1, 2, args.nel, args.nel) for i in range(n_samples): print('Executing model sample {:d}'.format(i)) model = swag_nn.sample(diagCov=True) # Use diagonal approx. only when training model.eval() for bidx, (input0, uTarget0) in enumerate(test_loader): # Expand input to match model in channels dims = torch.ones(len(input0.shape)) dims[1] = args.nic input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) if(i == 0): # Save target data u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every + 1)] u_out[bidx*mb_size:(bidx+1)*mb_size,i,0,:,:,:] = input0 # Auto-regress for t_idx in range(tstep): uPred = model(input[:,-2*args.nic:,:]) if((t_idx+1)%test_every == 0): u_out[bidx*mb_size:(bidx+1)*mb_size, i, t_idx//test_every+1,:,:,:] = uPred input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) return u_out, u_target if __name__ == '__main__': # Parse arguements args = Parser().parse() use_cuda = "cpu" if(torch.cuda.is_available()): use_cuda = "cuda" args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("Torch device:{}".format(args.device)) # Domain settings, matches solver settings x0 = 0 x1 = 1.0 args.dx = (x1 - x0)/args.nel # Create training loader burgerLoader = BurgerLoader(dt=args.dt) training_loader = burgerLoader.createTrainingLoader(args.ntrain, args.nel, batch_size=args.batch_size) # Create training loader test_cases = np.array([400, 401]).astype(int) testing_loader = burgerLoader.createTestingLoader(args.data_dir, test_cases, simdt=0.005, batch_size=2) # Create DenseED model denseED = DenseED(in_channels=2*args.nic, out_channels=2*args.noc, blocks=args.blocks, growth_rate=args.growth_rate, init_features=args.init_features, bn_size=args.bn_size, drop_rate=args.drop_rate, bottleneck=False, out_activation=None).to(args.device) # Bayesian neural network bayes_nn = BayesNN(args, denseED) # Stochastic weighted averages swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=args.swag_max) # Optimizer parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.lr_beta}, {'params': bayes_nn.model.features.parameters()}] optimizer = torch.optim.Adam(parameters, lr=args.lr, weight_decay=0.0) # Learning rate scheduler scheduler = ExponentialLR(optimizer, gamma=0.995) # If we are starting from a specific epoch, attempt to load a model if(args.epoch_start > 0): optimizer, scheduler = 
swag_nn.loadModel(args.epoch_start, optimizer, scheduler, file_dir=args.ckpt_dir) # Create Burger time integrator # Here we will use 2nd order finite differences for spacial derivatives burgerInt = Burger2DIntegrate(args.dx, nu=args.nu, grad_kernels=[3, 3], device=args.device) # Progressively increase the time step to help stabailize training dtStep = 25 dtArr = np.linspace(np.log10(args.dt)-1, np.log10(args.dt), dtStep) dtArr = 10**(dtArr) # ========== Epoch loop ============ print('>>> Training network, lets rock') for epoch in range(args.epoch_start+1, args.epochs + 1): if(epoch == args.swag_start): print('Starting to sample weights every {:d} epochs'.format(args.swag_freq)) # Mannually set learning rate to swag sampling rate parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.swag_lr_beta}, {'params': bayes_nn.model.features.parameters()}] optimizer = torch.optim.Adam(parameters, lr=args.swag_lr, weight_decay=0.0) scheduler = ExponentialLR(optimizer, gamma=0.9) dt = dtArr[min(epoch, dtArr.shape[0]-1)] # Number of timesteps to predict forward tsteps = np.zeros(len(training_loader)).astype(int) + int(90*min(epoch/75, 1) + 10) if(epoch >= args.swag_start): # Once SWAG sampling starts randomly pick a length to unroll. # This is done to help speed training up. tsteps = np.zeros(len(training_loader)).astype(int) + np.random.randint(10, int(90*min(epoch/75., 1.0) + 10), len(training_loader)) # Back-prop interval tback = np.zeros((len(training_loader))) + np.random.randint(2,5,tsteps.shape[0]) # Time-step to start training at tstart = np.zeros(tsteps.shape[0]) # Train network loss, mse = train(args, swag_nn, burgerInt, training_loader, optimizer, \ tsteps, tback, tstart, dt) print("Epoch: {}, Loss: {:0.5E}, MSE: {:0.5E}, Noise {:0.3f}" \ .format(epoch, loss, mse, swag_nn.base.beta())) # If not sampling weights we can adjust the learning rate if (epoch < args.swag_start + 10): # Update the learning rate scheduler.step() for param_group in optimizer.param_groups: print('Epoch {}, lr: {}'.format(epoch, param_group['lr'])) # Sample model parameters for SWAG posterior approx. # NOTE: 10 epoch burn in period with learning rate decay if(epoch >= args.swag_start + 10 and epoch % args.swag_freq == 0): print('Collecting model') swag_nn.collect() # Testing if(epoch % args.plot_freq == 0): n_test = 200 # Number to time-steps to test with torch.no_grad(): uPred, uTarget = test(args, swag_nn.base, testing_loader, tstep=n_test) # Construct domain for plotting tTest = np.arange(0, n_test*args.dt+1e-8, args.dt) xTest = np.linspace(x0, x1, args.nel+1) for bidx in range(2): plotPred(args, bidx, uPred[bidx].cpu().numpy(), uTarget[bidx].cpu().numpy(), tsteps=40, \ target_step=4, pred_step=4, epoch=epoch) # Plot samples from swag if(epoch > args.swag_start): with torch.no_grad(): uPred, uTarget = testSample(args, swag_nn, testing_loader, tstep=n_test, n_samples=8) # Plot samples at time 0.1 and 0.25 bidx = 0 for t in [10, 25]: plotSamples(args, bidx, uPred[bidx].detach().numpy(), uTarget[bidx].cpu().numpy(), tstep=t, epoch=epoch) # Save model periodically if(epoch % args.ckpt_freq == 0): swag_nn.saveModel(int(epoch), optimizer, scheduler, file_dir=args.ckpt_dir)
u_out[bidx*mb_size:(bidx+1)*mb_size, (t_idx+1)//test_every,:,:,:] = uPred
conditional_block
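The training loop in the record above regresses the network output onto a Crank–Nicolson target produced by burgerInt.crankNicolson, whose internals are not shown here. Below is a minimal numpy sketch of the general Crank–Nicolson construction under the assumption of a known right-hand side; the linear-decay rhs and the array sizes are purely illustrative, not the actual Burgers' operator.

import numpy as np

def crank_nicolson_target(u_prev, u_pred, rhs, dt):
    # Generic Crank-Nicolson form: u_star = u_prev + dt/2 * (rhs(u_prev) + rhs(u_pred)).
    # The network already proposes the next state u_pred, so no implicit solve is needed;
    # u_star then serves as the regression target for u_pred.
    return u_prev + 0.5 * dt * (rhs(u_prev) + rhs(u_pred))

# Toy usage with a hypothetical linear-decay right-hand side rhs(u) = -u.
u_prev = np.ones((4, 4))
u_pred = 0.9 * u_prev
u_star = crank_nicolson_target(u_prev, u_pred, lambda u: -u, dt=0.1)
print(u_star[0, 0])  # 1 + 0.05 * (-1.0 - 0.9) = 0.905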
main.py
''' 2D Coupled Burgers' system model === Distributed by: Notre Dame CICS (MIT Liscense) - Associated publication: url: http://www.sciencedirect.com/science/article/pii/S0021999119307612 doi: https://doi.org/10.1016/j.jcp.2019.109056 github: https://github.com/cics-nd/ar-pde-cnn === ''' from args import Parser from nn.denseEDcirc2d import DenseED from nn.bayesNN import BayesNN from nn.swag import SwagNN from nn.burger2DFiniteDifference import Burger2DIntegrate from utils.utils import mkdirs, toNumpy, toTuple from utils.burgerLoader2D import BurgerLoader from utils.post import plotPred, plotSamples from torch.optim.lr_scheduler import ExponentialLR import torch import torch.nn.functional as F import numpy as np import os, time def train(args, model, burgerInt, train_loader, optimizer, tsteps, tback, tstart, dt=0.1): ''' Trains the model Args: args (argparse): object with programs arguements model (PyTorch model): SWAG DenseED model to be tested burgerInt (BurgerIntegrate): 1D Burger system time integrator train_loader (dataloader): dataloader with training cases (use createTrainingLoader) optimizer (Pytorch Optm): optimzer tsteps (np.array): [mb] number of timesteps to predict for each mini-batch tback (np.array): [mb] number of timesteps to forward predict before back prop tstart (np.array): [mb] time-step to start updating model (kept at 0 for now) dt (float): current time-step size of the model (used to progressively increase time-step size) Returns: loss_total (float): negative log joint posterior mse_total (float): mean square error between the prediction and time-integrator ''' model.train() loss_total = 0 mse_total = 0 # Mini-batch loop for batch_idx, input in enumerate(train_loader): # input [b, 2, x, y] # Expand input to match model in channels dims = torch.ones(len(input.shape)) dims[1] = args.nic input = input.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) loss = 0 # Loop for number of timesteps optimizer.zero_grad() for i in range(tsteps[batch_idx]): uPred = model(input[:,-2*args.nic:,:]) if(i < tstart[batch_idx]): # Don't calculate residual, just predict forward input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred[:,0,:].unsqueeze(1).detach() input = torch.cat([input, input0], dim=1) else: # Calculate loss # Start with implicit time integration ustar = burgerInt.crankNicolson(uPred, input[:,-2:,:], dt) # Calc. 
loss based on posterior of the model log_joint = model.calc_neg_log_joint(uPred, ustar, len(train_loader)) loss = loss + log_joint loss_total = loss_total + loss.data.item() mse_total += F.mse_loss(uPred.detach(), ustar.detach()).item() # MSE for scheduler # Back-prop through two timesteps if((i+1)%tback[batch_idx] == 0): loss.backward() loss = 0 optimizer.step() optimizer.zero_grad() input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) else: input0 = uPred input = torch.cat([input, input0], dim=1) if(batch_idx % 10 == 1): print("Epoch {}, Mini-batch {}/{} ({}%) ".format(epoch, batch_idx, \ len(train_loader), int(100*batch_idx/len(train_loader)))) return loss_total/len(train_loader), mse_total/len(train_loader) def test(args, model, test_loader, tstep=100, test_every=2): ''' Tests the deterministic model Args: args (argparse): object with programs arguements model (PyTorch model): DenseED model to be tested test_loader (dataloader): dataloader with test cases (use createTestingLoader) tstep (int): number of timesteps to predict for test_every (int): Time-step interval to test (must match simulator), default = 2 Returns: u_out (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] predicted quantities u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator ''' model.eval() mb_size = int(len(test_loader.dataset)/len(test_loader)) u_out = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel) u_target = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel) for bidx, (input0, uTarget0) in enumerate(test_loader): # Expand input to match model in channels dims = torch.ones(len(input0.shape)) dims[1] = args.nic input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) u_out[bidx*mb_size:(bidx+1)*mb_size,0] = input0 u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every+1)].cpu() # Auto-regress for t_idx in range(tstep): uPred = model(input[:,-2*args.nic:,:,:]) if((t_idx+1)%test_every == 0): u_out[bidx*mb_size:(bidx+1)*mb_size, (t_idx+1)//test_every,:,:,:] = uPred input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) return u_out, u_target def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10, test_every=2): ''' Tests the samples of the Bayesian SWAG model Args: args (argparse): object with programs arguements model (PyTorch model): DenseED model to be tested test_loader (dataloader): dataloader with test cases (use createTestingLoader) tstep (int): number of timesteps to predict for n_samples (int): number of model samples to draw test_every (int): Time-step interval to test (must match simulator), default = 2 Returns: u_out (torch.Tensor): [d x nsamples x (tstep+1)//test_every x 2 x nel x nel] predicted quantities of each sample u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator ''' mb_size = int(len(test_loader.dataset)/len(test_loader)) u_out = torch.zeros(len(test_loader.dataset), n_samples, (tstep)//test_every + 1, 2, args.nel, args.nel) u_target = torch.zeros(len(test_loader.dataset), (tstep)//test_every + 1, 2, args.nel, args.nel) for i in range(n_samples): print('Executing model sample {:d}'.format(i)) model = swag_nn.sample(diagCov=True) # Use diagonal approx. 
only when training model.eval() for bidx, (input0, uTarget0) in enumerate(test_loader): # Expand input to match model in channels dims = torch.ones(len(input0.shape)) dims[1] = args.nic input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) if(i == 0): # Save target data u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every + 1)] u_out[bidx*mb_size:(bidx+1)*mb_size,i,0,:,:,:] = input0 # Auto-regress for t_idx in range(tstep): uPred = model(input[:,-2*args.nic:,:]) if((t_idx+1)%test_every == 0): u_out[bidx*mb_size:(bidx+1)*mb_size, i, t_idx//test_every+1,:,:,:] = uPred input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) return u_out, u_target if __name__ == '__main__': # Parse arguements args = Parser().parse() use_cuda = "cpu" if(torch.cuda.is_available()): use_cuda = "cuda" args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("Torch device:{}".format(args.device)) # Domain settings, matches solver settings x0 = 0 x1 = 1.0 args.dx = (x1 - x0)/args.nel # Create training loader burgerLoader = BurgerLoader(dt=args.dt) training_loader = burgerLoader.createTrainingLoader(args.ntrain, args.nel, batch_size=args.batch_size) # Create training loader test_cases = np.array([400, 401]).astype(int) testing_loader = burgerLoader.createTestingLoader(args.data_dir, test_cases, simdt=0.005, batch_size=2) # Create DenseED model denseED = DenseED(in_channels=2*args.nic, out_channels=2*args.noc,
init_features=args.init_features, bn_size=args.bn_size, drop_rate=args.drop_rate, bottleneck=False, out_activation=None).to(args.device) # Bayesian neural network bayes_nn = BayesNN(args, denseED) # Stochastic weighted averages swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=args.swag_max) # Optimizer parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.lr_beta}, {'params': bayes_nn.model.features.parameters()}] optimizer = torch.optim.Adam(parameters, lr=args.lr, weight_decay=0.0) # Learning rate scheduler scheduler = ExponentialLR(optimizer, gamma=0.995) # If we are starting from a specific epoch, attempt to load a model if(args.epoch_start > 0): optimizer, scheduler = swag_nn.loadModel(args.epoch_start, optimizer, scheduler, file_dir=args.ckpt_dir) # Create Burger time integrator # Here we will use 2nd order finite differences for spacial derivatives burgerInt = Burger2DIntegrate(args.dx, nu=args.nu, grad_kernels=[3, 3], device=args.device) # Progressively increase the time step to help stabailize training dtStep = 25 dtArr = np.linspace(np.log10(args.dt)-1, np.log10(args.dt), dtStep) dtArr = 10**(dtArr) # ========== Epoch loop ============ print('>>> Training network, lets rock') for epoch in range(args.epoch_start+1, args.epochs + 1): if(epoch == args.swag_start): print('Starting to sample weights every {:d} epochs'.format(args.swag_freq)) # Mannually set learning rate to swag sampling rate parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.swag_lr_beta}, {'params': bayes_nn.model.features.parameters()}] optimizer = torch.optim.Adam(parameters, lr=args.swag_lr, weight_decay=0.0) scheduler = ExponentialLR(optimizer, gamma=0.9) dt = dtArr[min(epoch, dtArr.shape[0]-1)] # Number of timesteps to predict forward tsteps = np.zeros(len(training_loader)).astype(int) + int(90*min(epoch/75, 1) + 10) if(epoch >= args.swag_start): # Once SWAG sampling starts randomly pick a length to unroll. # This is done to help speed training up. tsteps = np.zeros(len(training_loader)).astype(int) + np.random.randint(10, int(90*min(epoch/75., 1.0) + 10), len(training_loader)) # Back-prop interval tback = np.zeros((len(training_loader))) + np.random.randint(2,5,tsteps.shape[0]) # Time-step to start training at tstart = np.zeros(tsteps.shape[0]) # Train network loss, mse = train(args, swag_nn, burgerInt, training_loader, optimizer, \ tsteps, tback, tstart, dt) print("Epoch: {}, Loss: {:0.5E}, MSE: {:0.5E}, Noise {:0.3f}" \ .format(epoch, loss, mse, swag_nn.base.beta())) # If not sampling weights we can adjust the learning rate if (epoch < args.swag_start + 10): # Update the learning rate scheduler.step() for param_group in optimizer.param_groups: print('Epoch {}, lr: {}'.format(epoch, param_group['lr'])) # Sample model parameters for SWAG posterior approx. 
# NOTE: 10 epoch burn in period with learning rate decay if(epoch >= args.swag_start + 10 and epoch % args.swag_freq == 0): print('Collecting model') swag_nn.collect() # Testing if(epoch % args.plot_freq == 0): n_test = 200 # Number to time-steps to test with torch.no_grad(): uPred, uTarget = test(args, swag_nn.base, testing_loader, tstep=n_test) # Construct domain for plotting tTest = np.arange(0, n_test*args.dt+1e-8, args.dt) xTest = np.linspace(x0, x1, args.nel+1) for bidx in range(2): plotPred(args, bidx, uPred[bidx].cpu().numpy(), uTarget[bidx].cpu().numpy(), tsteps=40, \ target_step=4, pred_step=4, epoch=epoch) # Plot samples from swag if(epoch > args.swag_start): with torch.no_grad(): uPred, uTarget = testSample(args, swag_nn, testing_loader, tstep=n_test, n_samples=8) # Plot samples at time 0.1 and 0.25 bidx = 0 for t in [10, 25]: plotSamples(args, bidx, uPred[bidx].detach().numpy(), uTarget[bidx].cpu().numpy(), tstep=t, epoch=epoch) # Save model periodically if(epoch % args.ckpt_freq == 0): swag_nn.saveModel(int(epoch), optimizer, scheduler, file_dir=args.ckpt_dir)
blocks=args.blocks, growth_rate=args.growth_rate,
random_line_split
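Before the first model call, the records above tile the two-channel initial state along the channel axis to fill the model's 2*nic input channels (dims[1] = args.nic followed by input.repeat(...)); afterwards the rolling window of past predictions replaces the repeats. A small sketch of the same tiling, with nic = 5 and the batch/grid sizes assumed for illustration:

import torch

nic = 5                                    # assumed; args.nic comes from the argument parser
state = torch.randn(8, 2, 64, 64)          # [b, 2, x, y] initial condition
model_input = state.repeat(1, nic, 1, 1)   # tiled along channels to [b, 2*nic, x, y]
assert model_input.shape == (8, 2 * nic, 64, 64)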
main.py
''' 2D Coupled Burgers' system model === Distributed by: Notre Dame CICS (MIT Liscense) - Associated publication: url: http://www.sciencedirect.com/science/article/pii/S0021999119307612 doi: https://doi.org/10.1016/j.jcp.2019.109056 github: https://github.com/cics-nd/ar-pde-cnn === ''' from args import Parser from nn.denseEDcirc2d import DenseED from nn.bayesNN import BayesNN from nn.swag import SwagNN from nn.burger2DFiniteDifference import Burger2DIntegrate from utils.utils import mkdirs, toNumpy, toTuple from utils.burgerLoader2D import BurgerLoader from utils.post import plotPred, plotSamples from torch.optim.lr_scheduler import ExponentialLR import torch import torch.nn.functional as F import numpy as np import os, time def train(args, model, burgerInt, train_loader, optimizer, tsteps, tback, tstart, dt=0.1): ''' Trains the model Args: args (argparse): object with programs arguements model (PyTorch model): SWAG DenseED model to be tested burgerInt (BurgerIntegrate): 1D Burger system time integrator train_loader (dataloader): dataloader with training cases (use createTrainingLoader) optimizer (Pytorch Optm): optimzer tsteps (np.array): [mb] number of timesteps to predict for each mini-batch tback (np.array): [mb] number of timesteps to forward predict before back prop tstart (np.array): [mb] time-step to start updating model (kept at 0 for now) dt (float): current time-step size of the model (used to progressively increase time-step size) Returns: loss_total (float): negative log joint posterior mse_total (float): mean square error between the prediction and time-integrator ''' model.train() loss_total = 0 mse_total = 0 # Mini-batch loop for batch_idx, input in enumerate(train_loader): # input [b, 2, x, y] # Expand input to match model in channels dims = torch.ones(len(input.shape)) dims[1] = args.nic input = input.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) loss = 0 # Loop for number of timesteps optimizer.zero_grad() for i in range(tsteps[batch_idx]): uPred = model(input[:,-2*args.nic:,:]) if(i < tstart[batch_idx]): # Don't calculate residual, just predict forward input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred[:,0,:].unsqueeze(1).detach() input = torch.cat([input, input0], dim=1) else: # Calculate loss # Start with implicit time integration ustar = burgerInt.crankNicolson(uPred, input[:,-2:,:], dt) # Calc. 
loss based on posterior of the model log_joint = model.calc_neg_log_joint(uPred, ustar, len(train_loader)) loss = loss + log_joint loss_total = loss_total + loss.data.item() mse_total += F.mse_loss(uPred.detach(), ustar.detach()).item() # MSE for scheduler # Back-prop through two timesteps if((i+1)%tback[batch_idx] == 0): loss.backward() loss = 0 optimizer.step() optimizer.zero_grad() input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) else: input0 = uPred input = torch.cat([input, input0], dim=1) if(batch_idx % 10 == 1): print("Epoch {}, Mini-batch {}/{} ({}%) ".format(epoch, batch_idx, \ len(train_loader), int(100*batch_idx/len(train_loader)))) return loss_total/len(train_loader), mse_total/len(train_loader) def test(args, model, test_loader, tstep=100, test_every=2): ''' Tests the deterministic model Args: args (argparse): object with programs arguements model (PyTorch model): DenseED model to be tested test_loader (dataloader): dataloader with test cases (use createTestingLoader) tstep (int): number of timesteps to predict for test_every (int): Time-step interval to test (must match simulator), default = 2 Returns: u_out (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] predicted quantities u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator ''' model.eval() mb_size = int(len(test_loader.dataset)/len(test_loader)) u_out = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel) u_target = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel) for bidx, (input0, uTarget0) in enumerate(test_loader): # Expand input to match model in channels dims = torch.ones(len(input0.shape)) dims[1] = args.nic input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) u_out[bidx*mb_size:(bidx+1)*mb_size,0] = input0 u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every+1)].cpu() # Auto-regress for t_idx in range(tstep): uPred = model(input[:,-2*args.nic:,:,:]) if((t_idx+1)%test_every == 0): u_out[bidx*mb_size:(bidx+1)*mb_size, (t_idx+1)//test_every,:,:,:] = uPred input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) return u_out, u_target def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10, test_every=2):
if __name__ == '__main__': # Parse arguements args = Parser().parse() use_cuda = "cpu" if(torch.cuda.is_available()): use_cuda = "cuda" args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("Torch device:{}".format(args.device)) # Domain settings, matches solver settings x0 = 0 x1 = 1.0 args.dx = (x1 - x0)/args.nel # Create training loader burgerLoader = BurgerLoader(dt=args.dt) training_loader = burgerLoader.createTrainingLoader(args.ntrain, args.nel, batch_size=args.batch_size) # Create training loader test_cases = np.array([400, 401]).astype(int) testing_loader = burgerLoader.createTestingLoader(args.data_dir, test_cases, simdt=0.005, batch_size=2) # Create DenseED model denseED = DenseED(in_channels=2*args.nic, out_channels=2*args.noc, blocks=args.blocks, growth_rate=args.growth_rate, init_features=args.init_features, bn_size=args.bn_size, drop_rate=args.drop_rate, bottleneck=False, out_activation=None).to(args.device) # Bayesian neural network bayes_nn = BayesNN(args, denseED) # Stochastic weighted averages swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=args.swag_max) # Optimizer parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.lr_beta}, {'params': bayes_nn.model.features.parameters()}] optimizer = torch.optim.Adam(parameters, lr=args.lr, weight_decay=0.0) # Learning rate scheduler scheduler = ExponentialLR(optimizer, gamma=0.995) # If we are starting from a specific epoch, attempt to load a model if(args.epoch_start > 0): optimizer, scheduler = swag_nn.loadModel(args.epoch_start, optimizer, scheduler, file_dir=args.ckpt_dir) # Create Burger time integrator # Here we will use 2nd order finite differences for spacial derivatives burgerInt = Burger2DIntegrate(args.dx, nu=args.nu, grad_kernels=[3, 3], device=args.device) # Progressively increase the time step to help stabailize training dtStep = 25 dtArr = np.linspace(np.log10(args.dt)-1, np.log10(args.dt), dtStep) dtArr = 10**(dtArr) # ========== Epoch loop ============ print('>>> Training network, lets rock') for epoch in range(args.epoch_start+1, args.epochs + 1): if(epoch == args.swag_start): print('Starting to sample weights every {:d} epochs'.format(args.swag_freq)) # Mannually set learning rate to swag sampling rate parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.swag_lr_beta}, {'params': bayes_nn.model.features.parameters()}] optimizer = torch.optim.Adam(parameters, lr=args.swag_lr, weight_decay=0.0) scheduler = ExponentialLR(optimizer, gamma=0.9) dt = dtArr[min(epoch, dtArr.shape[0]-1)] # Number of timesteps to predict forward tsteps = np.zeros(len(training_loader)).astype(int) + int(90*min(epoch/75, 1) + 10) if(epoch >= args.swag_start): # Once SWAG sampling starts randomly pick a length to unroll. # This is done to help speed training up. 
tsteps = np.zeros(len(training_loader)).astype(int) + np.random.randint(10, int(90*min(epoch/75., 1.0) + 10), len(training_loader)) # Back-prop interval tback = np.zeros((len(training_loader))) + np.random.randint(2,5,tsteps.shape[0]) # Time-step to start training at tstart = np.zeros(tsteps.shape[0]) # Train network loss, mse = train(args, swag_nn, burgerInt, training_loader, optimizer, \ tsteps, tback, tstart, dt) print("Epoch: {}, Loss: {:0.5E}, MSE: {:0.5E}, Noise {:0.3f}" \ .format(epoch, loss, mse, swag_nn.base.beta())) # If not sampling weights we can adjust the learning rate if (epoch < args.swag_start + 10): # Update the learning rate scheduler.step() for param_group in optimizer.param_groups: print('Epoch {}, lr: {}'.format(epoch, param_group['lr'])) # Sample model parameters for SWAG posterior approx. # NOTE: 10 epoch burn in period with learning rate decay if(epoch >= args.swag_start + 10 and epoch % args.swag_freq == 0): print('Collecting model') swag_nn.collect() # Testing if(epoch % args.plot_freq == 0): n_test = 200 # Number to time-steps to test with torch.no_grad(): uPred, uTarget = test(args, swag_nn.base, testing_loader, tstep=n_test) # Construct domain for plotting tTest = np.arange(0, n_test*args.dt+1e-8, args.dt) xTest = np.linspace(x0, x1, args.nel+1) for bidx in range(2): plotPred(args, bidx, uPred[bidx].cpu().numpy(), uTarget[bidx].cpu().numpy(), tsteps=40, \ target_step=4, pred_step=4, epoch=epoch) # Plot samples from swag if(epoch > args.swag_start): with torch.no_grad(): uPred, uTarget = testSample(args, swag_nn, testing_loader, tstep=n_test, n_samples=8) # Plot samples at time 0.1 and 0.25 bidx = 0 for t in [10, 25]: plotSamples(args, bidx, uPred[bidx].detach().numpy(), uTarget[bidx].cpu().numpy(), tstep=t, epoch=epoch) # Save model periodically if(epoch % args.ckpt_freq == 0): swag_nn.saveModel(int(epoch), optimizer, scheduler, file_dir=args.ckpt_dir)
''' Tests the samples of the Bayesian SWAG model Args: args (argparse): object with programs arguements model (PyTorch model): DenseED model to be tested test_loader (dataloader): dataloader with test cases (use createTestingLoader) tstep (int): number of timesteps to predict for n_samples (int): number of model samples to draw test_every (int): Time-step interval to test (must match simulator), default = 2 Returns: u_out (torch.Tensor): [d x nsamples x (tstep+1)//test_every x 2 x nel x nel] predicted quantities of each sample u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator ''' mb_size = int(len(test_loader.dataset)/len(test_loader)) u_out = torch.zeros(len(test_loader.dataset), n_samples, (tstep)//test_every + 1, 2, args.nel, args.nel) u_target = torch.zeros(len(test_loader.dataset), (tstep)//test_every + 1, 2, args.nel, args.nel) for i in range(n_samples): print('Executing model sample {:d}'.format(i)) model = swag_nn.sample(diagCov=True) # Use diagonal approx. only when training model.eval() for bidx, (input0, uTarget0) in enumerate(test_loader): # Expand input to match model in channels dims = torch.ones(len(input0.shape)) dims[1] = args.nic input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) if(i == 0): # Save target data u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every + 1)] u_out[bidx*mb_size:(bidx+1)*mb_size,i,0,:,:,:] = input0 # Auto-regress for t_idx in range(tstep): uPred = model(input[:,-2*args.nic:,:]) if((t_idx+1)%test_every == 0): u_out[bidx*mb_size:(bidx+1)*mb_size, i, t_idx//test_every+1,:,:,:] = uPred input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) return u_out, u_target
identifier_body
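Both test loops above store only every test_every-th prediction, at slot (t_idx+1)//test_every, with slot 0 reserved for the initial condition. A small sketch of that index mapping, assuming test_every = 2 and a 10-step rollout:

# Which storage slots get filled during an auto-regressive rollout.
test_every = 2
tstep = 10
slots = [(t_idx + 1) // test_every for t_idx in range(tstep) if (t_idx + 1) % test_every == 0]
print(slots)  # [1, 2, 3, 4, 5] -> tstep // test_every filled slots after the initial condition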
main.py
''' 2D Coupled Burgers' system model === Distributed by: Notre Dame CICS (MIT Liscense) - Associated publication: url: http://www.sciencedirect.com/science/article/pii/S0021999119307612 doi: https://doi.org/10.1016/j.jcp.2019.109056 github: https://github.com/cics-nd/ar-pde-cnn === ''' from args import Parser from nn.denseEDcirc2d import DenseED from nn.bayesNN import BayesNN from nn.swag import SwagNN from nn.burger2DFiniteDifference import Burger2DIntegrate from utils.utils import mkdirs, toNumpy, toTuple from utils.burgerLoader2D import BurgerLoader from utils.post import plotPred, plotSamples from torch.optim.lr_scheduler import ExponentialLR import torch import torch.nn.functional as F import numpy as np import os, time def
(args, model, burgerInt, train_loader, optimizer, tsteps, tback, tstart, dt=0.1): ''' Trains the model Args: args (argparse): object with programs arguements model (PyTorch model): SWAG DenseED model to be tested burgerInt (BurgerIntegrate): 1D Burger system time integrator train_loader (dataloader): dataloader with training cases (use createTrainingLoader) optimizer (Pytorch Optm): optimzer tsteps (np.array): [mb] number of timesteps to predict for each mini-batch tback (np.array): [mb] number of timesteps to forward predict before back prop tstart (np.array): [mb] time-step to start updating model (kept at 0 for now) dt (float): current time-step size of the model (used to progressively increase time-step size) Returns: loss_total (float): negative log joint posterior mse_total (float): mean square error between the prediction and time-integrator ''' model.train() loss_total = 0 mse_total = 0 # Mini-batch loop for batch_idx, input in enumerate(train_loader): # input [b, 2, x, y] # Expand input to match model in channels dims = torch.ones(len(input.shape)) dims[1] = args.nic input = input.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) loss = 0 # Loop for number of timesteps optimizer.zero_grad() for i in range(tsteps[batch_idx]): uPred = model(input[:,-2*args.nic:,:]) if(i < tstart[batch_idx]): # Don't calculate residual, just predict forward input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred[:,0,:].unsqueeze(1).detach() input = torch.cat([input, input0], dim=1) else: # Calculate loss # Start with implicit time integration ustar = burgerInt.crankNicolson(uPred, input[:,-2:,:], dt) # Calc. loss based on posterior of the model log_joint = model.calc_neg_log_joint(uPred, ustar, len(train_loader)) loss = loss + log_joint loss_total = loss_total + loss.data.item() mse_total += F.mse_loss(uPred.detach(), ustar.detach()).item() # MSE for scheduler # Back-prop through two timesteps if((i+1)%tback[batch_idx] == 0): loss.backward() loss = 0 optimizer.step() optimizer.zero_grad() input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) else: input0 = uPred input = torch.cat([input, input0], dim=1) if(batch_idx % 10 == 1): print("Epoch {}, Mini-batch {}/{} ({}%) ".format(epoch, batch_idx, \ len(train_loader), int(100*batch_idx/len(train_loader)))) return loss_total/len(train_loader), mse_total/len(train_loader) def test(args, model, test_loader, tstep=100, test_every=2): ''' Tests the deterministic model Args: args (argparse): object with programs arguements model (PyTorch model): DenseED model to be tested test_loader (dataloader): dataloader with test cases (use createTestingLoader) tstep (int): number of timesteps to predict for test_every (int): Time-step interval to test (must match simulator), default = 2 Returns: u_out (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] predicted quantities u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator ''' model.eval() mb_size = int(len(test_loader.dataset)/len(test_loader)) u_out = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel) u_target = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel) for bidx, (input0, uTarget0) in enumerate(test_loader): # Expand input to match model in channels dims = torch.ones(len(input0.shape)) dims[1] = args.nic input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) 
u_out[bidx*mb_size:(bidx+1)*mb_size,0] = input0 u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every+1)].cpu() # Auto-regress for t_idx in range(tstep): uPred = model(input[:,-2*args.nic:,:,:]) if((t_idx+1)%test_every == 0): u_out[bidx*mb_size:(bidx+1)*mb_size, (t_idx+1)//test_every,:,:,:] = uPred input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) return u_out, u_target def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10, test_every=2): ''' Tests the samples of the Bayesian SWAG model Args: args (argparse): object with programs arguements model (PyTorch model): DenseED model to be tested test_loader (dataloader): dataloader with test cases (use createTestingLoader) tstep (int): number of timesteps to predict for n_samples (int): number of model samples to draw test_every (int): Time-step interval to test (must match simulator), default = 2 Returns: u_out (torch.Tensor): [d x nsamples x (tstep+1)//test_every x 2 x nel x nel] predicted quantities of each sample u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator ''' mb_size = int(len(test_loader.dataset)/len(test_loader)) u_out = torch.zeros(len(test_loader.dataset), n_samples, (tstep)//test_every + 1, 2, args.nel, args.nel) u_target = torch.zeros(len(test_loader.dataset), (tstep)//test_every + 1, 2, args.nel, args.nel) for i in range(n_samples): print('Executing model sample {:d}'.format(i)) model = swag_nn.sample(diagCov=True) # Use diagonal approx. only when training model.eval() for bidx, (input0, uTarget0) in enumerate(test_loader): # Expand input to match model in channels dims = torch.ones(len(input0.shape)) dims[1] = args.nic input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device) if(i == 0): # Save target data u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every + 1)] u_out[bidx*mb_size:(bidx+1)*mb_size,i,0,:,:,:] = input0 # Auto-regress for t_idx in range(tstep): uPred = model(input[:,-2*args.nic:,:]) if((t_idx+1)%test_every == 0): u_out[bidx*mb_size:(bidx+1)*mb_size, i, t_idx//test_every+1,:,:,:] = uPred input = input[:,-2*int(args.nic-1):,:].detach() input0 = uPred.detach() input = torch.cat([input, input0], dim=1) return u_out, u_target if __name__ == '__main__': # Parse arguements args = Parser().parse() use_cuda = "cpu" if(torch.cuda.is_available()): use_cuda = "cuda" args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("Torch device:{}".format(args.device)) # Domain settings, matches solver settings x0 = 0 x1 = 1.0 args.dx = (x1 - x0)/args.nel # Create training loader burgerLoader = BurgerLoader(dt=args.dt) training_loader = burgerLoader.createTrainingLoader(args.ntrain, args.nel, batch_size=args.batch_size) # Create training loader test_cases = np.array([400, 401]).astype(int) testing_loader = burgerLoader.createTestingLoader(args.data_dir, test_cases, simdt=0.005, batch_size=2) # Create DenseED model denseED = DenseED(in_channels=2*args.nic, out_channels=2*args.noc, blocks=args.blocks, growth_rate=args.growth_rate, init_features=args.init_features, bn_size=args.bn_size, drop_rate=args.drop_rate, bottleneck=False, out_activation=None).to(args.device) # Bayesian neural network bayes_nn = BayesNN(args, denseED) # Stochastic weighted averages swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=args.swag_max) # Optimizer parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.lr_beta}, 
{'params': bayes_nn.model.features.parameters()}] optimizer = torch.optim.Adam(parameters, lr=args.lr, weight_decay=0.0) # Learning rate scheduler scheduler = ExponentialLR(optimizer, gamma=0.995) # If we are starting from a specific epoch, attempt to load a model if(args.epoch_start > 0): optimizer, scheduler = swag_nn.loadModel(args.epoch_start, optimizer, scheduler, file_dir=args.ckpt_dir) # Create Burger time integrator # Here we will use 2nd order finite differences for spacial derivatives burgerInt = Burger2DIntegrate(args.dx, nu=args.nu, grad_kernels=[3, 3], device=args.device) # Progressively increase the time step to help stabailize training dtStep = 25 dtArr = np.linspace(np.log10(args.dt)-1, np.log10(args.dt), dtStep) dtArr = 10**(dtArr) # ========== Epoch loop ============ print('>>> Training network, lets rock') for epoch in range(args.epoch_start+1, args.epochs + 1): if(epoch == args.swag_start): print('Starting to sample weights every {:d} epochs'.format(args.swag_freq)) # Mannually set learning rate to swag sampling rate parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.swag_lr_beta}, {'params': bayes_nn.model.features.parameters()}] optimizer = torch.optim.Adam(parameters, lr=args.swag_lr, weight_decay=0.0) scheduler = ExponentialLR(optimizer, gamma=0.9) dt = dtArr[min(epoch, dtArr.shape[0]-1)] # Number of timesteps to predict forward tsteps = np.zeros(len(training_loader)).astype(int) + int(90*min(epoch/75, 1) + 10) if(epoch >= args.swag_start): # Once SWAG sampling starts randomly pick a length to unroll. # This is done to help speed training up. tsteps = np.zeros(len(training_loader)).astype(int) + np.random.randint(10, int(90*min(epoch/75., 1.0) + 10), len(training_loader)) # Back-prop interval tback = np.zeros((len(training_loader))) + np.random.randint(2,5,tsteps.shape[0]) # Time-step to start training at tstart = np.zeros(tsteps.shape[0]) # Train network loss, mse = train(args, swag_nn, burgerInt, training_loader, optimizer, \ tsteps, tback, tstart, dt) print("Epoch: {}, Loss: {:0.5E}, MSE: {:0.5E}, Noise {:0.3f}" \ .format(epoch, loss, mse, swag_nn.base.beta())) # If not sampling weights we can adjust the learning rate if (epoch < args.swag_start + 10): # Update the learning rate scheduler.step() for param_group in optimizer.param_groups: print('Epoch {}, lr: {}'.format(epoch, param_group['lr'])) # Sample model parameters for SWAG posterior approx. # NOTE: 10 epoch burn in period with learning rate decay if(epoch >= args.swag_start + 10 and epoch % args.swag_freq == 0): print('Collecting model') swag_nn.collect() # Testing if(epoch % args.plot_freq == 0): n_test = 200 # Number to time-steps to test with torch.no_grad(): uPred, uTarget = test(args, swag_nn.base, testing_loader, tstep=n_test) # Construct domain for plotting tTest = np.arange(0, n_test*args.dt+1e-8, args.dt) xTest = np.linspace(x0, x1, args.nel+1) for bidx in range(2): plotPred(args, bidx, uPred[bidx].cpu().numpy(), uTarget[bidx].cpu().numpy(), tsteps=40, \ target_step=4, pred_step=4, epoch=epoch) # Plot samples from swag if(epoch > args.swag_start): with torch.no_grad(): uPred, uTarget = testSample(args, swag_nn, testing_loader, tstep=n_test, n_samples=8) # Plot samples at time 0.1 and 0.25 bidx = 0 for t in [10, 25]: plotSamples(args, bidx, uPred[bidx].detach().numpy(), uTarget[bidx].cpu().numpy(), tstep=t, epoch=epoch) # Save model periodically if(epoch % args.ckpt_freq == 0): swag_nn.saveModel(int(epoch), optimizer, scheduler, file_dir=args.ckpt_dir)
train
identifier_name
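The epoch loop above ramps the training time-step over the first dtStep epochs with a log-spaced schedule (np.linspace on log10(args.dt) followed by 10**). A minimal sketch of that ramp, with dt = 0.005 and 25 steps assumed only for illustration:

import numpy as np

dt, dt_step = 0.005, 25                                    # assumed values for args.dt and dtStep
dt_arr = 10 ** np.linspace(np.log10(dt) - 1, np.log10(dt), dt_step)
print(dt_arr[0], dt_arr[-1])                               # ramps log-uniformly from dt/10 up to dt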
index.js
const Discord = require('discord.js'); const config = require('./config.json'); const nodemailer = require("nodemailer"); const showdown = require('showdown'); const randtoken = require('rand-token'); const Keyv = require('keyv'); const crypto = require('crypto'); const os = require("os"); const hostname = os.hostname(); /** * hash password with sha512. * source: https://ciphertrick.com/salt-hash-passwords-using-nodejs-crypto/ * @function * @param {string} toHash - List of required fields. * @param {string} salt - Data to be validated. */ const sha512 = function (toHash, salt) { let hash = crypto.createHmac('sha512', salt); /** Hashing algorithm sha512 */ hash.update(toHash); return hash.digest('hex'); }; // source: https://discordjs.guide/miscellaneous/parsing-mention-arguments.html#using-regular-expressions function getUserFromMention(mention) { // The id is the first and only match found by the RegEx. const matches = mention.match(/^<@!?(\d+)>$/); // If supplied variable was not a mention, matches will be null instead of an array. if (!matches) return; // However the first element in the matches array will be the entire mention, not just the ID, so use index 1. const id = matches[1]; return client.users.cache.get(id); } const HOURS_TO_MILLISECONDS = 3600 * 1000; const client = new Discord.Client(); const converter = new showdown.Converter(); // use Keyv with sqlite storage const sqlite_uri = "sqlite://db.sqlite3"; const discordUserId2token = new Keyv(sqlite_uri, { namespace: "discord_user_id_to_token" }); // Discord User-ID / token pairs const token2nethzHash = new Keyv(sqlite_uri, { namespace: "token_to_nethz_hash" }); // nethz / token pairs const verifiedNethzHashs = new Keyv(sqlite_uri, { namespace: "verified_nethz_hashs" }); // the set of hashs of nethzs already used for verification (only the keys are relevant; value is always `true`) discordUserId2token.on('error', err => console.error('Keyv connection error:', err)); token2nethzHash.on('error', err => console.error('Keyv connection error:', err)); verifiedNethzHashs.on('error', err => console.error('Keyv connection error:', err)); client.login(config.token); const botMail = config.transportOptions.auth; const sampleNethz = "jsmith"; const sampleToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"; const sampleDiscordUsername = "john_sm_01"; const availableCommandsStr = `Available commands: \`!ping\`: make me say Pong \`!nethz\`: tell me your nethz; e.g \`!nethz ${sampleNethz}\` \`!token\`: tell me the token I sent you; e.g \`!token ${sampleToken}\` \`!welcomeagain\`: **print the welcome message again, with all the instructions for the verification process** \`!help\`: print this message `; const adminCommandsStr = `Admin-only commands: \`!unmark\` (admin only): unmark a nethz as "already used for verification"; e.g \`!unmark ${sampleNethz}\` \`!mark\` (admin only): mark a nethz as "already used for verification"; e.g \`!mark ${sampleNethz}\` \`!purgereqs\` (admin only): delete all active tokens, by clearing discordUserId2token and token2nethzHash WARNING: this leads to unexpected behaviour from the point of view of users who are pending verification... \`!purgemarks\` (admin only): unmark all nethzs, by clearing verifiedNethzHashs. WARNING: doing this is rarely a good idea... 
\`!verify\` (admin only): manually verify a user; e.g \`!verify @${sampleDiscordUsername}\` \`!adminhelp\` (admin only): print this message (Note: admin commands are only used in the admin channel #${config.adminChannelName}, whereas normal commands are only used in DM channels.) `;
const welcomeMsg = (guildName) => `Hello! I see you just joined the server **${guildName}**. You are currently not verified as an ETH student on **${guildName}**, so you only have access to a restricted number of channels. To verify yourself as an ETH student, 1. please tell me your nethz (i.e ETH username) in the following format: \`!nethz \` + your nethz; e.g: \`!nethz ${sampleNethz}\` 2. I will send an email at <nethz>@student.ethz.ch containing a token 3. then, show me that you did receive the token, by telling me: \`!token \` + the token; e.g: \`!token ${sampleToken}\` Remarks: - To reset the process, e.g if you misspelled your nethz, just do step 1 again. (I will invalidate the previous token, don't worry.) - My email address, which I will use in step 2, is ${botMail.user}; please check in your spam folder if you don't receive anything. (Note that no human will check the inbox of ${botMail.user}, except for debugging.) - Once you receive the email, you have ${config.tokenTTL} hours to accomplish step 3, as the token expires after that duration. - I will store a salted hash of your nethz in database. (This is to prevent a student from verifying multiple Discord accounts.) I will *not* keep track of which Discord account your nethz corresponds to, and vice-versa. I am a very stupid bot. If you have any questions or encounter any problem, please send a message to an admin of **${guildName}** directly. `; const genMailContent = (discordUsername, token, guildName, botName) => `Hello, \n You have recently joined the Discord server **${guildName}**, under the username **${discordUsername}**, and provided your nethz (i.e ETH username) for verification.\n To finish the verification process, please check your Direct Message channel with me (**${botName}**) and send me the following token within ${config.tokenTTL} hours: \n ${token}\n If you did not join the Discord server **${guildName}** and tell me your nethz, then someone else provided your nethz. Then you don't need to do anything; the token will expire in ${config.tokenTTL} hours.\n Note that I am a Discord bot and that this email was autogenerated, so please don't reply to it. 
(You can reply if you really want to but no human will ever see it.)\n If you really need to, you can always contact ${config.emergencyContact.fullName}, your fellow ETH student who runs the Discord server **${guildName}**.\n \nBest regards,\n ${botName} `; // create reusable transporter object using the default SMTP transport const transporter = nodemailer.createTransport(config.transportOptions); // verify connection configuration transporter.verify(function (error, success) { if (error) { console.log(error); } else { console.assert(success); console.log("SMTP server is ready to take our messages"); } }); client.once('ready', async () => { const theGuild = client.guilds.cache.get(config.theGuildId); if (!theGuild.available) { console.warn("theGuild.available is false (it indicates a server outage)"); } // check that the bot can read/write in the config.adminChannelName channel const adminChannel = theGuild.channels.cache.find(channel => channel.name === config.adminChannelName); const readWritePerms = ['VIEW_CHANNEL', 'SEND_MESSAGES']; if (!theGuild.me.permissionsIn(adminChannel).has(readWritePerms)) { throw Error(`bot doesn't have read/write permission in admin channel ${config.adminChannelName}`); } // create role config.roleName if does not exist if (!theGuild.roles.cache.some(role => role.name === config.roleName)) { theGuild.createRole({ name: config.roleName }) .then(role => console.log(`Created new role with name ${role.name} and color ${role.color}`)) .catch(console.error); } // check that we can send email const textContent = `yo yo yo this is a test email. The bot "${client.user.username}" was just started on host ${hostname}.`; const info = await transporter.sendMail({ from: { name: client.user.username, address: botMail.user }, to: botMail.user, subject: `Test email (${client.user.username} bot startup)`, text: textContent, html: converter.makeHtml(textContent.replace('\n', '\n\n')) }); console.log("Message sent: %s", info.messageId); console.log('Ready!'); }); const prefix = config.prefix; client.on('message', async message => { if (message.author.bot) return; if (message.channel.type === 'text' && message.channel.guild.id === config.theGuildId && message.channel.name === config.adminChannelName) { if (!message.content.startsWith(prefix)) return; const args = message.content.slice(prefix.length).split(/ +/); const command = args.shift().toLowerCase(); if (command === 'unmark') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!unmark ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!unmark ${sampleNethz}\``); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (! await verifiedNethzHashs.get(nethzHash)) { return message.channel.send(`This nethz ${nethz} is not currently marked as "already used for verification". No action was performed.`); } else { await verifiedNethzHashs.delete(nethzHash); return message.channel.send(`Unmarked nethz ${nethz} as "already used for verification".`); } } } else if (command === 'mark') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!mark ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... 
Usage: e.g \`!mark ${sampleNethz}\``); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (await verifiedNethzHashs.get(nethzHash)) { return message.channel.send(`This nethz ${nethz} is already marked as "already used for verification". No action was performed.`); } else { await verifiedNethzHashs.set(nethzHash, true); return message.channel.send(`Marked nethz ${nethz} as "already used for verification".`); } } } else if (command === 'purgereqs') { if (args.length) { message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`); } await discordUserId2token.clear(); await token2nethzHash.clear(); return message.channel.send(`Cleared all active verification tokens from database. Tip: this leads to unexpected behaviour from the point of view of the users; it might be a good idea to put a message on a public channel to explain what happened.`); } else if (command === 'purgemarks') { if (args.length) { message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`); } await verifiedNethzHashs.clear(); return message.channel.send(`Unmarked all previously marked nethzs as "already used for verification".`); } else if (command === 'verify') { // unusable as it is, because cannot mention Discord users in the admin channel if they are not in it. TODO if (!args.length) { return message.channel.send(`You didn't provide any (Discord) user to verify! Usage: e.g \`!verify ${sampleDiscordUsername}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!verify ${sampleDiscordUsername}\``); } else { const user = getUserFromMention(args[0]); if (!user) { return message.channel.send("Please use a proper mention!"); } const theGuild = client.guilds.cache.get(config.theGuildId); const member = theGuild.members.cache.get(user.id); if (member.roles.cache.some(role => role.name === config.roleName)) { return message.channel.send(`That user already has the "${config.roleName}" role!`); } const role = theGuild.roles.cache.find(role => role.name === config.roleName); member.roles.add(role); return message.channel.send(`<@${user.id}> now has the "${config.roleName}" role, and has access to the student-only channels.`); } } else if (command === 'adminhelp') { return message.channel.send(adminCommandsStr); } else { return message.reply(`admin-command not understood: ${command}. ${adminCommandsStr}`); } } else if (message.channel.type === 'dm') { if (!message.content.startsWith(prefix)) { return message.channel.send(`I am a very stupid bot, I only respond to commands. ${availableCommandsStr}`); } const theGuild = client.guilds.cache.get(config.theGuildId); const args = message.content.slice(prefix.length).split(/ +/); const command = args.shift().toLowerCase(); const user = message.author; // user (: User) and member (: GuildMember) refer to the same person (`member.user` is `user`), but member holds information about the relation to the guild // const member = theGuild.members.cache.get(user.id); const member = await theGuild.members.fetch(user.id); if (command === 'ping') { return message.channel.send('Pong'); } else if (command === 'nethz') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!nethz ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... 
Usage: e.g \`!nethz ${sampleNethz}\``); } else if (member.roles.cache.some(role => role.name === config.roleName)) { return message.channel.send(`You are already verified as an ETH student on the Discord server **${theGuild.name}**!`); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (await verifiedNethzHashs.get(nethzHash)) { return message.channel.send(`This nethz was already used to verify a different Discord user. If you did not do it, your nethz and/or ETH mail inbox may have been used by another person! (Or maybe you left the server and joined again.) Either way, please contact an administrator of **${theGuild.name}**.`); } else { if (await discordUserId2token.get(user.id)) { // invalidate the previous token const prevToken = await discordUserId2token.get(user.id); await token2nethzHash.delete(prevToken); await discordUserId2token.delete(user.id); } const newToken = randtoken.uid(16); // save newToken, along with user.username and user.id, and set expiration time await discordUserId2token.set(user.id, newToken, config.tokenTTL * HOURS_TO_MILLISECONDS); await token2nethzHash.set(newToken, nethzHash, config.tokenTTL * HOURS_TO_MILLISECONDS); // send token mail with defined transport object const textContent = genMailContent(user.username, newToken, theGuild.name, client.user.username); const info = await transporter.sendMail({ from: { name: client.user.username, address: botMail.user }, cc: botMail.user, to: `${nethz}@student.ethz.ch`, subject: `Verify your identity on Discord server ${theGuild.name}`, text: textContent, html: converter.makeHtml(textContent.replace('\n', '\n\n')) }); console.log("Message sent: %s", info.messageId); console.log(`token-email was sent for Discord user ${user.username}`); return message.channel.send(`An email was sent to ${nethz}@student.ethz.ch, containing a token that you should now report back to me, using the \`!token\` command.`); } } } else if (command === 'token') { if (!args.length) { return message.channel.send(`You didn't write any token! Usage: e.g \`!token ${sampleToken}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!token ${sampleToken}\``); } else if (member.roles.cache.some(role => role.name === config.roleName)) { return message.channel.send(`You are already verified as an ETH student on the Discord server **${theGuild.name}**!`); } else if (!await discordUserId2token.get(user.id)) { return message.channel.send(`You haven't provided a nethz to verify yourself as!`); } else { const token = args[0]; const trueToken = await discordUserId2token.get(user.id); if (token !== trueToken) { return message.channel.send(`This is not the right token.`); } else { const role = theGuild.roles.cache.find(role => role.name === config.roleName); member.roles.add(role); const nethzHash = await token2nethzHash.get(token); // store a hash of this nethz to prevent this student from verifying multiple Discord users console.assert(!await verifiedNethzHashs.get(nethzHash)); await verifiedNethzHashs.set(nethzHash, true); await discordUserId2token.delete(user.id); // forget the token await token2nethzHash.delete(token); return message.channel.send(`Congratulations, you now have the "${config.roleName}" role on **${theGuild.name}**, so you have access to the student-only channels! No further action is required. 
(Note: you will *not* receive any confirmation email in your ETH inbox, since I never stored your nethz.)`); // TODO: maybe optionally send a greetings message in the #welcome channel } } } else if (command === 'welcomeagain') { return message.channel.send(`Please find the initial welcome message, with all the instructions for the verification process, below: \n\n${welcomeMsg(theGuild.name)}`); } else if (command === 'help') { return message.channel.send(availableCommandsStr); } else { return message.reply(`command not understood: ${command}. ${availableCommandsStr}`); } } }); client.on('guildMemberAdd', async member => { if (member.guild.id === config.theGuildId) { const dmc = member.user.dmChannel || await member.user.createDM(); dmc.send(welcomeMsg(member.guild.name)) .then(message => console.log(`Sent welcome message to member: ${member.guild.name}`)) .catch(console.error); } else { console.log("Detected a guildMemberAdd but the guild id does not match. No action was taken."); // (for debug) console.log(`member.guild.id: ${member.guild.id}; config.theGuildId: ${config.theGuildId}`); } }); client.on('guildMemberRemove', async member => { const discordUserId = member.user.id; const token = await discordUserId2token.get(discordUserId); // may be `undefined` if no such key if (member.roles.cache.some(role => role.name === config.roleName)) { // if this user was already verified console.assert(token === undefined); const dmc = member.user.dmChannel || await member.user.createDM(); dmc.send(`Hello again! I see you just left the server ${member.guild.name}, on which you were verified as an ETH student using your ETH mail. Please note that your nethz is still marked as "already used for verification". This is because I cannot tell what your nethz is from your Discord account. If you wish to join ${member.guild.name} again and verify yourself as an ETH student again, please contact one of ${member.guild.name}'s admins, so that they can unmark your nethz as "already used" manually.`); } else if (token) { // if this user was pending verification, reset the verif process for her await discordUserId2token.delete(member.user.id); await token2nethzHash.delete(token); } });
random_line_split
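For illustration, here is a minimal, self-contained sketch of the short-lived verification tokens used in the bot code above, assuming the same keyv and rand-token packages; the helper names (issueToken, checkToken) and the in-memory store are hypothetical and not part of the bot itself.

// Sketch (not the bot): issue a token with a TTL, then verify it once.
const Keyv = require('keyv');
const randtoken = require('rand-token');

const HOURS_TO_MILLISECONDS = 3600 * 1000;
const pendingTokens = new Keyv(); // in-memory here; the bot uses sqlite://db.sqlite3

async function issueToken(discordUserId, ttlHours) {
  const token = randtoken.uid(16);
  // the third argument to set() is a TTL in milliseconds; Keyv forgets the entry after that
  await pendingTokens.set(discordUserId, token, ttlHours * HOURS_TO_MILLISECONDS);
  return token;
}

async function checkToken(discordUserId, submitted) {
  const expected = await pendingTokens.get(discordUserId); // undefined if expired or never issued
  if (!expected || submitted !== expected) return false;
  await pendingTokens.delete(discordUserId); // tokens are single-use
  return true;
}

(async () => {
  const token = await issueToken('123456789012345678', 48);
  console.log(await checkToken('123456789012345678', token)); // true
  console.log(await checkToken('123456789012345678', token)); // false (already consumed)
})();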
index.js
const Discord = require('discord.js'); const config = require('./config.json'); const nodemailer = require("nodemailer"); const showdown = require('showdown'); const randtoken = require('rand-token'); const Keyv = require('keyv'); const crypto = require('crypto'); const os = require("os"); const hostname = os.hostname(); /** * hash password with sha512. * source: https://ciphertrick.com/salt-hash-passwords-using-nodejs-crypto/ * @function * @param {string} toHash - List of required fields. * @param {string} salt - Data to be validated. */ const sha512 = function (toHash, salt) { let hash = crypto.createHmac('sha512', salt); /** Hashing algorithm sha512 */ hash.update(toHash); return hash.digest('hex'); }; // source: https://discordjs.guide/miscellaneous/parsing-mention-arguments.html#using-regular-expressions function getUserFromMention(mention) { // The id is the first and only match found by the RegEx. const matches = mention.match(/^<@!?(\d+)>$/); // If supplied variable was not a mention, matches will be null instead of an array. if (!matches) return; // However the first element in the matches array will be the entire mention, not just the ID, so use index 1. const id = matches[1]; return client.users.cache.get(id); } const HOURS_TO_MILLISECONDS = 3600 * 1000; const client = new Discord.Client(); const converter = new showdown.Converter(); // use Keyv with sqlite storage const sqlite_uri = "sqlite://db.sqlite3"; const discordUserId2token = new Keyv(sqlite_uri, { namespace: "discord_user_id_to_token" }); // Discord User-ID / token pairs const token2nethzHash = new Keyv(sqlite_uri, { namespace: "token_to_nethz_hash" }); // nethz / token pairs const verifiedNethzHashs = new Keyv(sqlite_uri, { namespace: "verified_nethz_hashs" }); // the set of hashs of nethzs already used for verification (only the keys are relevant; value is always `true`) discordUserId2token.on('error', err => console.error('Keyv connection error:', err)); token2nethzHash.on('error', err => console.error('Keyv connection error:', err)); verifiedNethzHashs.on('error', err => console.error('Keyv connection error:', err)); client.login(config.token); const botMail = config.transportOptions.auth; const sampleNethz = "jsmith"; const sampleToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"; const sampleDiscordUsername = "john_sm_01"; const availableCommandsStr = `Available commands: \`!ping\`: make me say Pong \`!nethz\`: tell me your nethz; e.g \`!nethz ${sampleNethz}\` \`!token\`: tell me the token I sent you; e.g \`!token ${sampleToken}\` \`!welcomeagain\`: **print the welcome message again, with all the instructions for the verification process** \`!help\`: print this message `; const adminCommandsStr = `Admin-only commands: \`!unmark\` (admin only): unmark a nethz as "already used for verification"; e.g \`!unmark ${sampleNethz}\` \`!mark\` (admin only): mark a nethz as "already used for verification"; e.g \`!mark ${sampleNethz}\` \`!purgereqs\` (admin only): delete all active tokens, by clearing discordUserId2token and token2nethzHash WARNING: this leads to unexpected behaviour from the point of view of users who are pending verification... \`!purgemarks\` (admin only): unmark all nethzs, by clearing verifiedNethzHashs. WARNING: doing this is rarely a good idea... 
\`!verify\` (admin only): manually verify a user; e.g \`!verify @${sampleDiscordUsername}\` \`!adminhelp\` (admin only): print this message (Note: admin commands are only used in the admin channel #${config.adminChannelName}, whereas normal commands are only used in DM channels.) `; const welcomeMsg = (guildName) => `Hello! I see you just joined the server **${guildName}**. You are currently not verified as an ETH student on **${guildName}**, so you only have access to a restricted number of channels. To verify yourself as an ETH student, 1. please tell me your nethz (i.e ETH username) in the following format: \`!nethz \` + your nethz; e.g: \`!nethz ${sampleNethz}\` 2. I will send an email at <nethz>@student.ethz.ch containing a token 3. then, show me that you did receive the token, by telling me: \`!token \` + the token; e.g: \`!token ${sampleToken}\` Remarks: - To reset the process, e.g if you misspelled your nethz, just do step 1 again. (I will invalidate the previous token, don't worry.) - My email address, which I will use in step 2, is ${botMail.user}; please check in your spam folder if you don't receive anything. (Note that no human will check the inbox of ${botMail.user}, except for debugging.) - Once you receive the email, you have ${config.tokenTTL} hours to accomplish step 3, as the token expires after that duration. - I will store a salted hash of your nethz in database. (This is to prevent a student from verifying multiple Discord accounts.) I will *not* keep track of which Discord account your nethz corresponds to, and vice-versa. I am a very stupid bot. If you have any questions or encounter any problem, please send a message to an admin of **${guildName}** directly. `; const genMailContent = (discordUsername, token, guildName, botName) => `Hello, \n You have recently joined the Discord server **${guildName}**, under the username **${discordUsername}**, and provided your nethz (i.e ETH username) for verification.\n To finish the verification process, please check your Direct Message channel with me (**${botName}**) and send me the following token within ${config.tokenTTL} hours: \n ${token}\n If you did not join the Discord server **${guildName}** and tell me your nethz, then someone else provided your nethz. Then you don't need to do anything; the token will expire in ${config.tokenTTL} hours.\n Note that I am a Discord bot and that this email was autogenerated, so please don't reply to it. 
(You can reply if you really want to but no human will ever see it.)\n If you really need to, you can always contact ${config.emergencyContact.fullName}, your fellow ETH student who runs the Discord server **${guildName}**.\n \nBest regards,\n ${botName} `; // create reusable transporter object using the default SMTP transport const transporter = nodemailer.createTransport(config.transportOptions); // verify connection configuration transporter.verify(function (error, success) { if (error) { console.log(error); } else { console.assert(success); console.log("SMTP server is ready to take our messages"); } }); client.once('ready', async () => { const theGuild = client.guilds.cache.get(config.theGuildId); if (!theGuild.available) { console.warn("theGuild.available is false (it indicates a server outage)"); } // check that the bot can read/write in the config.adminChannelName channel const adminChannel = theGuild.channels.cache.find(channel => channel.name === config.adminChannelName); const readWritePerms = ['VIEW_CHANNEL', 'SEND_MESSAGES']; if (!theGuild.me.permissionsIn(adminChannel).has(readWritePerms)) { throw Error(`bot doesn't have read/write permission in admin channel ${config.adminChannelName}`); } // create role config.roleName if does not exist if (!theGuild.roles.cache.some(role => role.name === config.roleName)) { theGuild.createRole({ name: config.roleName }) .then(role => console.log(`Created new role with name ${role.name} and color ${role.color}`)) .catch(console.error); } // check that we can send email const textContent = `yo yo yo this is a test email. The bot "${client.user.username}" was just started on host ${hostname}.`; const info = await transporter.sendMail({ from: { name: client.user.username, address: botMail.user }, to: botMail.user, subject: `Test email (${client.user.username} bot startup)`, text: textContent, html: converter.makeHtml(textContent.replace('\n', '\n\n')) }); console.log("Message sent: %s", info.messageId); console.log('Ready!'); }); const prefix = config.prefix; client.on('message', async message => { if (message.author.bot) return; if (message.channel.type === 'text' && message.channel.guild.id === config.theGuildId && message.channel.name === config.adminChannelName) { if (!message.content.startsWith(prefix)) return; const args = message.content.slice(prefix.length).split(/ +/); const command = args.shift().toLowerCase(); if (command === 'unmark') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!unmark ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!unmark ${sampleNethz}\``); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (! await verifiedNethzHashs.get(nethzHash)) { return message.channel.send(`This nethz ${nethz} is not currently marked as "already used for verification". No action was performed.`); } else
} } else if (command === 'mark') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!mark ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!mark ${sampleNethz}\``); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (await verifiedNethzHashs.get(nethzHash)) { return message.channel.send(`This nethz ${nethz} is already marked as "already used for verification". No action was performed.`); } else { await verifiedNethzHashs.set(nethzHash, true); return message.channel.send(`Marked nethz ${nethz} as "already used for verification".`); } } } else if (command === 'purgereqs') { if (args.length) { message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`); } await discordUserId2token.clear(); await token2nethzHash.clear(); return message.channel.send(`Cleared all active verification tokens from database. Tip: this leads to unexpected behaviour from the point of view of the users; it might be a good idea to put a message on a public channel to explain what happened.`); } else if (command === 'purgemarks') { if (args.length) { message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`); } await verifiedNethzHashs.clear(); return message.channel.send(`Unmarked all previously marked nethzs as "already used for verification".`); } else if (command === 'verify') { // unusable as it is, because cannot mention Discord users in the admin channel if they are not in it. TODO if (!args.length) { return message.channel.send(`You didn't provide any (Discord) user to verify! Usage: e.g \`!verify ${sampleDiscordUsername}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!verify ${sampleDiscordUsername}\``); } else { const user = getUserFromMention(args[0]); if (!user) { return message.channel.send("Please use a proper mention!"); } const theGuild = client.guilds.cache.get(config.theGuildId); const member = theGuild.members.cache.get(user.id); if (member.roles.cache.some(role => role.name === config.roleName)) { return message.channel.send(`That user already has the "${config.roleName}" role!`); } const role = theGuild.roles.cache.find(role => role.name === config.roleName); member.roles.add(role); return message.channel.send(`<@${user.id}> now has the "${config.roleName}" role, and has access to the student-only channels.`); } } else if (command === 'adminhelp') { return message.channel.send(adminCommandsStr); } else { return message.reply(`admin-command not understood: ${command}. ${adminCommandsStr}`); } } else if (message.channel.type === 'dm') { if (!message.content.startsWith(prefix)) { return message.channel.send(`I am a very stupid bot, I only respond to commands. 
${availableCommandsStr}`); } const theGuild = client.guilds.cache.get(config.theGuildId); const args = message.content.slice(prefix.length).split(/ +/); const command = args.shift().toLowerCase(); const user = message.author; // user (: User) and member (: GuildMember) refer to the same person (`member.user` is `user`), but member holds information about the relation to the guild // const member = theGuild.members.cache.get(user.id); const member = await theGuild.members.fetch(user.id); if (command === 'ping') { return message.channel.send('Pong'); } else if (command === 'nethz') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!nethz ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!nethz ${sampleNethz}\``); } else if (member.roles.cache.some(role => role.name === config.roleName)) { return message.channel.send(`You are already verified as an ETH student on the Discord server **${theGuild.name}**!`); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (await verifiedNethzHashs.get(nethzHash)) { return message.channel.send(`This nethz was already used to verify a different Discord user. If you did not do it, your nethz and/or ETH mail inbox may have been used by another person! (Or maybe you left the server and joined again.) Either way, please contact an administrator of **${theGuild.name}**.`); } else { if (await discordUserId2token.get(user.id)) { // invalidate the previous token const prevToken = await discordUserId2token.get(user.id); await token2nethzHash.delete(prevToken); await discordUserId2token.delete(user.id); } const newToken = randtoken.uid(16); // save newToken, along with user.username and user.id, and set expiration time await discordUserId2token.set(user.id, newToken, config.tokenTTL * HOURS_TO_MILLISECONDS); await token2nethzHash.set(newToken, nethzHash, config.tokenTTL * HOURS_TO_MILLISECONDS); // send token mail with defined transport object const textContent = genMailContent(user.username, newToken, theGuild.name, client.user.username); const info = await transporter.sendMail({ from: { name: client.user.username, address: botMail.user }, cc: botMail.user, to: `${nethz}@student.ethz.ch`, subject: `Verify your identity on Discord server ${theGuild.name}`, text: textContent, html: converter.makeHtml(textContent.replace('\n', '\n\n')) }); console.log("Message sent: %s", info.messageId); console.log(`token-email was sent for Discord user ${user.username}`); return message.channel.send(`An email was sent to ${nethz}@student.ethz.ch, containing a token that you should now report back to me, using the \`!token\` command.`); } } } else if (command === 'token') { if (!args.length) { return message.channel.send(`You didn't write any token! Usage: e.g \`!token ${sampleToken}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... 
Usage: e.g \`!token ${sampleToken}\``); } else if (member.roles.cache.some(role => role.name === config.roleName)) { return message.channel.send(`You are already verified as an ETH student on the Discord server **${theGuild.name}**!`); } else if (!await discordUserId2token.get(user.id)) { return message.channel.send(`You haven't provided a nethz to verify yourself as!`); } else { const token = args[0]; const trueToken = await discordUserId2token.get(user.id); if (token !== trueToken) { return message.channel.send(`This is not the right token.`); } else { const role = theGuild.roles.cache.find(role => role.name === config.roleName); member.roles.add(role); const nethzHash = await token2nethzHash.get(token); // store a hash of this nethz to prevent this student from verifying multiple Discord users console.assert(!await verifiedNethzHashs.get(nethzHash)); await verifiedNethzHashs.set(nethzHash, true); await discordUserId2token.delete(user.id); // forget the token await token2nethzHash.delete(token); return message.channel.send(`Congratulations, you now have the "${config.roleName}" role on **${theGuild.name}**, so you have access to the student-only channels! No further action is required. (Note: you will *not* receive any confirmation email in your ETH inbox, since I never stored your nethz.)`); // TODO: maybe optionally send a greetings message in the #welcome channel } } } else if (command === 'welcomeagain') { return message.channel.send(`Please find the initial welcome message, with all the instructions for the verification process, below: \n\n${welcomeMsg(theGuild.name)}`); } else if (command === 'help') { return message.channel.send(availableCommandsStr); } else { return message.reply(`command not understood: ${command}. ${availableCommandsStr}`); } } }); client.on('guildMemberAdd', async member => { if (member.guild.id === config.theGuildId) { const dmc = member.user.dmChannel || await member.user.createDM(); dmc.send(welcomeMsg(member.guild.name)) .then(message => console.log(`Sent welcome message to member: ${member.guild.name}`)) .catch(console.error); } else { console.log("Detected a guildMemberAdd but the guild id does not match. No action was taken."); // (for debug) console.log(`member.guild.id: ${member.guild.id}; config.theGuildId: ${config.theGuildId}`); } }); client.on('guildMemberRemove', async member => { const discordUserId = member.user.id; const token = await discordUserId2token.get(discordUserId); // may be `undefined` if no such key if (member.roles.cache.some(role => role.name === config.roleName)) { // if this user was already verified console.assert(token === undefined); const dmc = member.user.dmChannel || await member.user.createDM(); dmc.send(`Hello again! I see you just left the server ${member.guild.name}, on which you were verified as an ETH student using your ETH mail. Please note that your nethz is still marked as "already used for verification". This is because I cannot tell what your nethz is from your Discord account. If you wish to join ${member.guild.name} again and verify yourself as an ETH student again, please contact one of ${member.guild.name}'s admins, so that they can unmark your nethz as "already used" manually.`); } else if (token) { // if this user was pending verification, reset the verif process for her await discordUserId2token.delete(member.user.id); await token2nethzHash.delete(token); } });
{ await verifiedNethzHashs.delete(nethzHash); return message.channel.send(`Unmarked nethz ${nethz} as "already used for verification".`); }
conditional_block
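The conditional block filled in above deletes a salted hash from the verifiedNethzHashs store. As a rough sketch of that bookkeeping, with hypothetical helper names (mark, unmark, isMarked) and a placeholder salt standing in for config.commonSalt:

// Sketch only: marking/unmarking a nethz via salted HMAC-SHA512 digests, never storing the nethz itself.
const crypto = require('crypto');
const Keyv = require('keyv');

const sha512 = (toHash, salt) => crypto.createHmac('sha512', salt).update(toHash).digest('hex');

const commonSalt = 'example-salt'; // placeholder; the bot reads config.commonSalt
const verified = new Keyv();       // in-memory here; the bot uses sqlite

async function mark(nethz)     { await verified.set(sha512(nethz.toLowerCase(), commonSalt), true); }
async function unmark(nethz)   { await verified.delete(sha512(nethz.toLowerCase(), commonSalt)); }
async function isMarked(nethz) { return Boolean(await verified.get(sha512(nethz.toLowerCase(), commonSalt))); }

(async () => {
  await mark('jsmith');
  console.log(await isMarked('jsmith')); // true
  await unmark('jsmith');
  console.log(await isMarked('jsmith')); // false
})();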
index.js
const Discord = require('discord.js'); const config = require('./config.json'); const nodemailer = require("nodemailer"); const showdown = require('showdown'); const randtoken = require('rand-token'); const Keyv = require('keyv'); const crypto = require('crypto'); const os = require("os"); const hostname = os.hostname(); /** * hash a string with salted HMAC-SHA512 (used here for nethzs, not passwords). * source: https://ciphertrick.com/salt-hash-passwords-using-nodejs-crypto/ * @function * @param {string} toHash - the string to hash. * @param {string} salt - the salt used as the HMAC key. */ const sha512 = function (toHash, salt) { let hash = crypto.createHmac('sha512', salt); /** Hashing algorithm sha512 */ hash.update(toHash); return hash.digest('hex'); }; // source: https://discordjs.guide/miscellaneous/parsing-mention-arguments.html#using-regular-expressions function
(mention) { // The id is the first and only match found by the RegEx. const matches = mention.match(/^<@!?(\d+)>$/); // If supplied variable was not a mention, matches will be null instead of an array. if (!matches) return; // However the first element in the matches array will be the entire mention, not just the ID, so use index 1. const id = matches[1]; return client.users.cache.get(id); } const HOURS_TO_MILLISECONDS = 3600 * 1000; const client = new Discord.Client(); const converter = new showdown.Converter(); // use Keyv with sqlite storage const sqlite_uri = "sqlite://db.sqlite3"; const discordUserId2token = new Keyv(sqlite_uri, { namespace: "discord_user_id_to_token" }); // Discord User-ID / token pairs const token2nethzHash = new Keyv(sqlite_uri, { namespace: "token_to_nethz_hash" }); // nethz / token pairs const verifiedNethzHashs = new Keyv(sqlite_uri, { namespace: "verified_nethz_hashs" }); // the set of hashs of nethzs already used for verification (only the keys are relevant; value is always `true`) discordUserId2token.on('error', err => console.error('Keyv connection error:', err)); token2nethzHash.on('error', err => console.error('Keyv connection error:', err)); verifiedNethzHashs.on('error', err => console.error('Keyv connection error:', err)); client.login(config.token); const botMail = config.transportOptions.auth; const sampleNethz = "jsmith"; const sampleToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"; const sampleDiscordUsername = "john_sm_01"; const availableCommandsStr = `Available commands: \`!ping\`: make me say Pong \`!nethz\`: tell me your nethz; e.g \`!nethz ${sampleNethz}\` \`!token\`: tell me the token I sent you; e.g \`!token ${sampleToken}\` \`!welcomeagain\`: **print the welcome message again, with all the instructions for the verification process** \`!help\`: print this message `; const adminCommandsStr = `Admin-only commands: \`!unmark\` (admin only): unmark a nethz as "already used for verification"; e.g \`!unmark ${sampleNethz}\` \`!mark\` (admin only): mark a nethz as "already used for verification"; e.g \`!mark ${sampleNethz}\` \`!purgereqs\` (admin only): delete all active tokens, by clearing discordUserId2token and token2nethzHash WARNING: this leads to unexpected behaviour from the point of view of users who are pending verification... \`!purgemarks\` (admin only): unmark all nethzs, by clearing verifiedNethzHashs. WARNING: doing this is rarely a good idea... \`!verify\` (admin only): manually verify a user; e.g \`!verify @${sampleDiscordUsername}\` \`!adminhelp\` (admin only): print this message (Note: admin commands are only used in the admin channel #${config.adminChannelName}, whereas normal commands are only used in DM channels.) `; const welcomeMsg = (guildName) => `Hello! I see you just joined the server **${guildName}**. You are currently not verified as an ETH student on **${guildName}**, so you only have access to a restricted number of channels. To verify yourself as an ETH student, 1. please tell me your nethz (i.e ETH username) in the following format: \`!nethz \` + your nethz; e.g: \`!nethz ${sampleNethz}\` 2. I will send an email at <nethz>@student.ethz.ch containing a token 3. then, show me that you did receive the token, by telling me: \`!token \` + the token; e.g: \`!token ${sampleToken}\` Remarks: - To reset the process, e.g if you misspelled your nethz, just do step 1 again. (I will invalidate the previous token, don't worry.) 
- My email address, which I will use in step 2, is ${botMail.user}; please check in your spam folder if you don't receive anything. (Note that no human will check the inbox of ${botMail.user}, except for debugging.) - Once you receive the email, you have ${config.tokenTTL} hours to accomplish step 3, as the token expires after that duration. - I will store a salted hash of your nethz in database. (This is to prevent a student from verifying multiple Discord accounts.) I will *not* keep track of which Discord account your nethz corresponds to, and vice-versa. I am a very stupid bot. If you have any questions or encounter any problem, please send a message to an admin of **${guildName}** directly. `; const genMailContent = (discordUsername, token, guildName, botName) => `Hello, \n You have recently joined the Discord server **${guildName}**, under the username **${discordUsername}**, and provided your nethz (i.e ETH username) for verification.\n To finish the verification process, please check your Direct Message channel with me (**${botName}**) and send me the following token within ${config.tokenTTL} hours: \n ${token}\n If you did not join the Discord server **${guildName}** and tell me your nethz, then someone else provided your nethz. Then you don't need to do anything; the token will expire in ${config.tokenTTL} hours.\n Note that I am a Discord bot and that this email was autogenerated, so please don't reply to it. (You can reply if you really want to but no human will ever see it.)\n If you really need to, you can always contact ${config.emergencyContact.fullName}, your fellow ETH student who runs the Discord server **${guildName}**.\n \nBest regards,\n ${botName} `; // create reusable transporter object using the default SMTP transport const transporter = nodemailer.createTransport(config.transportOptions); // verify connection configuration transporter.verify(function (error, success) { if (error) { console.log(error); } else { console.assert(success); console.log("SMTP server is ready to take our messages"); } }); client.once('ready', async () => { const theGuild = client.guilds.cache.get(config.theGuildId); if (!theGuild.available) { console.warn("theGuild.available is false (it indicates a server outage)"); } // check that the bot can read/write in the config.adminChannelName channel const adminChannel = theGuild.channels.cache.find(channel => channel.name === config.adminChannelName); const readWritePerms = ['VIEW_CHANNEL', 'SEND_MESSAGES']; if (!theGuild.me.permissionsIn(adminChannel).has(readWritePerms)) { throw Error(`bot doesn't have read/write permission in admin channel ${config.adminChannelName}`); } // create role config.roleName if does not exist if (!theGuild.roles.cache.some(role => role.name === config.roleName)) { theGuild.createRole({ name: config.roleName }) .then(role => console.log(`Created new role with name ${role.name} and color ${role.color}`)) .catch(console.error); } // check that we can send email const textContent = `yo yo yo this is a test email. 
The bot "${client.user.username}" was just started on host ${hostname}.`; const info = await transporter.sendMail({ from: { name: client.user.username, address: botMail.user }, to: botMail.user, subject: `Test email (${client.user.username} bot startup)`, text: textContent, html: converter.makeHtml(textContent.replace('\n', '\n\n')) }); console.log("Message sent: %s", info.messageId); console.log('Ready!'); }); const prefix = config.prefix; client.on('message', async message => { if (message.author.bot) return; if (message.channel.type === 'text' && message.channel.guild.id === config.theGuildId && message.channel.name === config.adminChannelName) { if (!message.content.startsWith(prefix)) return; const args = message.content.slice(prefix.length).split(/ +/); const command = args.shift().toLowerCase(); if (command === 'unmark') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!unmark ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!unmark ${sampleNethz}\``); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (! await verifiedNethzHashs.get(nethzHash)) { return message.channel.send(`This nethz ${nethz} is not currently marked as "already used for verification". No action was performed.`); } else { await verifiedNethzHashs.delete(nethzHash); return message.channel.send(`Unmarked nethz ${nethz} as "already used for verification".`); } } } else if (command === 'mark') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!mark ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!mark ${sampleNethz}\``); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (await verifiedNethzHashs.get(nethzHash)) { return message.channel.send(`This nethz ${nethz} is already marked as "already used for verification". No action was performed.`); } else { await verifiedNethzHashs.set(nethzHash, true); return message.channel.send(`Marked nethz ${nethz} as "already used for verification".`); } } } else if (command === 'purgereqs') { if (args.length) { message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`); } await discordUserId2token.clear(); await token2nethzHash.clear(); return message.channel.send(`Cleared all active verification tokens from database. Tip: this leads to unexpected behaviour from the point of view of the users; it might be a good idea to put a message on a public channel to explain what happened.`); } else if (command === 'purgemarks') { if (args.length) { message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`); } await verifiedNethzHashs.clear(); return message.channel.send(`Unmarked all previously marked nethzs as "already used for verification".`); } else if (command === 'verify') { // unusable as it is, because cannot mention Discord users in the admin channel if they are not in it. TODO if (!args.length) { return message.channel.send(`You didn't provide any (Discord) user to verify! Usage: e.g \`!verify ${sampleDiscordUsername}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... 
Usage: e.g \`!verify ${sampleDiscordUsername}\``); } else { const user = getUserFromMention(args[0]); if (!user) { return message.channel.send("Please use a proper mention!"); } const theGuild = client.guilds.cache.get(config.theGuildId); const member = theGuild.members.cache.get(user.id); if (member.roles.cache.some(role => role.name === config.roleName)) { return message.channel.send(`That user already has the "${config.roleName}" role!`); } const role = theGuild.roles.cache.find(role => role.name === config.roleName); member.roles.add(role); return message.channel.send(`<@${user.id}> now has the "${config.roleName}" role, and has access to the student-only channels.`); } } else if (command === 'adminhelp') { return message.channel.send(adminCommandsStr); } else { return message.reply(`admin-command not understood: ${command}. ${adminCommandsStr}`); } } else if (message.channel.type === 'dm') { if (!message.content.startsWith(prefix)) { return message.channel.send(`I am a very stupid bot, I only respond to commands. ${availableCommandsStr}`); } const theGuild = client.guilds.cache.get(config.theGuildId); const args = message.content.slice(prefix.length).split(/ +/); const command = args.shift().toLowerCase(); const user = message.author; // user (: User) and member (: GuildMember) refer to the same person (`member.user` is `user`), but member holds information about the relation to the guild // const member = theGuild.members.cache.get(user.id); const member = await theGuild.members.fetch(user.id); if (command === 'ping') { return message.channel.send('Pong'); } else if (command === 'nethz') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!nethz ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!nethz ${sampleNethz}\``); } else if (member.roles.cache.some(role => role.name === config.roleName)) { return message.channel.send(`You are already verified as an ETH student on the Discord server **${theGuild.name}**!`); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (await verifiedNethzHashs.get(nethzHash)) { return message.channel.send(`This nethz was already used to verify a different Discord user. If you did not do it, your nethz and/or ETH mail inbox may have been used by another person! (Or maybe you left the server and joined again.) 
Either way, please contact an administrator of **${theGuild.name}**.`); } else { if (await discordUserId2token.get(user.id)) { // invalidate the previous token const prevToken = await discordUserId2token.get(user.id); await token2nethzHash.delete(prevToken); await discordUserId2token.delete(user.id); } const newToken = randtoken.uid(16); // save newToken, along with user.username and user.id, and set expiration time await discordUserId2token.set(user.id, newToken, config.tokenTTL * HOURS_TO_MILLISECONDS); await token2nethzHash.set(newToken, nethzHash, config.tokenTTL * HOURS_TO_MILLISECONDS); // send token mail with defined transport object const textContent = genMailContent(user.username, newToken, theGuild.name, client.user.username); const info = await transporter.sendMail({ from: { name: client.user.username, address: botMail.user }, cc: botMail.user, to: `${nethz}@student.ethz.ch`, subject: `Verify your identity on Discord server ${theGuild.name}`, text: textContent, html: converter.makeHtml(textContent.replace('\n', '\n\n')) }); console.log("Message sent: %s", info.messageId); console.log(`token-email was sent for Discord user ${user.username}`); return message.channel.send(`An email was sent to ${nethz}@student.ethz.ch, containing a token that you should now report back to me, using the \`!token\` command.`); } } } else if (command === 'token') { if (!args.length) { return message.channel.send(`You didn't write any token! Usage: e.g \`!token ${sampleToken}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!token ${sampleToken}\``); } else if (member.roles.cache.some(role => role.name === config.roleName)) { return message.channel.send(`You are already verified as an ETH student on the Discord server **${theGuild.name}**!`); } else if (!await discordUserId2token.get(user.id)) { return message.channel.send(`You haven't provided a nethz to verify yourself as!`); } else { const token = args[0]; const trueToken = await discordUserId2token.get(user.id); if (token !== trueToken) { return message.channel.send(`This is not the right token.`); } else { const role = theGuild.roles.cache.find(role => role.name === config.roleName); member.roles.add(role); const nethzHash = await token2nethzHash.get(token); // store a hash of this nethz to prevent this student from verifying multiple Discord users console.assert(!await verifiedNethzHashs.get(nethzHash)); await verifiedNethzHashs.set(nethzHash, true); await discordUserId2token.delete(user.id); // forget the token await token2nethzHash.delete(token); return message.channel.send(`Congratulations, you now have the "${config.roleName}" role on **${theGuild.name}**, so you have access to the student-only channels! No further action is required. (Note: you will *not* receive any confirmation email in your ETH inbox, since I never stored your nethz.)`); // TODO: maybe optionally send a greetings message in the #welcome channel } } } else if (command === 'welcomeagain') { return message.channel.send(`Please find the initial welcome message, with all the instructions for the verification process, below: \n\n${welcomeMsg(theGuild.name)}`); } else if (command === 'help') { return message.channel.send(availableCommandsStr); } else { return message.reply(`command not understood: ${command}. 
${availableCommandsStr}`); } } }); client.on('guildMemberAdd', async member => { if (member.guild.id === config.theGuildId) { const dmc = member.user.dmChannel || await member.user.createDM(); dmc.send(welcomeMsg(member.guild.name)) .then(message => console.log(`Sent welcome message to member: ${member.guild.name}`)) .catch(console.error); } else { console.log("Detected a guildMemberAdd but the guild id does not match. No action was taken."); // (for debug) console.log(`member.guild.id: ${member.guild.id}; config.theGuildId: ${config.theGuildId}`); } }); client.on('guildMemberRemove', async member => { const discordUserId = member.user.id; const token = await discordUserId2token.get(discordUserId); // may be `undefined` if no such key if (member.roles.cache.some(role => role.name === config.roleName)) { // if this user was already verified console.assert(token === undefined); const dmc = member.user.dmChannel || await member.user.createDM(); dmc.send(`Hello again! I see you just left the server ${member.guild.name}, on which you were verified as an ETH student using your ETH mail. Please note that your nethz is still marked as "already used for verification". This is because I cannot tell what your nethz is from your Discord account. If you wish to join ${member.guild.name} again and verify yourself as an ETH student again, please contact one of ${member.guild.name}'s admins, so that they can unmark your nethz as "already used" manually.`); } else if (token) { // if this user was pending verification, reset the verif process for her await discordUserId2token.delete(member.user.id); await token2nethzHash.delete(token); } });
getUserFromMention
identifier_name
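The identifier filled in above is getUserFromMention. For reference, a standalone sketch of the same mention-parsing regex without the Discord client lookup; parseMentionId is a hypothetical name used only here.

// Sketch: extract the numeric user id from a raw Discord mention string.
function parseMentionId(mention) {
  const matches = mention.match(/^<@!?(\d+)>$/); // matches <@id> and <@!id> (nickname form)
  return matches ? matches[1] : undefined;
}

console.log(parseMentionId('<@123456789012345678>'));  // '123456789012345678'
console.log(parseMentionId('<@!123456789012345678>')); // '123456789012345678'
console.log(parseMentionId('not a mention'));          // undefined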