| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
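Each row pairs the `prefix` and `suffix` of a source file with the masked `middle` span that was cut out, and `fim_type` records what kind of span it was. As a rough illustration of how a row can be consumed, the sketch below reassembles the original text and builds a prefix-suffix-middle style training prompt; the sentinel token strings and the sample `row` values are assumptions made for the example, not fields guaranteed by the dataset.

```python
# Minimal sketch of consuming one row of this dataset.
# The sentinel tokens are placeholders; use whatever your model expects.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def reassemble(row: dict) -> str:
    """Reconstruct the original file contents from a row."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_psm_prompt(row: dict) -> str:
    """Build a prefix-suffix-middle (PSM) fill-in-the-middle example."""
    return (FIM_PREFIX + row["prefix"]
            + FIM_SUFFIX + row["suffix"]
            + FIM_MIDDLE + row["middle"])

# Hypothetical row mirroring the first example below:
row = {
    "file_name": "pipeline.py",
    "prefix": "def ",
    "middle": "augment_image",
    "suffix": "(img):\n    new_img = cv2.GaussianBlur(img, (3,3), 0)\n",
    "fim_type": "identifier_name",
}
assert reassemble(row).startswith("def augment_image(img):")
print(to_psm_prompt(row))
```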
pipeline.py | (img):
new_img = cv2.GaussianBlur(img, (3,3), 0)
#new_img = cv2.cvtColor(new_img, cv2.COLOR_YUV2RGB)
new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2HSV)
new_img = np.array(new_img, dtype = np.float64)
#Generate new random brightness
random_bright = .5+random.uniform(0.3,1.0)
new_img[:,:,2] = random_bright*new_img[:,:,2]
new_img[:,:,2][new_img[:,:,2]>255] = 255
new_img = np.array(new_img, dtype = np.uint8)
#Convert back to RGB colorspace
new_img = cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB)
#new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2YUV)
return new_img
# Read in cars and notcars
images = glob.glob('./dataset/*.png')
cars = []
notcars = []
for image in images:
cars.append(image)
images = glob.glob('./dataset_nonv/*.png')
for image in images:
notcars.append(image)
# Reduce the sample size because
# The quiz evaluator times out after 13s of CPU time
#sample_size = 500
#cars = cars[0:sample_size]
#notcars = notcars[0:sample_size]
color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 32 # HOG orientations
pix_per_cell = 8 # HOG pixels per cell
cell_per_block = 2 # HOG cells per block
hog_channel = "ALL" # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32) # Spatial binning dimensions
hist_bins = 32 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
y_start_stop = [400, 656] # Min and max in y to search in slide_window()
def train_model(cars, notcars):
car_features = extract_features(cars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
notcar_features = extract_features(notcars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
scaled_X = X_scaler.transform(X)
# Define the labels vector
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
scaled_X, y, test_size=0.2, random_state=rand_state)
print('Using:',orient,'orientations',pix_per_cell,
'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
svr = svm.SVC()
svc = GridSearchCV(svr, parameters)
#svc = LinearSVC()
# Check the training time for the SVC
t=time.time()
svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# Check the prediction time for a single sample
t=time.time()
#model = pickle.dump(svc, 'model.pkl')
with open('model.p', 'wb') as f:
pickle.dump((svc, X_scaler), f)
#return svc, X_scaler
#image = mpimg.imread('test1.jpg')
#draw_image = np.copy(image)
# Uncomment the following line if you extracted training
# data from .png images (scaled 0 to 1 by mpimg) and the
# image you are searching is a .jpg (scaled 0 to 255)
#image = image.astype(np.float32)/255
#windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop,
# xy_window=(96, 96), xy_overlap=(0.5, 0.5))
#hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space,
# spatial_size=spatial_size, hist_bins=hist_bins,
# orient=orient, pix_per_cell=pix_per_cell,
# cell_per_block=cell_per_block,
# hog_channel=hog_channel, spatial_feat=spatial_feat,
# hist_feat=hist_feat, hog_feat=hog_feat)
#window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)
#plt.imshow(window_img)
count =6
last_labels = []
history = deque(maxlen=5)
def find_vehicles_in_frame(image):
global count
global last_labels
#if count < 2:
# count = count + 1
# draw_img = draw_labeled_bboxes(np.copy(image), last_labels)
# return draw_img
#draw_img = draw_labeled_bboxes(np.copy(image), labels)
#return draw_img
#else:
#print(count)
ystart = 400
ystop = 656
scale = 1.5
box_list = []
#image = mpimg.imread('test1.jpg')
svc, X_scaler = pickle.load( open("model.p", "rb" ) )
box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 464, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 416, 480, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 500, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 430, 530, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 530, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 430, 560, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 600, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 464, 656, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
last_box_list = box_list
#ystart = 355
#ystop = 550
#scale = 1.5
#box_list2 = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
#box_list = box_list1 + box_list2
heat = np.zeros_like(image[:,:,0]).astype(np.float)
# Add heat to each box in box list
heat = add_heat(heat,box_list)
history.append(heat)
if count >=6:
#print('draw')
#print(history)
hist1 = 0
hist2 = 0 # | augment_image | identifier_name |
|
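The row above masks a function name (`fim_type = identifier_name`). One plausible way such identifier-level spans can be located in Python sources is with the standard-library `ast` module, sketched below; this is an assumption about how examples like this could be produced, not a description of the dataset's actual extraction pipeline.

```python
# Sketch: finding identifier_name / identifier_body spans with ast
# (Python 3.8+ for ast.get_source_segment). Illustrative only.
import ast

source = "def augment_image(img):\n    return img\n"
tree = ast.parse(source)

for node in ast.walk(tree):
    if isinstance(node, ast.FunctionDef):
        print("identifier_name:", node.name)                # e.g. "augment_image"
        body_src = ast.get_source_segment(source, node.body[0])
        print("identifier_body:", body_src)                 # e.g. "return img"
```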
pipeline.py | 1, 2, or "ALL"
spatial_size = (32, 32) # Spatial binning dimensions
hist_bins = 32 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
y_start_stop = [400, 656] # Min and max in y to search in slide_window()
def train_model(cars, notcars):
car_features = extract_features(cars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
notcar_features = extract_features(notcars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
scaled_X = X_scaler.transform(X)
# Define the labels vector
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
scaled_X, y, test_size=0.2, random_state=rand_state)
print('Using:',orient,'orientations',pix_per_cell,
'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
svr = svm.SVC()
svc = GridSearchCV(svr, parameters)
#svc = LinearSVC()
# Check the training time for the SVC
t=time.time()
svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# Check the prediction time for a single sample
t=time.time()
#model = pickle.dump(svc, 'model.pkl')
with open('model.p', 'wb') as f:
pickle.dump((svc, X_scaler), f)
#return svc, X_scaler
#image = mpimg.imread('test1.jpg')
#draw_image = np.copy(image)
# Uncomment the following line if you extracted training
# data from .png images (scaled 0 to 1 by mpimg) and the
# image you are searching is a .jpg (scaled 0 to 255)
#image = image.astype(np.float32)/255
#windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop,
# xy_window=(96, 96), xy_overlap=(0.5, 0.5))
#hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space,
# spatial_size=spatial_size, hist_bins=hist_bins,
# orient=orient, pix_per_cell=pix_per_cell,
# cell_per_block=cell_per_block,
# hog_channel=hog_channel, spatial_feat=spatial_feat,
# hist_feat=hist_feat, hog_feat=hog_feat)
#window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)
#plt.imshow(window_img)
count =6
last_labels = []
history = deque(maxlen=5)
def find_vehicles_in_frame(image):
global count
global last_labels
#if count < 2:
# count = count + 1
# draw_img = draw_labeled_bboxes(np.copy(image), last_labels)
# return draw_img
#draw_img = draw_labeled_bboxes(np.copy(image), labels)
#return draw_img
#else:
#print(count)
ystart = 400
ystop = 656
scale = 1.5
box_list = []
#image = mpimg.imread('test1.jpg')
svc, X_scaler = pickle.load( open("model.p", "rb" ) )
box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 464, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 416, 480, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 500, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 430, 530, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 530, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 430, 560, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 600, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 464, 656, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
last_box_list = box_list
#ystart = 355
#ystop = 550
#scale = 1.5
#box_list2 = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
#box_list = box_list1 + box_list2
heat = np.zeros_like(image[:,:,0]).astype(np.float)
# Add heat to each box in box list
heat = add_heat(heat,box_list)
history.append(heat)
if count >=6:
#print('draw')
#print(history)
hist1 = 0
hist2 = 0 #NULL
hist3 = 0 #NULL
hist4 = 0 #NULL
hist5 = 0 #NULL
hist6 = 0 #NULL
hist7 = 0 #NULL
hist1 = history.popleft()
if history:
hist2 = history.popleft()
if history:
hist3 = history.popleft()
if history:
hist4 = history.popleft()
if history:
hist5 = history.popleft()
if history:
hist6 = history.popleft()
if history:
hist7 = history.popleft()
heat = hist1 + hist2 + hist3 + hist4 + hist5 + hist6 + hist7
# Apply threshold to help remove false positives
heat = apply_threshold(heat,7)
# Visualize the heatmap when displaying
heatmap = np.clip(heat, 0, 255)
# Find final boxes from heatmap using label function
labels = label(heatmap)
last_labels = labels
#return labels, heatmap
count = 0
draw_img = draw_labeled_bboxes(np.copy(image), labels)
return draw_img
else:
#print('skip')
count = count + 1
#heatmap = np.clip(heat, 0, 255)
#labels = label(heatmap)
#last_labels = labels
draw_img = draw_labeled_bboxes(np.copy(image), last_labels)
return draw_img
def find_vehicles_in_video(video):
| output = "tracked2_" + video
input_clip = VideoFileClip(video)
clip = input_clip.fl_image(find_vehicles_in_frame)
#clip = input_clip.fl_image(save_image)
clip.write_videofile(output, audio=False) | identifier_body |
|
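The pipeline excerpts above call `add_heat` and `apply_threshold` without ever showing their definitions, which fall outside the captured prefix/suffix. A common implementation of these helpers looks roughly like the following; treat it as an assumed sketch rather than the code that actually backs these rows.

```python
# Assumed sketch of the heatmap helpers referenced in the pipeline rows.
import numpy as np

def add_heat(heatmap, bbox_list):
    # bbox_list holds ((x1, y1), (x2, y2)) boxes; add 1 inside each box.
    for ((x1, y1), (x2, y2)) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap

def apply_threshold(heatmap, threshold):
    # Zero out pixels not covered by more than `threshold` boxes.
    heatmap[heatmap <= threshold] = 0
    return heatmap
```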
pipeline.py | 1.0)
new_img[:,:,2] = random_bright*new_img[:,:,2]
new_img[:,:,2][new_img[:,:,2]>255] = 255
new_img = np.array(new_img, dtype = np.uint8)
#Convert back to RGB colorspace
new_img = cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB)
#new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2YUV)
return new_img
# Read in cars and notcars
images = glob.glob('./dataset/*.png')
cars = []
notcars = []
for image in images:
cars.append(image)
images = glob.glob('./dataset_nonv/*.png')
for image in images:
notcars.append(image)
# Reduce the sample size because
# The quiz evaluator times out after 13s of CPU time
#sample_size = 500
#cars = cars[0:sample_size]
#notcars = notcars[0:sample_size]
color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 32 # HOG orientations
pix_per_cell = 8 # HOG pixels per cell
cell_per_block = 2 # HOG cells per block
hog_channel = "ALL" # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32) # Spatial binning dimensions
hist_bins = 32 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
y_start_stop = [400, 656] # Min and max in y to search in slide_window()
def train_model(cars, notcars):
car_features = extract_features(cars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
notcar_features = extract_features(notcars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
scaled_X = X_scaler.transform(X)
# Define the labels vector
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
scaled_X, y, test_size=0.2, random_state=rand_state)
print('Using:',orient,'orientations',pix_per_cell,
'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
svr = svm.SVC()
svc = GridSearchCV(svr, parameters)
#svc = LinearSVC()
# Check the training time for the SVC
t=time.time()
svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# Check the prediction time for a single sample
t=time.time()
#model = pickle.dump(svc, 'model.pkl')
with open('model.p', 'wb') as f:
pickle.dump((svc, X_scaler), f)
#return svc, X_scaler
#image = mpimg.imread('test1.jpg')
#draw_image = np.copy(image)
# Uncomment the following line if you extracted training
# data from .png images (scaled 0 to 1 by mpimg) and the
# image you are searching is a .jpg (scaled 0 to 255)
#image = image.astype(np.float32)/255
#windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop,
# xy_window=(96, 96), xy_overlap=(0.5, 0.5))
#hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space,
# spatial_size=spatial_size, hist_bins=hist_bins,
# orient=orient, pix_per_cell=pix_per_cell,
# cell_per_block=cell_per_block,
# hog_channel=hog_channel, spatial_feat=spatial_feat,
# hist_feat=hist_feat, hog_feat=hog_feat)
#window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)
#plt.imshow(window_img)
count =6
last_labels = []
history = deque(maxlen=5)
def find_vehicles_in_frame(image):
global count
global last_labels
#if count < 2:
# count = count + 1
# draw_img = draw_labeled_bboxes(np.copy(image), last_labels)
# return draw_img
#draw_img = draw_labeled_bboxes(np.copy(image), labels)
#return draw_img
#else:
#print(count)
ystart = 400
ystop = 656
scale = 1.5
box_list = []
#image = mpimg.imread('test1.jpg')
svc, X_scaler = pickle.load( open("model.p", "rb" ) )
box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 464, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 416, 480, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 500, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 430, 530, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 530, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 430, 560, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 600, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 464, 656, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
last_box_list = box_list
#ystart = 355
#ystop = 550
#scale = 1.5
#box_list2 = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
#box_list = box_list1 + box_list2
heat = np.zeros_like(image[:,:,0]).astype(np.float)
# Add heat to each box in box list
heat = add_heat(heat,box_list)
history.append(heat)
if count >=6:
#print('draw')
#print(history)
hist1 = 0
hist2 = 0 #NULL
hist3 = 0 #NULL
hist4 = 0 #NULL
hist5 = 0 #NULL
hist6 = 0 #NULL
hist7 = 0 #NULL
hist1 = history.popleft()
if history:
|
if history:
hist3 = history.popleft()
if history:
hist4 = history.popleft()
if | hist2 = history.popleft() | conditional_block |
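The block above (and its copies in the other rows) smooths detections by manually popping up to seven heatmaps out of a `deque(maxlen=5)` and summing them. The same idea — sum whatever history is available, then threshold the total — can be written more directly as in the sketch below; it is an illustration of the technique the excerpt implements, not a drop-in replacement.

```python
# Sketch of the heatmap-history smoothing done by hand in the excerpt.
from collections import deque
import numpy as np

history = deque(maxlen=5)

def smoothed_heatmap(frame_heat, threshold=7):
    history.append(frame_heat)
    total = np.sum(np.stack(history), axis=0)  # sum over available frames
    total[total <= threshold] = 0              # drop weak detections
    return np.clip(total, 0, 255)
```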
pipeline.py | 1.0)
new_img[:,:,2] = random_bright*new_img[:,:,2]
new_img[:,:,2][new_img[:,:,2]>255] = 255
new_img = np.array(new_img, dtype = np.uint8)
#Convert back to RGB colorspace
new_img = cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB)
#new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2YUV)
return new_img
# Read in cars and notcars
images = glob.glob('./dataset/*.png')
cars = []
notcars = []
for image in images:
cars.append(image)
images = glob.glob('./dataset_nonv/*.png')
for image in images:
notcars.append(image)
# Reduce the sample size because
# The quiz evaluator times out after 13s of CPU time
#sample_size = 500
#cars = cars[0:sample_size]
#notcars = notcars[0:sample_size]
color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 32 # HOG orientations
pix_per_cell = 8 # HOG pixels per cell
cell_per_block = 2 # HOG cells per block
hog_channel = "ALL" # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32) # Spatial binning dimensions
hist_bins = 32 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
y_start_stop = [400, 656] # Min and max in y to search in slide_window()
def train_model(cars, notcars):
car_features = extract_features(cars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
notcar_features = extract_features(notcars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
scaled_X = X_scaler.transform(X)
# Define the labels vector
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
scaled_X, y, test_size=0.2, random_state=rand_state)
print('Using:',orient,'orientations',pix_per_cell,
'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
svr = svm.SVC()
svc = GridSearchCV(svr, parameters)
#svc = LinearSVC()
# Check the training time for the SVC
t=time.time()
svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# Check the prediction time for a single sample
t=time.time()
#model = pickle.dump(svc, 'model.pkl')
with open('model.p', 'wb') as f:
pickle.dump((svc, X_scaler), f)
#return svc, X_scaler
#image = mpimg.imread('test1.jpg')
#draw_image = np.copy(image)
# Uncomment the following line if you extracted training
# data from .png images (scaled 0 to 1 by mpimg) and the |
#hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space,
# spatial_size=spatial_size, hist_bins=hist_bins,
# orient=orient, pix_per_cell=pix_per_cell,
# cell_per_block=cell_per_block,
# hog_channel=hog_channel, spatial_feat=spatial_feat,
# hist_feat=hist_feat, hog_feat=hog_feat)
#window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)
#plt.imshow(window_img)
count =6
last_labels = []
history = deque(maxlen=5)
def find_vehicles_in_frame(image):
global count
global last_labels
#if count < 2:
# count = count + 1
# draw_img = draw_labeled_bboxes(np.copy(image), last_labels)
# return draw_img
#draw_img = draw_labeled_bboxes(np.copy(image), labels)
#return draw_img
#else:
#print(count)
ystart = 400
ystop = 656
scale = 1.5
box_list = []
#image = mpimg.imread('test1.jpg')
svc, X_scaler = pickle.load( open("model.p", "rb" ) )
box_list = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 464, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 416, 480, 1, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 500, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 430, 530, 1.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 530, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 430, 560, 2, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 400, 600, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
box_list += find_cars(image, 464, 656, 3.5, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
last_box_list = box_list
#ystart = 355
#ystop = 550
#scale = 1.5
#box_list2 = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
#box_list = box_list1 + box_list2
heat = np.zeros_like(image[:,:,0]).astype(np.float)
# Add heat to each box in box list
heat = add_heat(heat,box_list)
history.append(heat)
if count >=6:
#print('draw')
#print(history)
hist1 = 0
hist2 = 0 #NULL
hist3 = 0 #NULL
hist4 = 0 #NULL
hist5 = 0 #NULL
hist6 = 0 #NULL
hist7 = 0 #NULL
hist1 = history.popleft()
if history:
hist2 = history.popleft()
if history:
hist3 = history.popleft()
if history:
hist4 = history.popleft()
if | # image you are searching is a .jpg (scaled 0 to 255)
#image = image.astype(np.float32)/255
#windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop,
# xy_window=(96, 96), xy_overlap=(0.5, 0.5)) | random_line_split |
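The long run of `find_cars` calls in these rows is a multi-scale sliding-window search over fixed y-bands of the frame. The band and scale values below are copied from the excerpt; expressing them as data and looping over them, as sketched here, is one way to make that structure explicit (the `find_cars` implementation itself is assumed to exist elsewhere in the file).

```python
# The (ystart, ystop, scale) bands used by the repeated find_cars calls above.
SEARCH_BANDS = [
    (400, 656, 1.5),
    (400, 464, 1.0), (416, 480, 1.0),
    (400, 500, 1.5), (430, 530, 1.5),
    (400, 530, 2.0), (430, 560, 2.0),
    (400, 600, 3.5), (464, 656, 3.5),
]

def search_all_bands(image, svc, X_scaler, hog_params, find_cars_fn):
    # find_cars_fn is the HOG-subsampling search from the excerpt,
    # passed in here because its definition is not part of these rows.
    boxes = []
    for ystart, ystop, scale in SEARCH_BANDS:
        boxes += find_cars_fn(image, ystart, ystop, scale, svc, X_scaler, *hog_params)
    return boxes
```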
app.rs | Entry
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Action {
About,
Quit,
ClickToggle(ToggleButtonState)
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ToggleButtonState {
State1,
State2,
}
impl<'a> From<&'a glib::Variant> for ToggleButtonState {
fn from(v: &glib::Variant) -> ToggleButtonState {
v.get::<bool>().expect("Invalid record state type").into()
}
}
impl From<bool> for ToggleButtonState {
fn from(v: bool) -> ToggleButtonState {
match v {
false => ToggleButtonState::State1,
true => ToggleButtonState::State2,
}
}
}
impl From<ToggleButtonState> for glib::Variant {
fn from(v: ToggleButtonState) -> glib::Variant {
match v {
ToggleButtonState::State1 => false.to_variant(),
ToggleButtonState::State2 => true.to_variant(),
}
}
}
trait GtkComboBoxTrait {
fn get_text(self: &Self) -> String;
}
impl GtkComboBoxTrait for gtk::ComboBoxText {
fn get_text(&self) -> String {
self.get_active_text()
.expect("Failed to get widget text")
.to_string()
}
}
impl App {
fn new(application: >k::Application) -> Result<App, Box<dyn error::Error>> {
let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT);
// Here build the UI but don't show it yet
let main_window = gtk::ApplicationWindow::new(application);
main_window.set_title("(poor) Postman");
main_window.set_border_width(5);
main_window.set_position(gtk::WindowPosition::Center);
main_window.set_default_size(840, 480);
// Create headerbar for the application window
let header_bar = HeaderBar::new(&main_window);
// create a widget container,
let layout = gtk::Box::new(gtk::Orientation::Vertical, 5);
// Create a title label
let url_title = gtk::Label::new(None);
url_title.set_markup("<big>Type in your URL</big>");
// Pressing Alt+T will activate this button
let button = gtk::Button::new();
let btn_label = gtk::Label::new_with_mnemonic(
Some("_Click to trigger request")
);
button.add(&btn_label);
// Trigger request button
let trigger_btn_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
trigger_btn_row.pack_start(&button, false, true, 10);
let url_input = gtk::Entry::new();
url_input.set_placeholder_text("(poor) Postman");
url_input.insert_text("http://httpbin.org/get", &mut 0);
let verb_selector = gtk::ComboBoxText::new();
verb_selector.insert(0, "ID0", "GET");
verb_selector.insert(1, "ID1", "POST");
verb_selector.set_active(Some(0));
let verb_url_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
verb_url_row.add(&verb_selector);
// http://gtk-rs.org/docs/gtk/prelude/trait.BoxExt.html#tymethod.pack_start
// params: child, expand, fill, padding (px)
verb_url_row.pack_start(&url_input, true, true, 0);
// Payload horizontal block
let payload_title = gtk::Label::new(None);
payload_title.set_markup("<big>Payload</big>");
let payload_input = gtk::Entry::new();
payload_input.insert_text(r#"ex. {"k": "key","v": "val"}"#, &mut 0);
payload_input.set_sensitive(false);
let payload_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
payload_row.set_sensitive(false);
payload_row.add(&payload_title);
payload_row.pack_start(&payload_input, true, true, 0);
// when POST is selected, activate the payload input box
// TODO: why don't I need to also clone "payload_input"?
verb_selector.connect_changed(clone!(payload_row, payload_input => move |verb_selector| {
let txt = gtk::ComboBoxText::get_text(&verb_selector);
match txt.as_ref() {
"POST" => {
payload_row.set_sensitive(true);
payload_input.set_sensitive(true);
}
_ => {
payload_row.set_sensitive(false);
payload_input.set_sensitive(false);
}
}
}));
// connect the Button click to the callback
button.connect_clicked(clone!(button, verb_selector, url_input,
payload_input, tx => move |_| {
button.set_sensitive(false);
// and trigger HTTP thread
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
url_input.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// connect the <Return> keypress to the callback
url_input.connect_activate(clone!(button, verb_selector,
payload_input, tx => move |_entry| {
button.set_sensitive(false);
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
_entry.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// container for the response
let response_container = gtk::TextView::new();
response_container.set_editable(false);
response_container.set_wrap_mode(gtk::WrapMode::Word);
let buf = response_container.get_buffer().expect("I thought it could work...");
buf.set_text("The response will appear here...");
// add all widgets
layout.add(&url_title);
layout.add(&verb_url_row);
layout.pack_start(&payload_row, false, true, 10);
layout.add(&trigger_btn_row);
layout.pack_start(&response_container, true, true, 10);
// add the widget container to the window
main_window.add(&layout);
let app = App {
main_window,
url_input,
header_bar,
};
// Create the application actions
Action::create(&app, &application);
// attach thread receiver
rx.attach(None, move |text| {
// let text = format_response(text);
buf.set_text(&text);
// enable the button again
button.set_sensitive(true);
// keeps the channel open
glib::Continue(true)
});
Ok(app)
}
pub fn on_startup(application: >k::Application) {
let app = match App::new(application) {
Ok(app) => app,
Err(err) => {
eprintln!("Error creating app: {}",err);
return;
}
};
application.connect_activate(clone!(app => move |_| {
app.on_activate();
}));
// cant get rid of this RefCell wrapping ...
let app_container = RefCell::new(Some(app));
application.connect_shutdown(move |_| {
let app = app_container
.borrow_mut()
.take()
.expect("Shutdown called multiple times");
app.on_shutdown();
});
}
fn on_activate(&self) {
// Show our window and bring it to the foreground
self.main_window.show_all();
self.main_window
.present_with_time((glib::get_monotonic_time() / 1000) as u32);
}
// Called when the application shuts down. We drop our app struct here
fn on_shutdown(self) |
}
impl Action {
// The full action name as is used in e.g. menu models
pub fn full_name(self) -> &'static str {
match self {
Action::About => "app.about",
Action::Quit => "app.quit",
Action::ClickToggle(_) => "app.toggle",
}
}
// Create our application actions here
fn create(app: &App, application: >k::Application) {
eprintln!("Creating actions!");
// about action: when activated it will show an about dialog
let about = gio::SimpleAction::new("about", None);
about.connect_activate(clone!(application => move |_action, _parameter| {
show_about_dialog(&application);
}));
application.add_action(&about);
// switch button action
// credits: https://github.com/gtk-rs/examples/blob/master/src/bin/menu_bar_system.rs
let switch_action = gio::SimpleAction::new_stateful("switch", None, &false.to_variant());
let switch_btn = &app.header_bar.switch_btn;
switch_btn.connect_property_active_notify(clone!(switch_action => move |s| {
eprintln!("The switch is now {}", &s.get_active().to_variant());
switch_action.change_state(&s.get_active().to_variant());
}));
application.add_action(&switch_action);
// toggle button action
let toggle_action = gio::SimpleAction::new_stateful("toggle", None, &false.to_variant());
let toggle_btn = &app.header_bar.toggle_button;
toggle_btn.connect_toggled(|btn| {
eprintln!("Button state is {}", btn.get_active());
let app = gio::Application::get_default().expect("No default application");
Action::Click | {
eprintln!("Shutting down the whole thing");
} | identifier_body |
app.rs | ::Entry
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Action {
About,
Quit,
ClickToggle(ToggleButtonState)
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ToggleButtonState {
State1,
State2,
}
impl<'a> From<&'a glib::Variant> for ToggleButtonState {
fn from(v: &glib::Variant) -> ToggleButtonState {
v.get::<bool>().expect("Invalid record state type").into()
}
}
impl From<bool> for ToggleButtonState {
fn from(v: bool) -> ToggleButtonState {
match v {
false => ToggleButtonState::State1,
true => ToggleButtonState::State2,
}
}
}
impl From<ToggleButtonState> for glib::Variant {
fn from(v: ToggleButtonState) -> glib::Variant {
match v {
ToggleButtonState::State1 => false.to_variant(),
ToggleButtonState::State2 => true.to_variant(),
}
}
}
trait GtkComboBoxTrait {
fn get_text(self: &Self) -> String;
}
impl GtkComboBoxTrait for gtk::ComboBoxText {
fn get_text(&self) -> String {
self.get_active_text()
.expect("Failed to get widget text")
.to_string()
}
}
impl App {
fn new(application: >k::Application) -> Result<App, Box<dyn error::Error>> {
let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT);
// Here build the UI but don't show it yet
let main_window = gtk::ApplicationWindow::new(application);
main_window.set_title("(poor) Postman");
main_window.set_border_width(5);
main_window.set_position(gtk::WindowPosition::Center);
main_window.set_default_size(840, 480);
// Create headerbar for the application window
let header_bar = HeaderBar::new(&main_window);
// create a widget container,
let layout = gtk::Box::new(gtk::Orientation::Vertical, 5);
// Create a title label
let url_title = gtk::Label::new(None);
url_title.set_markup("<big>Type in your URL</big>");
// Pressing Alt+T will activate this button
let button = gtk::Button::new();
let btn_label = gtk::Label::new_with_mnemonic(
Some("_Click to trigger request")
);
button.add(&btn_label);
// Trigger request button
let trigger_btn_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
trigger_btn_row.pack_start(&button, false, true, 10);
let url_input = gtk::Entry::new();
url_input.set_placeholder_text("(poor) Postman");
url_input.insert_text("http://httpbin.org/get", &mut 0);
let verb_selector = gtk::ComboBoxText::new();
verb_selector.insert(0, "ID0", "GET");
verb_selector.insert(1, "ID1", "POST");
verb_selector.set_active(Some(0));
let verb_url_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
verb_url_row.add(&verb_selector);
// http://gtk-rs.org/docs/gtk/prelude/trait.BoxExt.html#tymethod.pack_start
// params: child, expand, fill, padding (px)
verb_url_row.pack_start(&url_input, true, true, 0);
// Payload horizontal block | let payload_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
payload_row.set_sensitive(false);
payload_row.add(&payload_title);
payload_row.pack_start(&payload_input, true, true, 0);
// when POST is selected, activate the payload input box
// TODO: why don't I need to also clone "payload_input"?
verb_selector.connect_changed(clone!(payload_row, payload_input => move |verb_selector| {
let txt = gtk::ComboBoxText::get_text(&verb_selector);
match txt.as_ref() {
"POST" => {
payload_row.set_sensitive(true);
payload_input.set_sensitive(true);
}
_ => {
payload_row.set_sensitive(false);
payload_input.set_sensitive(false);
}
}
}));
// connect the Button click to the callback
button.connect_clicked(clone!(button, verb_selector, url_input,
payload_input, tx => move |_| {
button.set_sensitive(false);
// and trigger HTTP thread
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
url_input.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// connect the <Return> keypress to the callback
url_input.connect_activate(clone!(button, verb_selector,
payload_input, tx => move |_entry| {
button.set_sensitive(false);
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
_entry.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// container for the response
let response_container = gtk::TextView::new();
response_container.set_editable(false);
response_container.set_wrap_mode(gtk::WrapMode::Word);
let buf = response_container.get_buffer().expect("I thought it could work...");
buf.set_text("The response will appear here...");
// add all widgets
layout.add(&url_title);
layout.add(&verb_url_row);
layout.pack_start(&payload_row, false, true, 10);
layout.add(&trigger_btn_row);
layout.pack_start(&response_container, true, true, 10);
// add the widget container to the window
main_window.add(&layout);
let app = App {
main_window,
url_input,
header_bar,
};
// Create the application actions
Action::create(&app, &application);
// attach thread receiver
rx.attach(None, move |text| {
// let text = format_response(text);
buf.set_text(&text);
// enable the button again
button.set_sensitive(true);
// keeps the channel open
glib::Continue(true)
});
Ok(app)
}
pub fn on_startup(application: >k::Application) {
let app = match App::new(application) {
Ok(app) => app,
Err(err) => {
eprintln!("Error creating app: {}",err);
return;
}
};
application.connect_activate(clone!(app => move |_| {
app.on_activate();
}));
// cant get rid of this RefCell wrapping ...
let app_container = RefCell::new(Some(app));
application.connect_shutdown(move |_| {
let app = app_container
.borrow_mut()
.take()
.expect("Shutdown called multiple times");
app.on_shutdown();
});
}
fn on_activate(&self) {
// Show our window and bring it to the foreground
self.main_window.show_all();
self.main_window
.present_with_time((glib::get_monotonic_time() / 1000) as u32);
}
// Called when the application shuts down. We drop our app struct here
fn on_shutdown(self) {
eprintln!("Shutting down the whole thing");
}
}
impl Action {
// The full action name as is used in e.g. menu models
pub fn full_name(self) -> &'static str {
match self {
Action::About => "app.about",
Action::Quit => "app.quit",
Action::ClickToggle(_) => "app.toggle",
}
}
// Create our application actions here
fn create(app: &App, application: >k::Application) {
eprintln!("Creating actions!");
// about action: when activated it will show an about dialog
let about = gio::SimpleAction::new("about", None);
about.connect_activate(clone!(application => move |_action, _parameter| {
show_about_dialog(&application);
}));
application.add_action(&about);
// switch button action
// credits: https://github.com/gtk-rs/examples/blob/master/src/bin/menu_bar_system.rs
let switch_action = gio::SimpleAction::new_stateful("switch", None, &false.to_variant());
let switch_btn = &app.header_bar.switch_btn;
switch_btn.connect_property_active_notify(clone!(switch_action => move |s| {
eprintln!("The switch is now {}", &s.get_active().to_variant());
switch_action.change_state(&s.get_active().to_variant());
}));
application.add_action(&switch_action);
// toggle button action
let toggle_action = gio::SimpleAction::new_stateful("toggle", None, &false.to_variant());
let toggle_btn = &app.header_bar.toggle_button;
toggle_btn.connect_toggled(|btn| {
eprintln!("Button state is {}", btn.get_active());
let app = gio::Application::get_default().expect("No default application");
Action::ClickToggle(T | let payload_title = gtk::Label::new(None);
payload_title.set_markup("<big>Payload</big>");
let payload_input = gtk::Entry::new();
payload_input.insert_text(r#"ex. {"k": "key","v": "val"}"#, &mut 0);
payload_input.set_sensitive(false); | random_line_split |
app.rs | ::Entry
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Action {
About,
Quit,
ClickToggle(ToggleButtonState)
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ToggleButtonState {
State1,
State2,
}
impl<'a> From<&'a glib::Variant> for ToggleButtonState {
fn | (v: &glib::Variant) -> ToggleButtonState {
v.get::<bool>().expect("Invalid record state type").into()
}
}
impl From<bool> for ToggleButtonState {
fn from(v: bool) -> ToggleButtonState {
match v {
false => ToggleButtonState::State1,
true => ToggleButtonState::State2,
}
}
}
impl From<ToggleButtonState> for glib::Variant {
fn from(v: ToggleButtonState) -> glib::Variant {
match v {
ToggleButtonState::State1 => false.to_variant(),
ToggleButtonState::State2 => true.to_variant(),
}
}
}
trait GtkComboBoxTrait {
fn get_text(self: &Self) -> String;
}
impl GtkComboBoxTrait for gtk::ComboBoxText {
fn get_text(&self) -> String {
self.get_active_text()
.expect("Failed to get widget text")
.to_string()
}
}
impl App {
fn new(application: >k::Application) -> Result<App, Box<dyn error::Error>> {
let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT);
// Here build the UI but don't show it yet
let main_window = gtk::ApplicationWindow::new(application);
main_window.set_title("(poor) Postman");
main_window.set_border_width(5);
main_window.set_position(gtk::WindowPosition::Center);
main_window.set_default_size(840, 480);
// Create headerbar for the application window
let header_bar = HeaderBar::new(&main_window);
// create a widget container,
let layout = gtk::Box::new(gtk::Orientation::Vertical, 5);
// Create a title label
let url_title = gtk::Label::new(None);
url_title.set_markup("<big>Type in your URL</big>");
// Pressing Alt+T will activate this button
let button = gtk::Button::new();
let btn_label = gtk::Label::new_with_mnemonic(
Some("_Click to trigger request")
);
button.add(&btn_label);
// Trigger request button
let trigger_btn_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
trigger_btn_row.pack_start(&button, false, true, 10);
let url_input = gtk::Entry::new();
url_input.set_placeholder_text("(poor) Postman");
url_input.insert_text("http://httpbin.org/get", &mut 0);
let verb_selector = gtk::ComboBoxText::new();
verb_selector.insert(0, "ID0", "GET");
verb_selector.insert(1, "ID1", "POST");
verb_selector.set_active(Some(0));
let verb_url_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
verb_url_row.add(&verb_selector);
// http://gtk-rs.org/docs/gtk/prelude/trait.BoxExt.html#tymethod.pack_start
// params: child, expand, fill, padding (px)
verb_url_row.pack_start(&url_input, true, true, 0);
// Payload horizontal block
let payload_title = gtk::Label::new(None);
payload_title.set_markup("<big>Payload</big>");
let payload_input = gtk::Entry::new();
payload_input.insert_text(r#"ex. {"k": "key","v": "val"}"#, &mut 0);
payload_input.set_sensitive(false);
let payload_row = gtk::Box::new(gtk::Orientation::Horizontal, 5);
payload_row.set_sensitive(false);
payload_row.add(&payload_title);
payload_row.pack_start(&payload_input, true, true, 0);
// when POST is selected, activate the payload input box
// TODO: why don't I need to also clone "payload_input"?
verb_selector.connect_changed(clone!(payload_row, payload_input => move |verb_selector| {
let txt = gtk::ComboBoxText::get_text(&verb_selector);
match txt.as_ref() {
"POST" => {
payload_row.set_sensitive(true);
payload_input.set_sensitive(true);
}
_ => {
payload_row.set_sensitive(false);
payload_input.set_sensitive(false);
}
}
}));
// connect the Button click to the callback
button.connect_clicked(clone!(button, verb_selector, url_input,
payload_input, tx => move |_| {
button.set_sensitive(false);
// and trigger HTTP thread
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
url_input.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// connect the <Return> keypress to the callback
url_input.connect_activate(clone!(button, verb_selector,
payload_input, tx => move |_entry| {
button.set_sensitive(false);
spawn_thread(
&tx,
gtk::ComboBoxText::get_text(&verb_selector),
_entry.get_buffer().get_text().to_owned(),
Some(json!(payload_input.get_buffer().get_text().to_owned()))
);
}));
// container for the response
let response_container = gtk::TextView::new();
response_container.set_editable(false);
response_container.set_wrap_mode(gtk::WrapMode::Word);
let buf = response_container.get_buffer().expect("I thought it could work...");
buf.set_text("The response will appear here...");
// add all widgets
layout.add(&url_title);
layout.add(&verb_url_row);
layout.pack_start(&payload_row, false, true, 10);
layout.add(&trigger_btn_row);
layout.pack_start(&response_container, true, true, 10);
// add the widget container to the window
main_window.add(&layout);
let app = App {
main_window,
url_input,
header_bar,
};
// Create the application actions
Action::create(&app, &application);
// attach thread receiver
rx.attach(None, move |text| {
// let text = format_response(text);
buf.set_text(&text);
// enable the button again
button.set_sensitive(true);
// keeps the channel open
glib::Continue(true)
});
Ok(app)
}
pub fn on_startup(application: >k::Application) {
let app = match App::new(application) {
Ok(app) => app,
Err(err) => {
eprintln!("Error creating app: {}",err);
return;
}
};
application.connect_activate(clone!(app => move |_| {
app.on_activate();
}));
// cant get rid of this RefCell wrapping ...
let app_container = RefCell::new(Some(app));
application.connect_shutdown(move |_| {
let app = app_container
.borrow_mut()
.take()
.expect("Shutdown called multiple times");
app.on_shutdown();
});
}
fn on_activate(&self) {
// Show our window and bring it to the foreground
self.main_window.show_all();
self.main_window
.present_with_time((glib::get_monotonic_time() / 1000) as u32);
}
// Called when the application shuts down. We drop our app struct here
fn on_shutdown(self) {
eprintln!("Shutting down the whole thing");
}
}
impl Action {
// The full action name as is used in e.g. menu models
pub fn full_name(self) -> &'static str {
match self {
Action::About => "app.about",
Action::Quit => "app.quit",
Action::ClickToggle(_) => "app.toggle",
}
}
// Create our application actions here
fn create(app: &App, application: >k::Application) {
eprintln!("Creating actions!");
// about action: when activated it will show an about dialog
let about = gio::SimpleAction::new("about", None);
about.connect_activate(clone!(application => move |_action, _parameter| {
show_about_dialog(&application);
}));
application.add_action(&about);
// switch button action
// credits: https://github.com/gtk-rs/examples/blob/master/src/bin/menu_bar_system.rs
let switch_action = gio::SimpleAction::new_stateful("switch", None, &false.to_variant());
let switch_btn = &app.header_bar.switch_btn;
switch_btn.connect_property_active_notify(clone!(switch_action => move |s| {
eprintln!("The switch is now {}", &s.get_active().to_variant());
switch_action.change_state(&s.get_active().to_variant());
}));
application.add_action(&switch_action);
// toggle button action
let toggle_action = gio::SimpleAction::new_stateful("toggle", None, &false.to_variant());
let toggle_btn = &app.header_bar.toggle_button;
toggle_btn.connect_toggled(|btn| {
eprintln!("Button state is {}", btn.get_active());
let app = gio::Application::get_default().expect("No default application");
Action::Click | from | identifier_name |
footprint_analysis.rs | an instruction
register_writes_ignored: HashSet<Name>,
/// A store is any instruction with a WriteMem event
is_store: bool,
/// A load is any instruction with a ReadMem event
is_load: bool,
/// A branch is any instruction with a Branch event
is_branch: bool,
/// An exclusive is any event with an exclusive read or write kind.
is_exclusive: bool,
/// A cache-op is any event with a CacheOp event
is_cache_op: bool,
}
pub struct Footprintkey {
opcode: String,
}
impl Cachekey for Footprintkey {
fn key(&self) -> String |
}
impl Cacheable for Footprint {
type Key = Footprintkey;
}
impl Footprint {
fn new() -> Self {
Footprint {
write_data_taints: (HashSet::new(), false),
mem_addr_taints: (HashSet::new(), false),
branch_addr_taints: (HashSet::new(), false),
register_reads: HashSet::new(),
register_writes: HashSet::new(),
register_writes_tainted: HashSet::new(),
register_writes_ignored: HashSet::new(),
is_store: false,
is_load: false,
is_branch: false,
is_exclusive: false,
is_cache_op: false,
}
}
/// This just prints the footprint information in a human-readable
/// form for debugging.
pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> {
write!(buf, "Footprint:\n Memory write data:")?;
for (reg, accessor) in &self.write_data_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Memory address:")?;
for (reg, accessor) in &self.mem_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Branch address:")?;
for (reg, accessor) in &self.branch_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register reads:")?;
for (reg, accessor) in &self.register_reads {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes:")?;
for (reg, accessor) in &self.register_writes {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes (tainted):")?;
for (reg, accessor) in &self.register_writes_tainted {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Is store: {}", self.is_store)?;
write!(buf, "\n Is load: {}", self.is_load)?;
write!(buf, "\n Is exclusive: {}", self.is_exclusive)?;
write!(buf, "\n Is branch: {}", self.is_branch)?;
writeln!(buf)?;
Ok(())
}
}
// There is an rmw dependency from `from` to `to` if `from` is a
// load-exclusive and `to` is a store-exclusive and there are no
// intervening exclusives.
#[allow(clippy::needless_range_loop)]
pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from > to {
return false;
}
let from_footprint = footprints.get(&instrs[from]).unwrap();
if !(from_footprint.is_exclusive && from_footprint.is_load) {
return false;
}
for i in (from + 1)..to {
if footprints.get(&instrs[i]).unwrap().is_exclusive {
return false;
}
}
let to_footprint = footprints.get(&instrs[to]).unwrap();
to_footprint.is_exclusive && to_footprint.is_store
}
/// The set of registers that could be (syntactically) touched by the
/// first instruction before reaching the second.
#[allow(clippy::needless_range_loop)]
fn touched_by<B: BV>(
from: usize,
to: usize,
instrs: &[B],
footprints: &HashMap<B, Footprint>,
) -> HashSet<(Name, Vec<Accessor>)> {
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = HashSet::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.insert(wreg.clone());
}
}
}
}
if new_touched.is_empty() {
for wreg in &footprint.register_writes {
touched.remove(wreg);
}
} else {
new_touched.drain().for_each(|wreg| {
touched.insert(wreg);
})
}
}
touched
}
/// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// Panics if either `from` or `to` are out-of-bounds in `instrs`, or
/// if an instruction does not have a footprint.
pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be po-order-later than `from` for the dependency to exist.
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
// If any of the registers transitively touched by the first
// instruction's register writes can feed into a memory address
// used by the last we have an address dependency.
for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
pub fn data_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
#[allow(clippy::needless_range_loop)]
pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be a program-order later load or store
let to_footprint = footprints.get(&instrs[from]).unwrap();
if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) {
return false;
}
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = Vec::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
if footprint.is_branch {
for reg in &footprint.branch_addr_taints.0 {
if touched.contains(®) {
return true;
}
}
}
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.push(wreg.clone());
}
}
}
}
new_touched.drain(..).for_each(|wreg| {
touched.insert(wreg);
})
}
false
}
#[derive(Debug)]
pub enum FootprintError {
NoIslaFootprintFn,
SymbolicInstruction,
ExecutionError(String),
}
impl fmt | {
format!("opcode_{}", self.opcode)
} | identifier_body |
footprint_analysis.rs | an instruction
register_writes_ignored: HashSet<Name>,
/// A store is any instruction with a WriteMem event
is_store: bool,
/// A load is any instruction with a ReadMem event
is_load: bool,
/// A branch is any instruction with a Branch event
is_branch: bool,
/// An exclusive is any event with an exclusive read or write kind.
is_exclusive: bool,
/// A cache-op is any event with a CacheOp event
is_cache_op: bool,
}
pub struct Footprintkey {
opcode: String,
}
impl Cachekey for Footprintkey {
fn key(&self) -> String {
format!("opcode_{}", self.opcode)
}
}
impl Cacheable for Footprint {
type Key = Footprintkey;
}
impl Footprint {
fn new() -> Self {
Footprint {
write_data_taints: (HashSet::new(), false),
mem_addr_taints: (HashSet::new(), false),
branch_addr_taints: (HashSet::new(), false),
register_reads: HashSet::new(),
register_writes: HashSet::new(),
register_writes_tainted: HashSet::new(),
register_writes_ignored: HashSet::new(),
is_store: false,
is_load: false,
is_branch: false,
is_exclusive: false,
is_cache_op: false,
}
}
/// This just prints the footprint information in a human-readable
/// form for debugging.
pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> {
write!(buf, "Footprint:\n Memory write data:")?;
for (reg, accessor) in &self.write_data_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Memory address:")?;
for (reg, accessor) in &self.mem_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Branch address:")?;
for (reg, accessor) in &self.branch_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register reads:")?;
for (reg, accessor) in &self.register_reads {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes:")?;
for (reg, accessor) in &self.register_writes {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes (tainted):")?;
for (reg, accessor) in &self.register_writes_tainted {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Is store: {}", self.is_store)?;
write!(buf, "\n Is load: {}", self.is_load)?;
write!(buf, "\n Is exclusive: {}", self.is_exclusive)?;
write!(buf, "\n Is branch: {}", self.is_branch)?;
writeln!(buf)?;
Ok(())
}
}
// There is an rmw dependency from `from` to `to` if `from` is a
// load-exclusive and `to` is a store-exclusive and there are no
// intervening exclusives.
#[allow(clippy::needless_range_loop)]
pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from > to {
return false;
}
let from_footprint = footprints.get(&instrs[from]).unwrap();
if !(from_footprint.is_exclusive && from_footprint.is_load) {
return false;
}
for i in (from + 1)..to {
if footprints.get(&instrs[i]).unwrap().is_exclusive {
return false;
}
}
let to_footprint = footprints.get(&instrs[to]).unwrap();
to_footprint.is_exclusive && to_footprint.is_store
}
/// The set of registers that could be (syntactically) touched by the
/// first instruction before reaching the second.
#[allow(clippy::needless_range_loop)]
fn touched_by<B: BV>(
from: usize,
to: usize,
instrs: &[B],
footprints: &HashMap<B, Footprint>,
) -> HashSet<(Name, Vec<Accessor>)> {
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = HashSet::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.insert(wreg.clone());
}
}
}
}
if new_touched.is_empty() {
for wreg in &footprint.register_writes {
touched.remove(wreg);
}
} else {
new_touched.drain().for_each(|wreg| {
touched.insert(wreg);
})
}
}
touched
}
/// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// Panics if either `from` or `to` are out-of-bounds in `instrs`, or
/// if an instruction does not have a footprint.
pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be po-order-later than `from` for the dependency to exist.
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
// If any of the registers transitively touched by the first
// instruction's register writes can feed into a memory address
// used by the last we have an address dependency.
for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
pub fn | <B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
#[allow(clippy::needless_range_loop)]
pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be a program-order later load or store
let to_footprint = footprints.get(&instrs[from]).unwrap();
if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) {
return false;
}
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = Vec::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
if footprint.is_branch {
for reg in &footprint.branch_addr_taints.0 {
if touched.contains(®) {
return true;
}
}
}
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.push(wreg.clone());
}
}
}
}
new_touched.drain(..).for_each(|wreg| {
touched.insert(wreg);
})
}
false
}
#[derive(Debug)]
pub enum FootprintError {
NoIslaFootprintFn,
SymbolicInstruction,
ExecutionError(String),
}
impl fmt:: | data_dep | identifier_name |
footprint_analysis.rs | an instruction
register_writes_ignored: HashSet<Name>,
/// A store is any instruction with a WriteMem event
is_store: bool,
/// A load is any instruction with a ReadMem event
is_load: bool,
/// A branch is any instruction with a Branch event
is_branch: bool,
/// An exclusive is any event with an exclusive read or write kind.
is_exclusive: bool,
/// A cache-op is any event with a CacheOp event
is_cache_op: bool,
}
pub struct Footprintkey {
opcode: String,
}
impl Cachekey for Footprintkey {
fn key(&self) -> String {
format!("opcode_{}", self.opcode)
}
}
impl Cacheable for Footprint {
type Key = Footprintkey;
}
impl Footprint {
fn new() -> Self {
Footprint {
write_data_taints: (HashSet::new(), false),
mem_addr_taints: (HashSet::new(), false),
branch_addr_taints: (HashSet::new(), false),
register_reads: HashSet::new(),
register_writes: HashSet::new(),
register_writes_tainted: HashSet::new(),
register_writes_ignored: HashSet::new(),
is_store: false,
is_load: false,
is_branch: false,
is_exclusive: false,
is_cache_op: false,
}
}
/// This just prints the footprint information in a human-readable
/// form for debugging.
pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> {
write!(buf, "Footprint:\n Memory write data:")?;
for (reg, accessor) in &self.write_data_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Memory address:")?;
for (reg, accessor) in &self.mem_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Branch address:")?;
for (reg, accessor) in &self.branch_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register reads:")?;
for (reg, accessor) in &self.register_reads {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes:")?;
for (reg, accessor) in &self.register_writes {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes (tainted):")?;
for (reg, accessor) in &self.register_writes_tainted {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Is store: {}", self.is_store)?;
write!(buf, "\n Is load: {}", self.is_load)?;
write!(buf, "\n Is exclusive: {}", self.is_exclusive)?;
write!(buf, "\n Is branch: {}", self.is_branch)?;
writeln!(buf)?;
Ok(())
}
}
// There is an rmw dependency from `from` to `to` if `from` is a
// load-exclusive and `to` is a store-exclusive and there are no
// intervening exclusives.
#[allow(clippy::needless_range_loop)]
pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from > to {
return false;
}
let from_footprint = footprints.get(&instrs[from]).unwrap();
if !(from_footprint.is_exclusive && from_footprint.is_load) {
return false;
}
for i in (from + 1)..to {
if footprints.get(&instrs[i]).unwrap().is_exclusive {
return false;
}
} |
let to_footprint = footprints.get(&instrs[to]).unwrap();
to_footprint.is_exclusive && to_footprint.is_store
}
/// The set of registers that could be (syntactically) touched by the
/// first instruction before reaching the second.
#[allow(clippy::needless_range_loop)]
fn touched_by<B: BV>(
from: usize,
to: usize,
instrs: &[B],
footprints: &HashMap<B, Footprint>,
) -> HashSet<(Name, Vec<Accessor>)> {
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = HashSet::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.insert(wreg.clone());
}
}
}
}
if new_touched.is_empty() {
for wreg in &footprint.register_writes {
touched.remove(wreg);
}
} else {
new_touched.drain().for_each(|wreg| {
touched.insert(wreg);
})
}
}
touched
}
/// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// Panics if either `from` or `to` are out-of-bounds in `instrs`, or
/// if an instruction does not have a footprint.
pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be po-order-later than `from` for the dependency to exist.
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
// If any of the registers transitively touched by the first
// instruction's register writes can feed into a memory address
// used by the last we have an address dependency.
for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
pub fn data_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
#[allow(clippy::needless_range_loop)]
pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be a program-order later load or store
let to_footprint = footprints.get(&instrs[from]).unwrap();
if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) {
return false;
}
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = Vec::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
if footprint.is_branch {
for reg in &footprint.branch_addr_taints.0 {
if touched.contains(®) {
return true;
}
}
}
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.push(wreg.clone());
}
}
}
}
new_touched.drain(..).for_each(|wreg| {
touched.insert(wreg);
})
}
false
}
#[derive(Debug)]
pub enum FootprintError {
NoIslaFootprintFn,
SymbolicInstruction,
ExecutionError(String),
}
impl fmt:: | random_line_split |
|
footprint_analysis.rs | an instruction
register_writes_ignored: HashSet<Name>,
/// A store is any instruction with a WriteMem event
is_store: bool,
/// A load is any instruction with a ReadMem event
is_load: bool,
/// A branch is any instruction with a Branch event
is_branch: bool,
/// An exclusive is any event with an exclusive read or write kind.
is_exclusive: bool,
/// A cache-op is any event with a CacheOp event
is_cache_op: bool,
}
pub struct Footprintkey {
opcode: String,
}
impl Cachekey for Footprintkey {
fn key(&self) -> String {
format!("opcode_{}", self.opcode)
}
}
impl Cacheable for Footprint {
type Key = Footprintkey;
}
impl Footprint {
fn new() -> Self {
Footprint {
write_data_taints: (HashSet::new(), false),
mem_addr_taints: (HashSet::new(), false),
branch_addr_taints: (HashSet::new(), false),
register_reads: HashSet::new(),
register_writes: HashSet::new(),
register_writes_tainted: HashSet::new(),
register_writes_ignored: HashSet::new(),
is_store: false,
is_load: false,
is_branch: false,
is_exclusive: false,
is_cache_op: false,
}
}
/// This just prints the footprint information in a human-readable
/// form for debugging.
pub fn pretty(&self, buf: &mut dyn Write, symtab: &Symtab) -> Result<(), Box<dyn Error>> {
write!(buf, "Footprint:\n Memory write data:")?;
for (reg, accessor) in &self.write_data_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Memory address:")?;
for (reg, accessor) in &self.mem_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Branch address:")?;
for (reg, accessor) in &self.branch_addr_taints.0 {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register reads:")?;
for (reg, accessor) in &self.register_reads {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes:")?;
for (reg, accessor) in &self.register_writes {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Register writes (tainted):")?;
for (reg, accessor) in &self.register_writes_tainted {
write!(buf, " {}", zencode::decode(symtab.to_str(*reg)))?;
for component in accessor {
component.pretty(buf, symtab)?
}
}
write!(buf, "\n Is store: {}", self.is_store)?;
write!(buf, "\n Is load: {}", self.is_load)?;
write!(buf, "\n Is exclusive: {}", self.is_exclusive)?;
write!(buf, "\n Is branch: {}", self.is_branch)?;
writeln!(buf)?;
Ok(())
}
}
// There is an rmw dependency from `from` to `to` if `from` is a
// load-exclusive and `to` is a store-exclusive and there are no
// intervening exclusives.
#[allow(clippy::needless_range_loop)]
pub fn rmw_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from > to {
return false;
}
let from_footprint = footprints.get(&instrs[from]).unwrap();
if !(from_footprint.is_exclusive && from_footprint.is_load) {
return false;
}
for i in (from + 1)..to {
if footprints.get(&instrs[i]).unwrap().is_exclusive |
}
let to_footprint = footprints.get(&instrs[to]).unwrap();
to_footprint.is_exclusive && to_footprint.is_store
}
/// The set of registers that could be (syntactically) touched by the
/// first instruction before reaching the second.
#[allow(clippy::needless_range_loop)]
fn touched_by<B: BV>(
from: usize,
to: usize,
instrs: &[B],
footprints: &HashMap<B, Footprint>,
) -> HashSet<(Name, Vec<Accessor>)> {
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = HashSet::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.insert(wreg.clone());
}
}
}
}
if new_touched.is_empty() {
for wreg in &footprint.register_writes {
touched.remove(wreg);
}
} else {
new_touched.drain().for_each(|wreg| {
touched.insert(wreg);
})
}
}
touched
}
/// Returns true if there exists an RR or RW address dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// Panics if either `from` or `to` are out-of-bounds in `instrs`, or
/// if an instruction does not have a footprint.
pub fn addr_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be po-order-later than `from` for the dependency to exist.
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
// If any of the registers transitively touched by the first
// instruction's register writes can feed into a memory address
// used by the last we have an address dependency.
for reg in &footprints.get(&instrs[to]).unwrap().mem_addr_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW data dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
pub fn data_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
if from >= to {
return false;
}
let touched = touched_by(from, to, instrs, footprints);
for reg in &footprints.get(&instrs[to]).unwrap().write_data_taints.0 {
if touched.contains(reg) {
return true;
}
}
false
}
/// Returns true if there exists an RW or RR control dependency from `instrs[from]` to `instrs[to]`.
///
/// # Panics
///
/// See `addr_dep`
#[allow(clippy::needless_range_loop)]
pub fn ctrl_dep<B: BV>(from: usize, to: usize, instrs: &[B], footprints: &HashMap<B, Footprint>) -> bool {
// `to` must be a program-order later load or store
let to_footprint = footprints.get(&instrs[from]).unwrap();
if !(to_footprint.is_load || to_footprint.is_store) || (from >= to) {
return false;
}
let mut touched = footprints.get(&instrs[from]).unwrap().register_writes_tainted.clone();
let mut new_touched = Vec::new();
for i in (from + 1)..to {
let footprint = footprints.get(&instrs[i]).unwrap();
if footprint.is_branch {
for reg in &footprint.branch_addr_taints.0 {
if touched.contains(®) {
return true;
}
}
}
for rreg in &touched {
if footprint.register_reads.contains(rreg) {
for wreg in &footprint.register_writes {
if !footprint.register_writes_ignored.contains(&wreg.0) {
new_touched.push(wreg.clone());
}
}
}
}
new_touched.drain(..).for_each(|wreg| {
touched.insert(wreg);
})
}
false
}
#[derive(Debug)]
pub enum FootprintError {
NoIslaFootprintFn,
SymbolicInstruction,
ExecutionError(String),
}
impl fmt | {
return false;
} | conditional_block |
express.js | eg2_query_parameter = () => {
app.get('/', (req, res) => {
console.log('query: all')
console.log('--------------------')
console.log(req.query)
console.log('query: one by one')
console.log('--------------------')
for (const key in req.query) {
console.log(key, ': ', req.query[key])
}
console.log(`req.query.name: ${req.query.name}`)
console.log('--------------------')
res.end()
});
app.listen(3000)
};
eg3_post_query = () => {
// for Content-Type: application/json
// if header =
app.use(express.json());
// for Content-Type: application/x-www-form-urlencoded
// if header =
app.use(express.urlencoded());
app.post('/form', (req, res) => {
const name = req.body.name;
});
};
eg4_response = () => {
app.get('/answer', (req, res) => {
// if give text Content-Type: text/html
// if give object Content-Type: application/json
res.send({"coucou": "walou"});
});
app.get('/200', (req, res) => {
res.sendStatus(200);
// <=> res.status(200).send('Ok');
});
app.get('/403', (req, res) => {
res.sendStatus(403);
// <=> res.status(403).send('Forbidden');
});
app.get('/404', (req, res) => {
res.sendStatus(404);
// <=> res.status(404).send('File not found');
});
app.get('/500', (req, res) => {
res.sendStatus(500)
// === res.status(500).send('Internal Server Error')
});
app.get('/json', (req, res) => {
res.json({"coucou": "walou"});
});
app.listen(3000, () => console.log('Server ready'))
};
eg5_header = () => {
app.get('/json', (req, res) => {
res.set('content-type', 'application/json');
res.type('json');
res.end();
});
app.listen(3000, () => console.log('Server ready'))
};
eg6_redirect = () => {
// made a 302
app.get('/1', (req, res) => {
res.redirect('/2');
});
app.get('/2', (req, res) => {
res.redirect(301, '/3/hey');
});
app.get('/3/hey', (req, res) => {
res.redirect(301, '/..');
});
app.get('/3', (req, res) => {
res.end();
});
app.get('/back', (req, res) => {
res.redirect('back');
});
app.listen(3000, () => console.log('Server ready'))
};
eg7_routing_parameters = () => {
app.get('/uppercase/:theValue', (req, res) => {
res.send(req.params.theValue.toUpperCase());
});
// regex will match /post , /post/first , /thepost , /posting/something , and so on.
app.get(/post/, (req, res) => res.end())
app.listen(3000, () => console.log('Server ready'))
};
eg8_cors = () => {
const whitelist = ['http://example1.com', 'http://example2.com']
const corsOptions = {
origin: function(origin, callback) {
if (whitelist.indexOf(origin) !== -1) | else {
callback(new Error('Not allowed by CORS'))
}
}
}
app.get('/with-cors', cors(corsOptions), (req, res, next) => {
res.json({ msg: 'WHOAH with CORS it works!' });
});
app.listen(3000, () => console.log('Server ready'))
};
eg9_prefligth = () => {
//allow OPTIONS on just one resource
app.options('/the/resource/you/request', cors());
//allow OPTIONS on all resources
app.options('*', cors());
};
eg10_middleware = () => {
const myMiddleware = (req, res, next) => {
next()
};
app.get('/', myMiddleware, (req, res) => res.send('Hello World!'));
};
eg11_static_file = () => {
app.get('/', (req, res) => res.download('./yarn.lock'));
app.get('/', (req, res) => res.download('./yarn.lock', './dat_spam.txt'));
app.listen(3000, () => console.log('Server ready'));
};
eg12_session = () => {
app.use(session({
'secret': '343ji43j4n3jn4jk3n',
}));
app.get('/', (req, res, next) => {
req.session.name = 'Flavio'
res.send(req.session);
console.log(req.session);
});
app.listen(3000, () => console.log('Server ready'));
};
eg13_validating_input = () => {
app.post('/form', [
check('name')
.isAlpha()
.withMessage('Must be only alphabetical chars')
.isLength({ min: 10 })
.withMessage('Must be at least 10 chars long'),
check('email')
.isEmail()
.custom(email => {
if (alreadyHaveEmail(email)) {
throw new Error('Email already registered')
}
}),
check('age').isNumeric()
], (req, res) => {
const name = req.body.name;
const email = req.body.email;
const age = req.body.age;
});
app.listen(3000, () => console.log('Server ready'));
};
eg14_sanitizing = () => {
app.use(express.json());
// trim() trims characters (whitespace by default) at the beginning and at the end of a string
// escape() replaces < , > , & , ' , " and / with their corresponding HTML entities
// normalizeEmail() canonicalizes an email address. Accepts several options to lowercase email addresses or subaddresses (e.g. [email protected] )
// blacklist() remove characters that appear in the blacklist
// whitelist() remove characters that do not appear in the whitelist
// unescape() replaces HTML encoded entities with < , > , & , ' , " and /
// ltrim() like trim(), but only trims characters at the start of the string
// rtrim() like trim(), but only trims characters at the end of the string
// stripLow() remove ASCII control characters, which are normally invisible
const sanitizeValue = value => {
return value;
};
app.post('/form', [
check('name')
.isAlpha()
.withMessage('Must be only alphabetical chars')
.isLength({ min: 10 })
.withMessage('Must be at least 10 chars long')
.trim().escape(),
check('email')
.isEmail()
.custom(email => {
if (alreadyHaveEmail(email)) {
throw new Error('Email already registered')
}
})
.normalizeEmail(),
check('age').isNumeric()
.trim().escape(),
check('value').customSanitizer(value => sanitizeValue(value)),
], (req, res) => {
const name = req.body.name;
const email = req.body.email;
const age = req.body.age;
});
app.listen(3000, () => console.log('Server ready'));
};
eg15_handling_form = () => {
// wtf ... don t understand
app.listen(3000, () => console.log('Server ready'));
};
eg16_file_upload = () => {
app.listen(3000, () => console.log('Server ready'));
};
eg17_https = () => {
/**
* with openssl generate certs
* `openssl req -nodes -new -x509 -keyout server.key -out server.cert`
* Just remember to set this to localhost
*/
const https = require('https');
const fs = require('fs');
app.get('/', (req, res) => {
res.send('Hello HTTPS!');
});
https.createServer({
key: fs.readFileSync('server.key'),
cert: fs.readFileSync('server.cert')
}, app).listen(3000, () => {
console.log('Listening...');
});
};
eg18_let_s_encrypt = () => {
/**
* install certbot
* ```
* sudo add-apt repository ppa:certbot/certbot
* sudo apt-get update
* sudo apt-get install certbot
* ```
*
* generate cert
* certbot certonly --manual
*
* active renewal
* 0 */12 * * * root /usr/local/bin/certbot renew >/dev/null 2>&1
*/
app.use(express.static(__dirname + '/static', { dotfiles: 'allow' } ))
const fs = require('fs')
const https = require('https')
const app = express()
| {
callback(null, true)
} | conditional_block |
express.js | eg2_query_parameter = () => {
app.get('/', (req, res) => {
console.log('query: all')
console.log('--------------------')
console.log(req.query)
console.log('query: one by one')
console.log('--------------------')
for (const key in req.query) {
console.log(key, ': ', req.query[key])
}
console.log(`req.query.name: ${req.query.name}`) | res.end()
});
app.listen(3000)
};
eg3_post_query = () => {
// for Content-Type: application/json
// if header =
app.use(express.json());
// for Content-Type: application/x-www-form-urlencoded
// if header =
app.use(express.urlencoded());
app.post('/form', (req, res) => {
const name = req.body.name;
});
};
eg4_response = () => {
app.get('/answer', (req, res) => {
// if give text Content-Type: text/html
// if give object Content-Type: application/json
res.send({"coucou": "walou"});
});
app.get('/200', (req, res) => {
res.sendStatus(200);
// <=> res.status(200).send('Ok');
});
app.get('/403', (req, res) => {
res.sendStatus(403);
// <=> res.status(403).send('Forbidden');
});
app.get('/404', (req, res) => {
res.sendStatus(404);
// <=> res.status(404).send('File not found');
});
app.get('/500', (req, res) => {
res.sendStatus(500)
// === res.status(500).send('Internal Server Error')
});
app.get('/json', (req, res) => {
res.json({"coucou": "walou"});
});
app.listen(3000, () => console.log('Server ready'))
};
eg5_header = () => {
app.get('/json', (req, res) => {
res.set('content-type', 'application/json');
res.type('json');
res.end();
});
app.listen(3000, () => console.log('Server ready'))
};
eg6_redirect = () => {
// made a 302
app.get('/1', (req, res) => {
res.redirect('/2');
});
app.get('/2', (req, res) => {
res.redirect(301, '/3/hey');
});
app.get('/3/hey', (req, res) => {
res.redirect(301, '/..');
});
app.get('/3', (req, res) => {
res.end();
});
app.get('/back', (req, res) => {
res.redirect('back');
});
app.listen(3000, () => console.log('Server ready'))
};
eg7_routing_parameters = () => {
app.get('/uppercase/:theValue', (req, res) => {
res.send(req.params.theValue.toUpperCase());
});
// regex will match /post , /post/first , /thepost , /posting/something , and so on.
app.get(/post/, (req, res) => res.end())
app.listen(3000, () => console.log('Server ready'))
};
eg8_cors = () => {
const whitelist = ['http://example1.com', 'http://example2.com']
const corsOptions = {
origin: function(origin, callback) {
if (whitelist.indexOf(origin) !== -1) {
callback(null, true)
} else {
callback(new Error('Not allowed by CORS'))
}
}
}
app.get('/with-cors', cors(corsOptions), (req, res, next) => {
res.json({ msg: 'WHOAH with CORS it works!' });
});
app.listen(3000, () => console.log('Server ready'))
};
eg9_prefligth = () => {
//allow OPTIONS on just one resource
app.options('/the/resource/you/request', cors());
//allow OPTIONS on all resources
app.options('*', cors());
};
eg10_middleware = () => {
const myMiddleware = (req, res, next) => {
next()
};
app.get('/', myMiddleware, (req, res) => res.send('Hello World!'));
};
eg11_static_file = () => {
app.get('/', (req, res) => res.download('./yarn.lock'));
app.get('/', (req, res) => res.download('./yarn.lock', './dat_spam.txt'));
app.listen(3000, () => console.log('Server ready'));
};
eg12_session = () => {
app.use(session({
'secret': '343ji43j4n3jn4jk3n',
}));
app.get('/', (req, res, next) => {
req.session.name = 'Flavio'
res.send(req.session);
console.log(req.session);
});
app.listen(3000, () => console.log('Server ready'));
};
eg13_validating_input = () => {
app.post('/form', [
check('name')
.isAlpha()
.withMessage('Must be only alphabetical chars')
.isLength({ min: 10 })
.withMessage('Must be at least 10 chars long'),
check('email')
.isEmail()
.custom(email => {
if (alreadyHaveEmail(email)) {
throw new Error('Email already registered')
}
}),
check('age').isNumeric()
], (req, res) => {
const name = req.body.name;
const email = req.body.email;
const age = req.body.age;
});
app.listen(3000, () => console.log('Server ready'));
};
eg14_sanitizing = () => {
app.use(express.json());
// trim() trims characters (whitespace by default) at the beginning and at the end of a string
// escape() replaces < , > , & , ' , " and / with their corresponding HTML entities
// normalizeEmail() canonicalizes an email address. Accepts several options to lowercase email addresses or subaddresses (e.g. [email protected] )
// blacklist() remove characters that appear in the blacklist
// whitelist() remove characters that do not appear in the whitelist
// unescape() replaces HTML encoded entities with < , > , & , ' , " and /
// ltrim() like trim(), but only trims characters at the start of the string
// rtrim() like trim(), but only trims characters at the end of the string
// stripLow() remove ASCII control characters, which are normally invisible
const sanitizeValue = value => {
return value;
};
app.post('/form', [
check('name')
.isAlpha()
.withMessage('Must be only alphabetical chars')
.isLength({ min: 10 })
.withMessage('Must be at least 10 chars long')
.trim().escape(),
check('email')
.isEmail()
.custom(email => {
if (alreadyHaveEmail(email)) {
throw new Error('Email already registered')
}
})
.normalizeEmail(),
check('age').isNumeric()
.trim().escape(),
check('value').customSanitizer(value => sanitizeValue(value)),
], (req, res) => {
const name = req.body.name;
const email = req.body.email;
const age = req.body.age;
});
app.listen(3000, () => console.log('Server ready'));
};
eg15_handling_form = () => {
// wtf ... don t understand
app.listen(3000, () => console.log('Server ready'));
};
eg16_file_upload = () => {
app.listen(3000, () => console.log('Server ready'));
};
eg17_https = () => {
/**
* with openssl generate certs
* `openssl req -nodes -new -x509 -keyout server.key -out server.cert`
* Just remember to set this to localhost
*/
const https = require('https');
const fs = require('fs');
app.get('/', (req, res) => {
res.send('Hello HTTPS!');
});
https.createServer({
key: fs.readFileSync('server.key'),
cert: fs.readFileSync('server.cert')
}, app).listen(3000, () => {
console.log('Listening...');
});
};
eg18_let_s_encrypt = () => {
/**
* install certbot
* ```
* sudo add-apt repository ppa:certbot/certbot
* sudo apt-get update
* sudo apt-get install certbot
* ```
*
* generate cert
* certbot certonly --manual
*
* active renewal
* 0 */12 * * * root /usr/local/bin/certbot renew >/dev/null 2>&1
*/
app.use(express.static(__dirname + '/static', { dotfiles: 'allow' } ))
const fs = require('fs')
const https = require('https')
const app = express()
| console.log('--------------------') | random_line_split |
orchestrator.go | actual workload is. It then tries to fix the delta.
//
// The expected task list can be altered via AddTask, RemoveTask and
// UpdateTasks. Each method is safe to be called on multiple go-routines.
type Orchestrator struct {
log Logger
s func(TermStats)
timeout time.Duration
mu sync.Mutex
workers []Worker
expectedTasks []Task
// LastActual is set each term. It is only used for a user who wants to
// know the state of the worker cluster from the last term.
lastActual []WorkerState
}
// New creates a new Orchestrator.
func New(opts ...OrchestratorOption) *Orchestrator {
o := &Orchestrator{
s: func(TermStats) {},
log: log.New(ioutil.Discard, "", 0),
timeout: 10 * time.Second,
}
for _, opt := range opts {
opt(o)
}
return o
}
// NextTerm reaches out to the cluster to gather to actual workload. It then
// attempts to fix the delta between actual and expected. The lifecycle of
// the term is managed by the given context.
func (o *Orchestrator) NextTerm(ctx context.Context) {
o.mu.Lock()
defer o.mu.Unlock()
// Gather the state of the world from the workers.
actual := o.collectActual(ctx)
toAdd, toRemove := o.delta(actual)
// Rebalance tasks among workers.
toAdd, toRemove = rebalance(toAdd, toRemove, actual)
counts := counts(actual, toRemove)
for worker, tasks := range toRemove {
for _, task := range tasks {
// Remove the task from the workers.
removeCtx, _ := context.WithTimeout(ctx, o.timeout)
worker.Remove(removeCtx, task)
}
}
for taskDefinition, missing := range toAdd {
history := make(map[Worker]bool)
for i := 0; i < missing; i++ {
counts = o.assignTask(ctx,
taskDefinition,
counts,
actual,
history,
)
}
}
o.s(TermStats{
WorkerCount: len(actual),
})
}
// collectActual reaches out to each worker and gets their state of the world.
// Each worker is queried in parallel. If a worker returns an error while
// trying to list the tasks, it will be logged and not considered for what
// workers should be assigned work.
func (o *Orchestrator) collectActual(ctx context.Context) map[Worker][]interface{} {
type result struct {
worker Worker
actual []interface{}
err error
}
listCtx, _ := context.WithTimeout(ctx, o.timeout)
results := make(chan result, len(o.workers))
errs := make(chan result, len(o.workers))
for _, worker := range o.workers {
go func(worker Worker) {
listResults, err := worker.List(listCtx)
if err != nil {
errs <- result{worker: worker, err: err}
return
}
results <- result{worker: worker, actual: listResults}
}(worker)
}
t := time.NewTimer(o.timeout)
var state []WorkerState
actual := make(map[Worker][]interface{})
for i := 0; i < len(o.workers); i++ {
select {
case <-ctx.Done():
break
case nextResult := <-results:
actual[nextResult.worker] = nextResult.actual
state = append(state, WorkerState{Worker: nextResult.worker, Tasks: nextResult.actual})
case err := <-errs:
o.log.Printf("Error trying to list tasks from %s: %s", err.worker, err.err)
case <-t.C:
o.log.Printf("Communicator timeout. Using results available...")
break
}
}
o.lastActual = state
return actual
}
// delta finds what should be added and removed to make actual match the
// expected.
func (o *Orchestrator) delta(actual map[Worker][]interface{}) (toAdd map[interface{}]int, toRemove map[Worker][]interface{}) {
toAdd = make(map[interface{}]int)
toRemove = make(map[Worker][]interface{})
expectedTasks := make([]Task, len(o.expectedTasks))
copy(expectedTasks, o.expectedTasks)
for _, task := range o.expectedTasks {
needs := hasEnoughInstances(task, actual)
if needs == 0 {
continue
}
toAdd[task.Definition] = needs
}
for worker, tasks := range actual {
for _, task := range tasks {
if idx := containsTask(task, expectedTasks); idx >= 0 {
expectedTasks[idx].Instances--
if expectedTasks[idx].Instances == 0 {
expectedTasks = append(expectedTasks[0:idx], expectedTasks[idx+1:]...)
}
continue
}
toRemove[worker] = append(toRemove[worker], task)
}
}
return toAdd, toRemove
}
// assignTask tries to find a worker that does not have too many tasks
// assigned. If it encounters a worker with too many tasks, it will remove
// it from the pool and try again.
func (o *Orchestrator) assignTask(
ctx context.Context,
taskDefinition interface{},
workerLoads []workerLoad,
actual map[Worker][]interface{},
history map[Worker]bool,
) []workerLoad {
activeWorkers := len(actual)
if activeWorkers == 0 {
return workerLoads
}
totalTasks := o.totalTaskCount()
maxTaskCount := totalTasks/activeWorkers + totalTasks%activeWorkers
for i, loadInfo := range workerLoads {
// Ensure that each worker gets an even amount of work assigned.
// Therefore if a worker gets its fair share, remove it from the worker
// pool for this term. This also accounts for there being a non-divisible
// amount of tasks per workers.
loadInfo.taskCount++
if loadInfo.taskCount > maxTaskCount {
workerLoads = append(workerLoads[:i], workerLoads[i+1:]...)
// Recurse since the worker pool was adjusted and the task was
// not assigned.
return o.assignTask(ctx, taskDefinition, workerLoads, actual, history)
}
// Ensure we haven't assigned this task to the worker already.
if history[loadInfo.worker] || contains(taskDefinition, actual[loadInfo.worker]) >= 0 {
continue
}
history[loadInfo.worker] = true
// Assign the task to the worker.
o.log.Printf("Adding task %s to %s.", taskDefinition, loadInfo.worker)
addCtx, _ := context.WithTimeout(ctx, o.timeout)
loadInfo.worker.Add(addCtx, taskDefinition)
// Move updated count to end of slice to help with fairness
workerLoads = append(
append(workerLoads[:i], workerLoads[i+1:]...),
workerLoad{
worker: loadInfo.worker,
taskCount: loadInfo.taskCount,
},
)
break
}
return workerLoads
}
// totalTaskCount calculates the total number of expected task instances.
func (o *Orchestrator) totalTaskCount() int {
var total int
for _, t := range o.expectedTasks {
total += t.Instances
}
return total
}
// AddWorker adds a worker to the known worker cluster. The update will not
// take affect until the next term. It is safe to invoke AddWorker,
// RemoveWorkers and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) AddWorker(worker Worker) {
o.mu.Lock()
defer o.mu.Unlock()
// Ensure we don't already have this worker
idx := containsWorker(worker, o.workers)
if idx > -1 {
return
}
o.workers = append(o.workers, worker)
}
// RemoveWorker removes a worker from the known worker cluster. The update
// will not take affect until the next term. It is safe to invoke AddWorker,
// RemoveWorkers and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) RemoveWorker(worker Worker) {
o.mu.Lock()
defer o.mu.Unlock()
idx := containsWorker(worker, o.workers)
if idx < 0 {
return
}
o.workers = append(o.workers[:idx], o.workers[idx+1:]...)
}
// UpdateWorkers overwrites the expected worker list. The update will not take
// affect until the next term. It is safe to invoke AddWorker, RemoveWorker
// and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) UpdateWorkers(workers []Worker) {
o.mu.Lock()
defer o.mu.Unlock()
o.workers = workers
}
// Task stores the required information for a task.
type Task struct {
Definition interface{}
Instances int
}
// AddTask adds a new task to the expected workload. The update will not take
// affect until the next term. It is safe to invoke AddTask, RemoveTask and
// UpdateTasks on multiple go-routines.
func (o *Orchestrator) AddTask(taskDefinition interface{}, opts ...TaskOption) {
o.mu.Lock()
defer o.mu.Unlock()
// Ensure we don't already have this task
for _, t := range o.expectedTasks {
if taskDefinition == t.Definition | {
return
} | conditional_block |
|
orchestrator.go | Actual(ctx)
toAdd, toRemove := o.delta(actual)
// Rebalance tasks among workers.
toAdd, toRemove = rebalance(toAdd, toRemove, actual)
counts := counts(actual, toRemove)
for worker, tasks := range toRemove {
for _, task := range tasks {
// Remove the task from the workers.
removeCtx, _ := context.WithTimeout(ctx, o.timeout)
worker.Remove(removeCtx, task)
}
}
for taskDefinition, missing := range toAdd {
history := make(map[Worker]bool)
for i := 0; i < missing; i++ {
counts = o.assignTask(ctx,
taskDefinition,
counts,
actual,
history,
)
}
}
o.s(TermStats{
WorkerCount: len(actual),
})
}
// collectActual reaches out to each worker and gets their state of the world.
// Each worker is queried in parallel. If a worker returns an error while
// trying to list the tasks, it will be logged and not considered for what
// workers should be assigned work.
func (o *Orchestrator) collectActual(ctx context.Context) map[Worker][]interface{} {
type result struct {
worker Worker
actual []interface{}
err error
}
listCtx, _ := context.WithTimeout(ctx, o.timeout)
results := make(chan result, len(o.workers))
errs := make(chan result, len(o.workers))
for _, worker := range o.workers {
go func(worker Worker) {
listResults, err := worker.List(listCtx)
if err != nil {
errs <- result{worker: worker, err: err}
return
}
results <- result{worker: worker, actual: listResults}
}(worker)
}
t := time.NewTimer(o.timeout)
var state []WorkerState
actual := make(map[Worker][]interface{})
for i := 0; i < len(o.workers); i++ {
select {
case <-ctx.Done():
break
case nextResult := <-results:
actual[nextResult.worker] = nextResult.actual
state = append(state, WorkerState{Worker: nextResult.worker, Tasks: nextResult.actual})
case err := <-errs:
o.log.Printf("Error trying to list tasks from %s: %s", err.worker, err.err)
case <-t.C:
o.log.Printf("Communicator timeout. Using results available...")
break
}
}
o.lastActual = state
return actual
}
// delta finds what should be added and removed to make actual match the
// expected.
func (o *Orchestrator) delta(actual map[Worker][]interface{}) (toAdd map[interface{}]int, toRemove map[Worker][]interface{}) {
toAdd = make(map[interface{}]int)
toRemove = make(map[Worker][]interface{})
expectedTasks := make([]Task, len(o.expectedTasks))
copy(expectedTasks, o.expectedTasks)
for _, task := range o.expectedTasks {
needs := hasEnoughInstances(task, actual)
if needs == 0 {
continue
}
toAdd[task.Definition] = needs
}
for worker, tasks := range actual {
for _, task := range tasks {
if idx := containsTask(task, expectedTasks); idx >= 0 {
expectedTasks[idx].Instances--
if expectedTasks[idx].Instances == 0 {
expectedTasks = append(expectedTasks[0:idx], expectedTasks[idx+1:]...)
}
continue
}
toRemove[worker] = append(toRemove[worker], task)
}
}
return toAdd, toRemove
}
// assignTask tries to find a worker that does not have too many tasks
// assigned. If it encounters a worker with too many tasks, it will remove
// it from the pool and try again.
func (o *Orchestrator) assignTask(
ctx context.Context,
taskDefinition interface{},
workerLoads []workerLoad,
actual map[Worker][]interface{},
history map[Worker]bool,
) []workerLoad {
activeWorkers := len(actual)
if activeWorkers == 0 {
return workerLoads
}
totalTasks := o.totalTaskCount()
maxTaskCount := totalTasks/activeWorkers + totalTasks%activeWorkers
for i, loadInfo := range workerLoads {
// Ensure that each worker gets an even amount of work assigned.
// Therefore if a worker gets its fair share, remove it from the worker
// pool for this term. This also accounts for there being a non-divisible
// amount of tasks per workers.
loadInfo.taskCount++
if loadInfo.taskCount > maxTaskCount {
workerLoads = append(workerLoads[:i], workerLoads[i+1:]...)
// Recurse since the worker pool was adjusted and the task was
// not assigned.
return o.assignTask(ctx, taskDefinition, workerLoads, actual, history)
}
// Ensure we haven't assigned this task to the worker already.
if history[loadInfo.worker] || contains(taskDefinition, actual[loadInfo.worker]) >= 0 {
continue
}
history[loadInfo.worker] = true
// Assign the task to the worker.
o.log.Printf("Adding task %s to %s.", taskDefinition, loadInfo.worker)
addCtx, _ := context.WithTimeout(ctx, o.timeout)
loadInfo.worker.Add(addCtx, taskDefinition)
// Move updated count to end of slice to help with fairness
workerLoads = append(
append(workerLoads[:i], workerLoads[i+1:]...),
workerLoad{
worker: loadInfo.worker,
taskCount: loadInfo.taskCount,
},
)
break
}
return workerLoads
}
// totalTaskCount calculates the total number of expected task instances.
func (o *Orchestrator) totalTaskCount() int {
var total int
for _, t := range o.expectedTasks {
total += t.Instances
}
return total
}
// AddWorker adds a worker to the known worker cluster. The update will not
// take affect until the next term. It is safe to invoke AddWorker,
// RemoveWorkers and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) AddWorker(worker Worker) {
o.mu.Lock()
defer o.mu.Unlock()
// Ensure we don't already have this worker
idx := containsWorker(worker, o.workers)
if idx > -1 {
return
}
o.workers = append(o.workers, worker)
}
// RemoveWorker removes a worker from the known worker cluster. The update
// will not take affect until the next term. It is safe to invoke AddWorker,
// RemoveWorkers and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) RemoveWorker(worker Worker) {
o.mu.Lock()
defer o.mu.Unlock()
idx := containsWorker(worker, o.workers)
if idx < 0 {
return
}
o.workers = append(o.workers[:idx], o.workers[idx+1:]...)
}
// UpdateWorkers overwrites the expected worker list. The update will not take
// affect until the next term. It is safe to invoke AddWorker, RemoveWorker
// and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) UpdateWorkers(workers []Worker) {
o.mu.Lock()
defer o.mu.Unlock()
o.workers = workers
}
// Task stores the required information for a task.
type Task struct {
Definition interface{}
Instances int
}
// AddTask adds a new task to the expected workload. The update will not take
// affect until the next term. It is safe to invoke AddTask, RemoveTask and
// UpdateTasks on multiple go-routines.
func (o *Orchestrator) AddTask(taskDefinition interface{}, opts ...TaskOption) {
o.mu.Lock()
defer o.mu.Unlock()
// Ensure we don't already have this task
for _, t := range o.expectedTasks {
if taskDefinition == t.Definition {
return
}
}
t := Task{Definition: taskDefinition, Instances: 1}
for _, opt := range opts {
opt(&t)
}
o.expectedTasks = append(o.expectedTasks, t)
}
// TaskOption is used to configure a task when it is being added.
type TaskOption func(*Task)
// WithTaskInstances configures the number of tasks. Defaults to 1.
func WithTaskInstances(i int) TaskOption {
return func(t *Task) {
t.Instances = i
}
}
// RemoveTask removes a task from the expected workload. The update will not
// take affect until the next term. It is safe to invoke AddTask, RemoveTask
// and UpdateTasks on multiple go-routines.
func (o *Orchestrator) RemoveTask(taskDefinition interface{}) {
o.mu.Lock()
defer o.mu.Unlock()
idx := containsTask(taskDefinition, o.expectedTasks)
if idx < 0 {
return
}
o.expectedTasks = append(o.expectedTasks[:idx], o.expectedTasks[idx+1:]...)
}
// UpdateTasks overwrites the expected task list. The update will not take
// affect until the next term. It is safe to invoke AddTask, RemoveTask and
// UpdateTasks on multiple go-routines.
func (o *Orchestrator) UpdateTasks(tasks []Task) | {
o.mu.Lock()
defer o.mu.Unlock()
o.expectedTasks = tasks
} | identifier_body |
|
orchestrator.go | to the cluster
// to see what the actual workload is. It then tries to fix the delta.
//
// The expected task list can be altered via AddTask, RemoveTask and
// UpdateTasks. Each method is safe to be called on multiple go-routines.
type Orchestrator struct {
log Logger
s func(TermStats)
timeout time.Duration
mu sync.Mutex
workers []Worker
expectedTasks []Task
// LastActual is set each term. It is only used for a user who wants to
// know the state of the worker cluster from the last term.
lastActual []WorkerState
}
// New creates a new Orchestrator.
func New(opts ...OrchestratorOption) *Orchestrator {
o := &Orchestrator{
s: func(TermStats) {},
log: log.New(ioutil.Discard, "", 0),
timeout: 10 * time.Second,
}
for _, opt := range opts {
opt(o)
}
return o
}
// NextTerm reaches out to the cluster to gather to actual workload. It then
// attempts to fix the delta between actual and expected. The lifecycle of
// the term is managed by the given context.
func (o *Orchestrator) NextTerm(ctx context.Context) {
o.mu.Lock()
defer o.mu.Unlock()
// Gather the state of the world from the workers.
actual := o.collectActual(ctx)
toAdd, toRemove := o.delta(actual)
// Rebalance tasks among workers.
toAdd, toRemove = rebalance(toAdd, toRemove, actual)
counts := counts(actual, toRemove)
for worker, tasks := range toRemove {
for _, task := range tasks {
// Remove the task from the workers.
removeCtx, _ := context.WithTimeout(ctx, o.timeout)
worker.Remove(removeCtx, task)
}
}
for taskDefinition, missing := range toAdd {
history := make(map[Worker]bool)
for i := 0; i < missing; i++ {
counts = o.assignTask(ctx,
taskDefinition,
counts,
actual, | }
o.s(TermStats{
WorkerCount: len(actual),
})
}
// collectActual reaches out to each worker and gets their state of the world.
// Each worker is queried in parallel. If a worker returns an error while
// trying to list the tasks, it will be logged and not considered for what
// workers should be assigned work.
func (o *Orchestrator) collectActual(ctx context.Context) map[Worker][]interface{} {
type result struct {
worker Worker
actual []interface{}
err error
}
listCtx, _ := context.WithTimeout(ctx, o.timeout)
results := make(chan result, len(o.workers))
errs := make(chan result, len(o.workers))
for _, worker := range o.workers {
go func(worker Worker) {
listResults, err := worker.List(listCtx)
if err != nil {
errs <- result{worker: worker, err: err}
return
}
results <- result{worker: worker, actual: listResults}
}(worker)
}
t := time.NewTimer(o.timeout)
var state []WorkerState
actual := make(map[Worker][]interface{})
for i := 0; i < len(o.workers); i++ {
select {
case <-ctx.Done():
break
case nextResult := <-results:
actual[nextResult.worker] = nextResult.actual
state = append(state, WorkerState{Worker: nextResult.worker, Tasks: nextResult.actual})
case err := <-errs:
o.log.Printf("Error trying to list tasks from %s: %s", err.worker, err.err)
case <-t.C:
o.log.Printf("Communicator timeout. Using results available...")
break
}
}
o.lastActual = state
return actual
}
// delta finds what should be added and removed to make actual match the
// expected.
func (o *Orchestrator) delta(actual map[Worker][]interface{}) (toAdd map[interface{}]int, toRemove map[Worker][]interface{}) {
toAdd = make(map[interface{}]int)
toRemove = make(map[Worker][]interface{})
expectedTasks := make([]Task, len(o.expectedTasks))
copy(expectedTasks, o.expectedTasks)
for _, task := range o.expectedTasks {
needs := hasEnoughInstances(task, actual)
if needs == 0 {
continue
}
toAdd[task.Definition] = needs
}
for worker, tasks := range actual {
for _, task := range tasks {
if idx := containsTask(task, expectedTasks); idx >= 0 {
expectedTasks[idx].Instances--
if expectedTasks[idx].Instances == 0 {
expectedTasks = append(expectedTasks[0:idx], expectedTasks[idx+1:]...)
}
continue
}
toRemove[worker] = append(toRemove[worker], task)
}
}
return toAdd, toRemove
}
// assignTask tries to find a worker that does not have too many tasks
// assigned. If it encounters a worker with too many tasks, it will remove
// it from the pool and try again.
func (o *Orchestrator) assignTask(
ctx context.Context,
taskDefinition interface{},
workerLoads []workerLoad,
actual map[Worker][]interface{},
history map[Worker]bool,
) []workerLoad {
activeWorkers := len(actual)
if activeWorkers == 0 {
return workerLoads
}
totalTasks := o.totalTaskCount()
maxTaskCount := totalTasks/activeWorkers + totalTasks%activeWorkers
for i, loadInfo := range workerLoads {
// Ensure that each worker gets an even amount of work assigned.
// Therefore if a worker gets its fair share, remove it from the worker
// pool for this term. This also accounts for there being a non-divisible
// amount of tasks per workers.
loadInfo.taskCount++
if loadInfo.taskCount > maxTaskCount {
workerLoads = append(workerLoads[:i], workerLoads[i+1:]...)
// Recurse since the worker pool was adjusted and the task was
// not assigned.
return o.assignTask(ctx, taskDefinition, workerLoads, actual, history)
}
// Ensure we haven't assigned this task to the worker already.
if history[loadInfo.worker] || contains(taskDefinition, actual[loadInfo.worker]) >= 0 {
continue
}
history[loadInfo.worker] = true
// Assign the task to the worker.
o.log.Printf("Adding task %s to %s.", taskDefinition, loadInfo.worker)
addCtx, _ := context.WithTimeout(ctx, o.timeout)
loadInfo.worker.Add(addCtx, taskDefinition)
// Move updated count to end of slice to help with fairness
workerLoads = append(
append(workerLoads[:i], workerLoads[i+1:]...),
workerLoad{
worker: loadInfo.worker,
taskCount: loadInfo.taskCount,
},
)
break
}
return workerLoads
}
// totalTaskCount calculates the total number of expected task instances.
func (o *Orchestrator) totalTaskCount() int {
var total int
for _, t := range o.expectedTasks {
total += t.Instances
}
return total
}
// AddWorker adds a worker to the known worker cluster. The update will not
// take affect until the next term. It is safe to invoke AddWorker,
// RemoveWorkers and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) AddWorker(worker Worker) {
o.mu.Lock()
defer o.mu.Unlock()
// Ensure we don't already have this worker
idx := containsWorker(worker, o.workers)
if idx > -1 {
return
}
o.workers = append(o.workers, worker)
}
// RemoveWorker removes a worker from the known worker cluster. The update
// will not take affect until the next term. It is safe to invoke AddWorker,
// RemoveWorkers and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) RemoveWorker(worker Worker) {
o.mu.Lock()
defer o.mu.Unlock()
idx := containsWorker(worker, o.workers)
if idx < 0 {
return
}
o.workers = append(o.workers[:idx], o.workers[idx+1:]...)
}
// UpdateWorkers overwrites the expected worker list. The update will not take
// affect until the next term. It is safe to invoke AddWorker, RemoveWorker
// and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) UpdateWorkers(workers []Worker) {
o.mu.Lock()
defer o.mu.Unlock()
o.workers = workers
}
// Task stores the required information for a task.
type Task struct {
Definition interface{}
Instances int
}
// AddTask adds a new task to the expected workload. The update will not take
// affect until the next term. It is safe to invoke AddTask, RemoveTask and
// UpdateTasks on multiple go-routines.
func (o *Orchestrator) AddTask(taskDefinition interface{}, opts ...TaskOption) {
o.mu.Lock()
defer o.mu.Unlock()
// Ensure we don't already have this task
for _, t := range o.expectedTasks {
if taskDefinition == t.Def | history,
)
} | random_line_split |
orchestrator.go | .Printf("Error trying to list tasks from %s: %s", err.worker, err.err)
case <-t.C:
o.log.Printf("Communicator timeout. Using results available...")
break
}
}
o.lastActual = state
return actual
}
// delta finds what should be added and removed to make actual match the
// expected.
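//
// As an illustrative sketch (the task and worker names below are made up,
// not part of this package): with an expected workload of {A: 2 instances,
// B: 1 instance} and an actual state of worker1=[A], worker2=[A, C], delta
// returns toAdd={B: 1} and toRemove={worker2: [C]}.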
func (o *Orchestrator) delta(actual map[Worker][]interface{}) (toAdd map[interface{}]int, toRemove map[Worker][]interface{}) {
toAdd = make(map[interface{}]int)
toRemove = make(map[Worker][]interface{})
expectedTasks := make([]Task, len(o.expectedTasks))
copy(expectedTasks, o.expectedTasks)
for _, task := range o.expectedTasks {
needs := hasEnoughInstances(task, actual)
if needs == 0 {
continue
}
toAdd[task.Definition] = needs
}
for worker, tasks := range actual {
for _, task := range tasks {
if idx := containsTask(task, expectedTasks); idx >= 0 {
expectedTasks[idx].Instances--
if expectedTasks[idx].Instances == 0 {
expectedTasks = append(expectedTasks[0:idx], expectedTasks[idx+1:]...)
}
continue
}
toRemove[worker] = append(toRemove[worker], task)
}
}
return toAdd, toRemove
}
// assignTask tries to find a worker that does not have too many tasks
// assigned. If it encounters a worker with too many tasks, it will remove
// it from the pool and try again.
func (o *Orchestrator) assignTask(
ctx context.Context,
taskDefinition interface{},
workerLoads []workerLoad,
actual map[Worker][]interface{},
history map[Worker]bool,
) []workerLoad {
activeWorkers := len(actual)
if activeWorkers == 0 {
return workerLoads
}
totalTasks := o.totalTaskCount()
maxTaskCount := totalTasks/activeWorkers + totalTasks%activeWorkers
for i, loadInfo := range workerLoads {
// Ensure that each worker gets an even amount of work assigned.
// Therefore, once a worker has its fair share, remove it from the worker
// pool for this term. This also accounts for the total task count not
// dividing evenly across the workers.
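// For example (illustrative numbers only): with 5 total task instances and
// 2 active workers, maxTaskCount is 5/2 + 5%2 = 3, so no worker is asked to
// hold more than 3 tasks in this term.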
loadInfo.taskCount++
if loadInfo.taskCount > maxTaskCount {
workerLoads = append(workerLoads[:i], workerLoads[i+1:]...)
// Recurse since the worker pool was adjusted and the task was
// not assigned.
return o.assignTask(ctx, taskDefinition, workerLoads, actual, history)
}
// Ensure we haven't assigned this task to the worker already.
if history[loadInfo.worker] || contains(taskDefinition, actual[loadInfo.worker]) >= 0 {
continue
}
history[loadInfo.worker] = true
// Assign the task to the worker.
o.log.Printf("Adding task %s to %s.", taskDefinition, loadInfo.worker)
addCtx, _ := context.WithTimeout(ctx, o.timeout)
loadInfo.worker.Add(addCtx, taskDefinition)
// Move updated count to end of slice to help with fairness
workerLoads = append(
append(workerLoads[:i], workerLoads[i+1:]...),
workerLoad{
worker: loadInfo.worker,
taskCount: loadInfo.taskCount,
},
)
break
}
return workerLoads
}
// totalTaskCount calculates the total number of expected task instances.
func (o *Orchestrator) totalTaskCount() int {
var total int
for _, t := range o.expectedTasks {
total += t.Instances
}
return total
}
// AddWorker adds a worker to the known worker cluster. The update will not
// take effect until the next term. It is safe to invoke AddWorker,
// RemoveWorkers and UpdateWorkers on multiple go-routines.
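//
// Illustrative usage sketch (workerA and the ticker interval are assumptions
// for the example, not part of this package):
//
//	o := New()
//	o.AddWorker(workerA)
//	for range time.Tick(30 * time.Second) {
//		o.NextTerm(context.Background())
//	}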
func (o *Orchestrator) AddWorker(worker Worker) {
o.mu.Lock()
defer o.mu.Unlock()
// Ensure we don't already have this worker
idx := containsWorker(worker, o.workers)
if idx > -1 {
return
}
o.workers = append(o.workers, worker)
}
// RemoveWorker removes a worker from the known worker cluster. The update
// will not take effect until the next term. It is safe to invoke AddWorker,
// RemoveWorkers and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) RemoveWorker(worker Worker) {
o.mu.Lock()
defer o.mu.Unlock()
idx := containsWorker(worker, o.workers)
if idx < 0 {
return
}
o.workers = append(o.workers[:idx], o.workers[idx+1:]...)
}
// UpdateWorkers overwrites the expected worker list. The update will not take
// effect until the next term. It is safe to invoke AddWorker, RemoveWorker
// and UpdateWorkers on multiple go-routines.
func (o *Orchestrator) UpdateWorkers(workers []Worker) {
o.mu.Lock()
defer o.mu.Unlock()
o.workers = workers
}
// Task stores the required information for a task.
type Task struct {
Definition interface{}
Instances int
}
// AddTask adds a new task to the expected workload. The update will not take
// effect until the next term. It is safe to invoke AddTask, RemoveTask and
// UpdateTasks on multiple go-routines.
func (o *Orchestrator) AddTask(taskDefinition interface{}, opts ...TaskOption) {
o.mu.Lock()
defer o.mu.Unlock()
// Ensure we don't already have this task
for _, t := range o.expectedTasks {
if taskDefinition == t.Definition {
return
}
}
t := Task{Definition: taskDefinition, Instances: 1}
for _, opt := range opts {
opt(&t)
}
o.expectedTasks = append(o.expectedTasks, t)
}
// TaskOption is used to configure a task when it is being added.
type TaskOption func(*Task)
// WithTaskInstances configures the number of instances of the task that
// should be scheduled. Defaults to 1.
func WithTaskInstances(i int) TaskOption {
return func(t *Task) {
t.Instances = i
}
}
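// Illustrative usage (assumes an *Orchestrator value named orc; not part of
// this file):
//
//	orc.AddWorker(workerA)
//	orc.AddTask("ingest-logs", WithTaskInstances(3))
//
// Starting with the next term, "ingest-logs" is scheduled onto up to three
// distinct workers.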
// RemoveTask removes a task from the expected workload. The update will not
// take effect until the next term. It is safe to invoke AddTask, RemoveTask
// and UpdateTasks on multiple go-routines.
func (o *Orchestrator) RemoveTask(taskDefinition interface{}) {
o.mu.Lock()
defer o.mu.Unlock()
idx := containsTask(taskDefinition, o.expectedTasks)
if idx < 0 {
return
}
o.expectedTasks = append(o.expectedTasks[:idx], o.expectedTasks[idx+1:]...)
}
// UpdateTasks overwrites the expected task list. The update will not take
// effect until the next term. It is safe to invoke AddTask, RemoveTask and
// UpdateTasks on multiple go-routines.
func (o *Orchestrator) UpdateTasks(tasks []Task) {
o.mu.Lock()
defer o.mu.Unlock()
o.expectedTasks = tasks
}
// ListExpectedTasks returns the current list of the expected tasks.
func (o *Orchestrator) ListExpectedTasks() []Task {
o.mu.Lock()
defer o.mu.Unlock()
return o.expectedTasks
}
// WorkerState stores the state of a worker.
type WorkerState struct {
Worker Worker
// Tasks are the task definitions the worker is servicing.
Tasks []interface{}
}
// LastActual returns the actual from the last term. It will return nil
// before the first term.
func (o *Orchestrator) LastActual() []WorkerState {
o.mu.Lock()
defer o.mu.Unlock()
return o.lastActual
}
// rebalance redistributes tasks across the workers. If any worker has too
// many tasks, one of its tasks is added to the remove map and that task's
// count is incremented in the returned add map.
func rebalance(
toAdd map[interface{}]int,
toRemove,
actual map[Worker][]interface{},
) (map[interface{}]int, map[Worker][]interface{}) {
counts := counts(actual, toRemove)
if len(counts) == 0 {
return toAdd, toRemove
}
var total int
for _, c := range counts {
total += c.taskCount
}
for _, addCount := range toAdd {
total += addCount
}
maxPerNode := total / len(counts)
if maxPerNode == 0 || total%len(counts) != 0 {
maxPerNode++
}
for _, c := range counts {
if c.taskCount > maxPerNode {
task := actual[c.worker][0]
toRemove[c.worker] = append(toRemove[c.worker], task)
toAdd[task]++
}
}
return toAdd, toRemove
}
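// Example (hypothetical counts): workers holding 4, 1 and 1 tasks with nothing
// queued in toAdd give total = 6 and maxPerNode = 2, so one task is shifted off
// the overloaded worker per rebalance pass: it is appended to toRemove for that
// worker and its definition's count in toAdd is incremented.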
// hasEnoughInstances reports how many additional instances of the given task
// are still required, based on how many workers in actual already service it.
func hasEnoughInstances(t Task, actual map[Worker][]interface{}) (needs int) {
var count int
for _, a := range actual {
if contains(t.Definition, a) >= 0 {
count++
}
}
return t.Instances - count
}
// contains returns the index of the given interface{} (x) in the slice y. If the
// interface{} is not present in the slice, it returns -1.
func | contains | identifier_name |
|
warnings.go | You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shoot
import (
"context"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"github.com/gardener/gardener/pkg/apis/core"
"github.com/gardener/gardener/pkg/apis/core/helper"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
versionutils "github.com/gardener/gardener/pkg/utils/version"
)
// GetWarnings returns warnings for the provided shoot.
func GetWarnings(_ context.Context, shoot, oldShoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if shoot == nil {
return nil
}
var warnings []string
if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) {
warnings = append(warnings, "you should consider disabling the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_access.md for details")
}
// TODO(acumino): Drop this warning in v1.78, with dropping of annotation to enable node-local-dns.
warnings = append(warnings, getWarningsForDeprecatedNodeLocalDNSLabels(shoot)...)
if oldShoot != nil {
warnings = append(warnings, getWarningsForDueCredentialsRotations(shoot, credentialsRotationInterval)...)
warnings = append(warnings, getWarningsForIncompleteCredentialsRotation(shoot, credentialsRotationInterval)...)
// Errors are ignored here because we cannot do anything meaningful with them - variables will default to `false`.
k8sLess125, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, "< 1.25")
k8sGreaterEqual123, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, ">= 1.23")
if k8sLess125 && k8sGreaterEqual123 {
if warning := getWarningsForPSPAdmissionPlugin(shoot); warning != "" {
warnings = append(warnings, warning)
}
}
}
if kubeControllerManager := shoot.Spec.Kubernetes.KubeControllerManager; kubeControllerManager != nil && kubeControllerManager.PodEvictionTimeout != nil {
warnings = append(warnings, "you are setting the spec.kubernetes.kubeControllerManager.podEvictionTimeout field. The field does not have effect since Kubernetes 1.13. Instead, use the spec.kubernetes.kubeAPIServer.(defaultNotReadyTolerationSeconds/defaultUnreachableTolerationSeconds) fields.")
}
return warnings
}
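// Minimal sketch of how this can be exercised (values are illustrative, not
// part of this package's tests):
//
//	warnings := GetWarnings(ctx, shoot, shoot.DeepCopy(), 90*24*time.Hour)
//
// A shoot older than the rotation interval with no recorded credential
// rotations yields the "consider rotating the shoot credentials" warning.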
func getWarningsForDeprecatedNodeLocalDNSLabels(shoot *core.Shoot) []string {
var warnings []string
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNS]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.enabled` in Shoot instead. Switching on node-local-dns via shoot specification will roll the nodes even if node-local-dns was enabled beforehand via annotation.", v1beta1constants.AnnotationNodeLocalDNS))
}
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToClusterDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns))
}
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToUpstreamDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns))
}
return warnings
}
func getWarningsForDueCredentialsRotations(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if !isOldEnough(shoot.CreationTimestamp.Time, credentialsRotationInterval) {
return nil
}
if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil {
return []string{"you should consider rotating the shoot credentials, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#gardener-provided-credentials for details"}
}
var (
rotation = shoot.Status.Credentials.Rotation
warnings []string
)
if rotation.CertificateAuthorities == nil || initiationDue(rotation.CertificateAuthorities.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the certificate authorities, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#certificate-authorities for details")
}
if rotation.ETCDEncryptionKey == nil || initiationDue(rotation.ETCDEncryptionKey.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the ETCD encryption key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#etcd-encryption-key for details")
}
if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) &&
(rotation.Kubeconfig == nil || initiationDue(rotation.Kubeconfig.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#kubeconfig for details")
}
if (shoot.Spec.Purpose == nil || *shoot.Spec.Purpose != core.ShootPurposeTesting) &&
(rotation.Observability == nil || initiationDue(rotation.Observability.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the observability passwords, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#observability-passwords-for-plutono for details")
}
if rotation.ServiceAccountKey == nil || initiationDue(rotation.ServiceAccountKey.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the ServiceAccount token signing key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#serviceaccount-token-signing-key for details")
}
if helper.ShootEnablesSSHAccess(shoot) && (rotation.SSHKeypair == nil || initiationDue(rotation.SSHKeypair.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the SSH keypair, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#ssh-key-pair-for-worker-nodes for details")
}
return warnings
}
func getWarningsForIncompleteCredentialsRotation(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil {
return nil
}
var (
warnings []string
recommendedCompletionInterval = credentialsRotationInterval / 3
rotation = shoot.Status.Credentials.Rotation
)
// Only consider credentials for which completion must be triggered explicitly by the user. Credentials which are
// rotated in "one phase" are excluded.
if rotation.CertificateAuthorities != nil && completionDue(rotation.CertificateAuthorities.LastInitiationFinishedTime, rotation.CertificateAuthorities.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("certificate authorities", recommendedCompletionInterval))
}
if rotation.ETCDEncryptionKey != nil && completionDue(rotation.ETCDEncryptionKey.LastInitiationFinishedTime, rotation.ETCDEncryptionKey.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("ETCD encryption key", recommendedCompletionInterval))
}
if rotation.ServiceAccountKey != nil && completionDue(rotation.ServiceAccountKey.LastInitiationFinishedTime, rotation.ServiceAccountKey.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("ServiceAccount token signing key", recommendedCompletionInterval))
}
return warnings
}
func initiationDue(lastInitiationTime *metav1.Time, threshold time.Duration) bool {
return lastInitiationTime == nil || isOldEnough(lastInitiationTime.Time, threshold)
}
func completionDue(lastInitiationFinishedTime, lastCompletionTriggeredTime *metav1.Time, threshold time.Duration) bool {
if lastInitiationFinishedTime == nil {
return false
}
if lastCompletionTriggeredTime != nil && lastCompletionTriggeredTime.Time.UTC().After(lastInitiationFinishedTime.Time.UTC()) {
return false | func isOldEnough(t time.Time, threshold time.Duration) bool {
return t.UTC().Add(threshold).Before(time.Now().UTC())
}
func completionWarning(credentials string, recommendedCompletionInterval time | }
return isOldEnough(lastInitiationFinishedTime.Time, threshold)
}
| random_line_split |
warnings.go | utils "github.com/gardener/gardener/pkg/utils/version"
)
// GetWarnings returns warnings for the provided shoot.
func GetWarnings(_ context.Context, shoot, oldShoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if shoot == nil {
return nil
}
var warnings []string
if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) {
warnings = append(warnings, "you should consider disabling the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_access.md for details")
}
// TODO(acumino): Drop this warning in v1.78, with dropping of annotation to enable node-local-dns.
warnings = append(warnings, getWarningsForDeprecatedNodeLocalDNSLabels(shoot)...)
if oldShoot != nil {
warnings = append(warnings, getWarningsForDueCredentialsRotations(shoot, credentialsRotationInterval)...)
warnings = append(warnings, getWarningsForIncompleteCredentialsRotation(shoot, credentialsRotationInterval)...)
// Errors are ignored here because we cannot do anything meaningful with them - variables will default to `false`.
k8sLess125, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, "< 1.25")
k8sGreaterEqual123, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, ">= 1.23")
if k8sLess125 && k8sGreaterEqual123 {
if warning := getWarningsForPSPAdmissionPlugin(shoot); warning != "" {
warnings = append(warnings, warning)
}
}
}
if kubeControllerManager := shoot.Spec.Kubernetes.KubeControllerManager; kubeControllerManager != nil && kubeControllerManager.PodEvictionTimeout != nil {
warnings = append(warnings, "you are setting the spec.kubernetes.kubeControllerManager.podEvictionTimeout field. The field does not have effect since Kubernetes 1.13. Instead, use the spec.kubernetes.kubeAPIServer.(defaultNotReadyTolerationSeconds/defaultUnreachableTolerationSeconds) fields.")
}
return warnings
}
func getWarningsForDeprecatedNodeLocalDNSLabels(shoot *core.Shoot) []string {
var warnings []string
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNS]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.enabled` in Shoot instead. Switching on node-local-dns via shoot specification will roll the nodes even if node-local-dns was enabled beforehand via annotation.", v1beta1constants.AnnotationNodeLocalDNS))
}
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToClusterDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns))
}
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToUpstreamDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns))
}
return warnings
}
func getWarningsForDueCredentialsRotations(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if !isOldEnough(shoot.CreationTimestamp.Time, credentialsRotationInterval) {
return nil
}
if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil {
return []string{"you should consider rotating the shoot credentials, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#gardener-provided-credentials for details"}
}
var (
rotation = shoot.Status.Credentials.Rotation
warnings []string
)
if rotation.CertificateAuthorities == nil || initiationDue(rotation.CertificateAuthorities.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the certificate authorities, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#certificate-authorities for details")
}
if rotation.ETCDEncryptionKey == nil || initiationDue(rotation.ETCDEncryptionKey.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the ETCD encryption key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#etcd-encryption-key for details")
}
if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) &&
(rotation.Kubeconfig == nil || initiationDue(rotation.Kubeconfig.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#kubeconfig for details")
}
if (shoot.Spec.Purpose == nil || *shoot.Spec.Purpose != core.ShootPurposeTesting) &&
(rotation.Observability == nil || initiationDue(rotation.Observability.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the observability passwords, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#observability-passwords-for-plutono for details")
}
if rotation.ServiceAccountKey == nil || initiationDue(rotation.ServiceAccountKey.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the ServiceAccount token signing key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#serviceaccount-token-signing-key for details")
}
if helper.ShootEnablesSSHAccess(shoot) && (rotation.SSHKeypair == nil || initiationDue(rotation.SSHKeypair.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the SSH keypair, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#ssh-key-pair-for-worker-nodes for details")
}
return warnings
}
func getWarningsForIncompleteCredentialsRotation(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil {
return nil
}
var (
warnings []string
recommendedCompletionInterval = credentialsRotationInterval / 3
rotation = shoot.Status.Credentials.Rotation
)
// Only consider credentials for which completion must be triggered explicitly by the user. Credentials which are
// rotated in "one phase" are excluded.
if rotation.CertificateAuthorities != nil && completionDue(rotation.CertificateAuthorities.LastInitiationFinishedTime, rotation.CertificateAuthorities.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("certificate authorities", recommendedCompletionInterval))
}
if rotation.ETCDEncryptionKey != nil && completionDue(rotation.ETCDEncryptionKey.LastInitiationFinishedTime, rotation.ETCDEncryptionKey.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("ETCD encryption key", recommendedCompletionInterval))
}
if rotation.ServiceAccountKey != nil && completionDue(rotation.ServiceAccountKey.LastInitiationFinishedTime, rotation.ServiceAccountKey.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("ServiceAccount token signing key", recommendedCompletionInterval))
}
return warnings
}
func initiationDue(lastInitiationTime *metav1.Time, threshold time.Duration) bool {
return lastInitiationTime == nil || isOldEnough(lastInitiationTime.Time, threshold)
}
func completionDue(lastInitiationFinishedTime, lastCompletionTriggeredTime *metav1.Time, threshold time.Duration) bool {
if lastInitiationFinishedTime == nil {
return false
}
if lastCompletionTriggeredTime != nil && lastCompletionTriggeredTime.Time.UTC().After(lastInitiationFinishedTime.Time.UTC()) {
return false
}
return isOldEnough(lastInitiationFinishedTime.Time, threshold)
}
func isOldEnough(t time.Time, threshold time.Duration) bool {
return t.UTC().Add(threshold).Before(time.Now().UTC())
}
func completionWarning(credentials string, recommendedCompletionInterval time.Duration) string {
return fmt.Sprintf("the %s rotation initiation was finished more than %s ago and should be completed", credentials, recommendedCompletionInterval)
}
func getWarningsForPSPAdmissionPlugin(shoot *core.Shoot) string | {
if !helper.IsWorkerless(shoot) && shoot.Spec.Kubernetes.KubeAPIServer != nil {
for _, plugin := range shoot.Spec.Kubernetes.KubeAPIServer.AdmissionPlugins {
if plugin.Name == "PodSecurityPolicy" && pointer.BoolDeref(plugin.Disabled, false) {
return ""
}
}
}
return "you should consider migrating to PodSecurity, see https://github.com/gardener/gardener/blob/master/docs/usage/pod-security.md#migrating-from-podsecuritypolicys-to-podsecurity-admission-controller for details"
} | identifier_body |
|
warnings.go | You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shoot
import (
"context"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"github.com/gardener/gardener/pkg/apis/core"
"github.com/gardener/gardener/pkg/apis/core/helper"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
versionutils "github.com/gardener/gardener/pkg/utils/version"
)
// GetWarnings returns warnings for the provided shoot.
func GetWarnings(_ context.Context, shoot, oldShoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if shoot == nil {
return nil
}
var warnings []string
if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) {
warnings = append(warnings, "you should consider disabling the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_access.md for details")
}
// TODO(acumino): Drop this warning in v1.78, with dropping of annotation to enable node-local-dns.
warnings = append(warnings, getWarningsForDeprecatedNodeLocalDNSLabels(shoot)...)
if oldShoot != nil {
warnings = append(warnings, getWarningsForDueCredentialsRotations(shoot, credentialsRotationInterval)...)
warnings = append(warnings, getWarningsForIncompleteCredentialsRotation(shoot, credentialsRotationInterval)...)
// Errors are ignored here because we cannot do anything meaningful with them - variables will default to `false`.
k8sLess125, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, "< 1.25")
k8sGreaterEqual123, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, ">= 1.23")
if k8sLess125 && k8sGreaterEqual123 {
if warning := getWarningsForPSPAdmissionPlugin(shoot); warning != "" {
warnings = append(warnings, warning)
}
}
}
if kubeControllerManager := shoot.Spec.Kubernetes.KubeControllerManager; kubeControllerManager != nil && kubeControllerManager.PodEvictionTimeout != nil {
warnings = append(warnings, "you are setting the spec.kubernetes.kubeControllerManager.podEvictionTimeout field. The field does not have effect since Kubernetes 1.13. Instead, use the spec.kubernetes.kubeAPIServer.(defaultNotReadyTolerationSeconds/defaultUnreachableTolerationSeconds) fields.")
}
return warnings
}
func getWarningsForDeprecatedNodeLocalDNSLabels(shoot *core.Shoot) []string {
var warnings []string
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNS]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.enabled` in Shoot instead. Switching on node-local-dns via shoot specification will roll the nodes even if node-local-dns was enabled beforehand via annotation.", v1beta1constants.AnnotationNodeLocalDNS))
}
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToClusterDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns))
}
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns]; ok |
return warnings
}
func getWarningsForDueCredentialsRotations(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if !isOldEnough(shoot.CreationTimestamp.Time, credentialsRotationInterval) {
return nil
}
if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil {
return []string{"you should consider rotating the shoot credentials, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#gardener-provided-credentials for details"}
}
var (
rotation = shoot.Status.Credentials.Rotation
warnings []string
)
if rotation.CertificateAuthorities == nil || initiationDue(rotation.CertificateAuthorities.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the certificate authorities, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#certificate-authorities for details")
}
if rotation.ETCDEncryptionKey == nil || initiationDue(rotation.ETCDEncryptionKey.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the ETCD encryption key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#etcd-encryption-key for details")
}
if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) &&
(rotation.Kubeconfig == nil || initiationDue(rotation.Kubeconfig.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#kubeconfig for details")
}
if (shoot.Spec.Purpose == nil || *shoot.Spec.Purpose != core.ShootPurposeTesting) &&
(rotation.Observability == nil || initiationDue(rotation.Observability.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the observability passwords, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#observability-passwords-for-plutono for details")
}
if rotation.ServiceAccountKey == nil || initiationDue(rotation.ServiceAccountKey.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the ServiceAccount token signing key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#serviceaccount-token-signing-key for details")
}
if helper.ShootEnablesSSHAccess(shoot) && (rotation.SSHKeypair == nil || initiationDue(rotation.SSHKeypair.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the SSH keypair, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#ssh-key-pair-for-worker-nodes for details")
}
return warnings
}
func getWarningsForIncompleteCredentialsRotation(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil {
return nil
}
var (
warnings []string
recommendedCompletionInterval = credentialsRotationInterval / 3
rotation = shoot.Status.Credentials.Rotation
)
// Only consider credentials for which completion must be triggered explicitly by the user. Credentials which are
// rotated in "one phase" are excluded.
if rotation.CertificateAuthorities != nil && completionDue(rotation.CertificateAuthorities.LastInitiationFinishedTime, rotation.CertificateAuthorities.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("certificate authorities", recommendedCompletionInterval))
}
if rotation.ETCDEncryptionKey != nil && completionDue(rotation.ETCDEncryptionKey.LastInitiationFinishedTime, rotation.ETCDEncryptionKey.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("ETCD encryption key", recommendedCompletionInterval))
}
if rotation.ServiceAccountKey != nil && completionDue(rotation.ServiceAccountKey.LastInitiationFinishedTime, rotation.ServiceAccountKey.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("ServiceAccount token signing key", recommendedCompletionInterval))
}
return warnings
}
func initiationDue(lastInitiationTime *metav1.Time, threshold time.Duration) bool {
return lastInitiationTime == nil || isOldEnough(lastInitiationTime.Time, threshold)
}
func completionDue(lastInitiationFinishedTime, lastCompletionTriggeredTime *metav1.Time, threshold time.Duration) bool {
if lastInitiationFinishedTime == nil {
return false
}
if lastCompletionTriggeredTime != nil && lastCompletionTriggeredTime.Time.UTC().After(lastInitiationFinishedTime.Time.UTC()) {
return false
}
return isOldEnough(lastInitiationFinishedTime.Time, threshold)
}
func isOldEnough(t time.Time, threshold time.Duration) bool {
return t.UTC().Add(threshold).Before(time.Now().UTC())
}
func completionWarning(credentials string, recommended | {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToUpstreamDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns))
} | conditional_block |
warnings.go | You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shoot
import (
"context"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"github.com/gardener/gardener/pkg/apis/core"
"github.com/gardener/gardener/pkg/apis/core/helper"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
versionutils "github.com/gardener/gardener/pkg/utils/version"
)
// GetWarnings returns warnings for the provided shoot.
func GetWarnings(_ context.Context, shoot, oldShoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if shoot == nil {
return nil
}
var warnings []string
if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) {
warnings = append(warnings, "you should consider disabling the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_access.md for details")
}
// TODO(acumino): Drop this warning in v1.78, with dropping of annotation to enable node-local-dns.
warnings = append(warnings, getWarningsForDeprecatedNodeLocalDNSLabels(shoot)...)
if oldShoot != nil {
warnings = append(warnings, getWarningsForDueCredentialsRotations(shoot, credentialsRotationInterval)...)
warnings = append(warnings, getWarningsForIncompleteCredentialsRotation(shoot, credentialsRotationInterval)...)
// Errors are ignored here because we cannot do anything meaningful with them - variables will default to `false`.
k8sLess125, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, "< 1.25")
k8sGreaterEqual123, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, ">= 1.23")
if k8sLess125 && k8sGreaterEqual123 {
if warning := getWarningsForPSPAdmissionPlugin(shoot); warning != "" {
warnings = append(warnings, warning)
}
}
}
if kubeControllerManager := shoot.Spec.Kubernetes.KubeControllerManager; kubeControllerManager != nil && kubeControllerManager.PodEvictionTimeout != nil {
warnings = append(warnings, "you are setting the spec.kubernetes.kubeControllerManager.podEvictionTimeout field. The field does not have effect since Kubernetes 1.13. Instead, use the spec.kubernetes.kubeAPIServer.(defaultNotReadyTolerationSeconds/defaultUnreachableTolerationSeconds) fields.")
}
return warnings
}
func getWarningsForDeprecatedNodeLocalDNSLabels(shoot *core.Shoot) []string {
var warnings []string
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNS]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.enabled` in Shoot instead. Switching on node-local-dns via shoot specification will roll the nodes even if node-local-dns was enabled beforehand via annotation.", v1beta1constants.AnnotationNodeLocalDNS))
}
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToClusterDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToClusterDns))
}
if _, ok := shoot.Annotations[v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %v is deprecated. Use field `.spec.systemComponents.nodeLocalDNS.forceTCPToUpstreamDNS` in Shoot instead.", v1beta1constants.AnnotationNodeLocalDNSForceTcpToUpstreamDns))
}
return warnings
}
func | (shoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if !isOldEnough(shoot.CreationTimestamp.Time, credentialsRotationInterval) {
return nil
}
if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil {
return []string{"you should consider rotating the shoot credentials, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#gardener-provided-credentials for details"}
}
var (
rotation = shoot.Status.Credentials.Rotation
warnings []string
)
if rotation.CertificateAuthorities == nil || initiationDue(rotation.CertificateAuthorities.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the certificate authorities, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#certificate-authorities for details")
}
if rotation.ETCDEncryptionKey == nil || initiationDue(rotation.ETCDEncryptionKey.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the ETCD encryption key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#etcd-encryption-key for details")
}
if pointer.BoolDeref(shoot.Spec.Kubernetes.EnableStaticTokenKubeconfig, true) &&
(rotation.Kubeconfig == nil || initiationDue(rotation.Kubeconfig.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the static token kubeconfig, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#kubeconfig for details")
}
if (shoot.Spec.Purpose == nil || *shoot.Spec.Purpose != core.ShootPurposeTesting) &&
(rotation.Observability == nil || initiationDue(rotation.Observability.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the observability passwords, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#observability-passwords-for-plutono for details")
}
if rotation.ServiceAccountKey == nil || initiationDue(rotation.ServiceAccountKey.LastInitiationTime, credentialsRotationInterval) {
warnings = append(warnings, "you should consider rotating the ServiceAccount token signing key, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#serviceaccount-token-signing-key for details")
}
if helper.ShootEnablesSSHAccess(shoot) && (rotation.SSHKeypair == nil || initiationDue(rotation.SSHKeypair.LastInitiationTime, credentialsRotationInterval)) {
warnings = append(warnings, "you should consider rotating the SSH keypair, see https://github.com/gardener/gardener/blob/master/docs/usage/shoot_credentials_rotation.md#ssh-key-pair-for-worker-nodes for details")
}
return warnings
}
func getWarningsForIncompleteCredentialsRotation(shoot *core.Shoot, credentialsRotationInterval time.Duration) []string {
if shoot.Status.Credentials == nil || shoot.Status.Credentials.Rotation == nil {
return nil
}
var (
warnings []string
recommendedCompletionInterval = credentialsRotationInterval / 3
rotation = shoot.Status.Credentials.Rotation
)
// Only consider credentials for which completion must be triggered explicitly by the user. Credentials which are
// rotated in "one phase" are excluded.
if rotation.CertificateAuthorities != nil && completionDue(rotation.CertificateAuthorities.LastInitiationFinishedTime, rotation.CertificateAuthorities.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("certificate authorities", recommendedCompletionInterval))
}
if rotation.ETCDEncryptionKey != nil && completionDue(rotation.ETCDEncryptionKey.LastInitiationFinishedTime, rotation.ETCDEncryptionKey.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("ETCD encryption key", recommendedCompletionInterval))
}
if rotation.ServiceAccountKey != nil && completionDue(rotation.ServiceAccountKey.LastInitiationFinishedTime, rotation.ServiceAccountKey.LastCompletionTriggeredTime, recommendedCompletionInterval) {
warnings = append(warnings, completionWarning("ServiceAccount token signing key", recommendedCompletionInterval))
}
return warnings
}
func initiationDue(lastInitiationTime *metav1.Time, threshold time.Duration) bool {
return lastInitiationTime == nil || isOldEnough(lastInitiationTime.Time, threshold)
}
func completionDue(lastInitiationFinishedTime, lastCompletionTriggeredTime *metav1.Time, threshold time.Duration) bool {
if lastInitiationFinishedTime == nil {
return false
}
if lastCompletionTriggeredTime != nil && lastCompletionTriggeredTime.Time.UTC().After(lastInitiationFinishedTime.Time.UTC()) {
return false
}
return isOldEnough(lastInitiationFinishedTime.Time, threshold)
}
func isOldEnough(t time.Time, threshold time.Duration) bool {
return t.UTC().Add(threshold).Before(time.Now().UTC())
}
func completionWarning(credentials string, recommendedCompletion | getWarningsForDueCredentialsRotations | identifier_name |
train_gcn.py | permissions and
#limitations under the License.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "src"))
import glob
import functools
import pickle
import argparse
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.python.keras.optimizers import Adam, SGD
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import models
from tensorflow.python.keras.utils import multi_gpu_model
from utils import buildImageDataset, construct_feed_dict
from custom_layers import *
from augmentation import change_intensity_img, _augment_deformnet
from dataset import get_baseline_dataset, get_baseline_dataset_deformnet
from model import DeformNet
from loss import mesh_loss_geometric_cf, point_loss_cf, binary_bce_dice_loss
from call_backs import *
"""# Set up"""
parser = argparse.ArgumentParser()
parser.add_argument('--im_trains', nargs='+',help='Name of the folder containing the image data')
parser.add_argument('--im_vals', nargs='+', help='Name of the folder containing the image data')
parser.add_argument('--pre_train', default='', help="Filename of the pretrained graph model")
parser.add_argument('--mesh', help='Name of the .dat file containing mesh info')
parser.add_argument('--mesh_txt', nargs='+', help='Name of the mesh_info.txt file with tmplt scale and center into')
parser.add_argument('--output', help='Name of the output folder')
parser.add_argument('--attr_trains', nargs='+', help='Attribute name of the folders containing tf records')
parser.add_argument('--attr_vals', nargs='+', help='Attribute name of the folders containing tf records')
parser.add_argument('--train_data_weights', type=float, nargs='+', help='Weights to apply for the samples in different datasets')
parser.add_argument('--val_data_weights', type=float, nargs='+', help='Weights to apply for the samples in different datasets')
parser.add_argument('--file_pattern', default='*.tfrecords', help='Pattern of the .tfrecords files')
parser.add_argument('--modality', nargs='+', help='Name of the modality, mr, ct, split by space')
parser.add_argument('--num_epoch', type=int, help='Maximum number of epochs to run')
parser.add_argument('--num_seg', type=int,default=1, help='Number of segmentation classes')
parser.add_argument('--seg_weight', type=float, default=1., help='Weight of the segmentation loss')
parser.add_argument('--mesh_ids', nargs='+', type=int, default=[2], help='Number of meshes to train')
parser.add_argument('--batch_size', type=int, default=10, help='Batch size')
parser.add_argument('--shuffle_buffer_size', type=int, default=10000, help='Shuffle buffer size')
parser.add_argument('--lr', type=float, help='Learning rate')
parser.add_argument('--cf_ratio', type=float, default=1., help='Loss ratio between gt chamfer loss and pred chamfer loss')
parser.add_argument('--size', type = int, nargs='+', help='Image dimensions')
parser.add_argument('--weights', type = float, nargs='+', help='Loss weights for geometric loss')
parser.add_argument('--hidden_dim', type = int, default=128, help='Hidden dimension')
parser.add_argument('--amplify_factor', type=float, default=1., help="amplify_factor of the predicted displacements")
args = parser.parse_args()
img_shape = (args.size[0], args.size[1], args.size[2], 1)
save_loss_path = args.output
save_model_path = os.path.join(args.output, "weights_gcn.hdf5")
""" Create new directories """
try:
os.makedirs(os.path.dirname(save_model_path))
os.makedirs(os.path.dirname(save_loss_path))
except Exception as e: print(e)
"""# Feed in mesh info"""
pkl = pickle.load(open(args.mesh, 'rb'))
mesh_info = construct_feed_dict(pkl)
mesh_info['mesh_center'] = [np.zeros(3) for i in range(len(args.mesh_ids))]
mesh_info['mesh_scale'] = [0 for i in range(len(args.mesh_ids))]
mesh_info['mesh_area'] = [0 for i in range(len(args.mesh_ids))]
mesh_info['edge_length_scaled'] = [np.zeros(3) for i in range(len(args.mesh_ids))] # 3 is number of blocks
for txt_fn in args.mesh_txt:
for i in range(len(args.mesh_ids)):
ctr_scale = np.loadtxt(txt_fn)
if len(ctr_scale.shape)==1:
ctr_scale = np.expand_dims(ctr_scale, axis=0)
mesh_info['mesh_center'][i] += ctr_scale[i, :-2]/len(args.modality)
mesh_info['mesh_scale'][i] += ctr_scale[i, -2]/len(args.modality)
mesh_info['mesh_area'][i] += ctr_scale[i, -1]/len(args.modality)
for i in range(len(args.mesh_ids)):
r = mesh_info['mesh_scale'][i]*2
scale = r * np.mean(args.size)
area_ratio = mesh_info['mesh_area'][i]/(4*np.pi*r*r)
mesh_info['edge_length_scaled'][i] = np.array(mesh_info['edge_length']) * scale * scale * area_ratio
print("Mesh center, scale: ", mesh_info['mesh_center'], mesh_info['mesh_scale'])
print("Mesh edge: ", mesh_info['edge_length_scaled'])
"""## Set up train and validation datasets
Note that we apply image augmentation to our training dataset but not our validation dataset.
"""
tr_cfg = {'change_intensity': {"scale": [0.9, 1.1],"shift": [-0.1, 0.1]}}
tr_preprocessing_fn = functools.partial(_augment_deformnet, **tr_cfg)
if_seg = True if args.num_seg>0 else False
val_preprocessing_fn = functools.partial(_augment_deformnet)
train_ds_list, val_ds_list = [], []
train_ds_num, val_ds_num = [], []
for data_folder_out, attr in zip(args.im_trains, args.attr_trains):
x_train_filenames_i = buildImageDataset(data_folder_out, args.modality, 41, mode='_train'+attr, ext=args.file_pattern)
train_ds_num.append(len(x_train_filenames_i))
train_ds_i = get_baseline_dataset_deformnet(x_train_filenames_i, preproc_fn=tr_preprocessing_fn, mesh_ids=args.mesh_ids, \
shuffle_buffer=args.shuffle_buffer_size, if_seg=if_seg)
train_ds_list.append(train_ds_i)
for data_val_folder_out, attr in zip(args.im_vals, args.attr_vals):
x_val_filenames_i = buildImageDataset(data_val_folder_out, args.modality, 41, mode='_val'+attr, ext=args.file_pattern)
val_ds_num.append(len(x_val_filenames_i))
val_ds_i = get_baseline_dataset_deformnet(x_val_filenames_i, preproc_fn=val_preprocessing_fn, mesh_ids=args.mesh_ids, \
shuffle_buffer=args.shuffle_buffer_size, if_seg=if_seg)
val_ds_list.append(val_ds_i)
train_data_weights = [w/np.sum(args.train_data_weights) for w in args.train_data_weights]
val_data_weights = [w/np.sum(args.val_data_weights) for w in args.val_data_weights]
print("Sampling probability for train and val datasets: ", train_data_weights, val_data_weights)
train_ds = tf.data.experimental.sample_from_datasets(train_ds_list, weights=train_data_weights)
train_ds = train_ds.batch(args.batch_size)
val_ds = tf.data.experimental.sample_from_datasets(val_ds_list, weights=val_data_weights)
val_ds = val_ds.batch(args.batch_size)
num_train_examples = train_ds_num[np.argmax(train_data_weights)]/np.max(train_data_weights)
num_val_examples = val_ds_num[np.argmax(val_data_weights)]/np.max(val_data_weights)
print("Number of train, val samples after reweighting: ", num_train_examples, num_val_examples)
"""# Build the model"""
model = DeformNet(args.batch_size, img_shape, mesh_info, amplify_factor=args.amplify_factor,num_mesh=len(args.mesh_ids), num_seg=args.num_seg)
unet_gcn = model.build_keras()
unet_gcn.summary(line_length=150)
adam = Adam(lr=args.lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=True)
output_keys = [node.op.name.split('/')[0] for node in unet_gcn.outputs]
print("Output Keys: ", output_keys)
if args.num_seg >0:
losses = [ mesh_loss_geometric_cf(mesh_info, 3, args.weights, args.cf_ratio, mesh_info['edge_length_scaled'][(i-1)%len(args.mesh_ids)]) for i in range(1, len(output_keys))]
losses = [binary_bce_dice_loss] + losses
else:
losses = [ mesh_loss_geometric_cf(mesh_info, 3, args.weights, args.cf_ratio, mesh_info['edge_length_scaled'][i%len(args.mesh_ids)]) for i in range(len(output_keys))]
losses = dict(zip(output_keys, losses))
metric_loss, metric_key = [], []
for i in range(1, len(args.mesh_ids)+1):
metric_key.append(output_keys[-i])
metric_loss.append(point_loss_cf)
metrics_losses = dict(zip(metric_key, metric_loss))
metric_loss_weights = list(np.ones(len(args.mesh_ids)))
loss_weights = list(np.ones(len(output_keys)))
if args.num_seg > 0:
| loss_weights[0] = args.seg_weight | conditional_block |
|
train_gcn.py | KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "src"))
import glob
import functools
import pickle
import argparse
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.python.keras.optimizers import Adam, SGD
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import models
from tensorflow.python.keras.utils import multi_gpu_model
from utils import buildImageDataset, construct_feed_dict
from custom_layers import *
from augmentation import change_intensity_img, _augment_deformnet
from dataset import get_baseline_dataset, get_baseline_dataset_deformnet
from model import DeformNet
from loss import mesh_loss_geometric_cf, point_loss_cf, binary_bce_dice_loss
from call_backs import *
"""# Set up"""
parser = argparse.ArgumentParser()
parser.add_argument('--im_trains', nargs='+',help='Name of the folder containing the image data')
parser.add_argument('--im_vals', nargs='+', help='Name of the folder containing the image data')
parser.add_argument('--pre_train', default='', help="Filename of the pretrained graph model")
parser.add_argument('--mesh', help='Name of the .dat file containing mesh info')
parser.add_argument('--mesh_txt', nargs='+', help='Name of the mesh_info.txt file with tmplt scale and center into')
parser.add_argument('--output', help='Name of the output folder')
parser.add_argument('--attr_trains', nargs='+', help='Attribute name of the folders containing tf records')
parser.add_argument('--attr_vals', nargs='+', help='Attribute name of the folders containing tf records')
parser.add_argument('--train_data_weights', type=float, nargs='+', help='Weights to apply for the samples in different datasets')
parser.add_argument('--val_data_weights', type=float, nargs='+', help='Weights to apply for the samples in different datasets')
parser.add_argument('--file_pattern', default='*.tfrecords', help='Pattern of the .tfrecords files')
parser.add_argument('--modality', nargs='+', help='Name of the modality, mr, ct, split by space')
parser.add_argument('--num_epoch', type=int, help='Maximum number of epochs to run')
parser.add_argument('--num_seg', type=int,default=1, help='Number of segmentation classes')
parser.add_argument('--seg_weight', type=float, default=1., help='Weight of the segmentation loss')
parser.add_argument('--mesh_ids', nargs='+', type=int, default=[2], help='Number of meshes to train')
parser.add_argument('--batch_size', type=int, default=10, help='Batch size')
parser.add_argument('--shuffle_buffer_size', type=int, default=10000, help='Shuffle buffer size')
parser.add_argument('--lr', type=float, help='Learning rate')
parser.add_argument('--cf_ratio', type=float, default=1., help='Loss ratio between gt chamfer loss and pred chamfer loss')
parser.add_argument('--size', type = int, nargs='+', help='Image dimensions')
parser.add_argument('--weights', type = float, nargs='+', help='Loss weights for geometric loss')
parser.add_argument('--hidden_dim', type = int, default=128, help='Hidden dimension')
parser.add_argument('--amplify_factor', type=float, default=1., help="amplify_factor of the predicted displacements")
args = parser.parse_args()
img_shape = (args.size[0], args.size[1], args.size[2], 1)
save_loss_path = args.output
save_model_path = os.path.join(args.output, "weights_gcn.hdf5")
""" Create new directories """
try:
os.makedirs(os.path.dirname(save_model_path))
os.makedirs(os.path.dirname(save_loss_path))
except Exception as e: print(e)
"""# Feed in mesh info"""
pkl = pickle.load(open(args.mesh, 'rb'))
mesh_info = construct_feed_dict(pkl)
mesh_info['mesh_center'] = [np.zeros(3) for i in range(len(args.mesh_ids))]
mesh_info['mesh_scale'] = [0 for i in range(len(args.mesh_ids))]
mesh_info['mesh_area'] = [0 for i in range(len(args.mesh_ids))]
mesh_info['edge_length_scaled'] = [np.zeros(3) for i in range(len(args.mesh_ids))] # 3 is number of blocks
for txt_fn in args.mesh_txt:
for i in range(len(args.mesh_ids)):
ctr_scale = np.loadtxt(txt_fn)
if len(ctr_scale.shape)==1:
ctr_scale = np.expand_dims(ctr_scale, axis=0)
mesh_info['mesh_center'][i] += ctr_scale[i, :-2]/len(args.modality)
mesh_info['mesh_scale'][i] += ctr_scale[i, -2]/len(args.modality)
mesh_info['mesh_area'][i] += ctr_scale[i, -1]/len(args.modality)
for i in range(len(args.mesh_ids)):
r = mesh_info['mesh_scale'][i]*2
scale = r * np.mean(args.size)
area_ratio = mesh_info['mesh_area'][i]/(4*np.pi*r*r)
mesh_info['edge_length_scaled'][i] = np.array(mesh_info['edge_length']) * scale * scale * area_ratio
print("Mesh center, scale: ", mesh_info['mesh_center'], mesh_info['mesh_scale'])
print("Mesh edge: ", mesh_info['edge_length_scaled'])
"""## Set up train and validation datasets
Note that we apply image augmentation to our training dataset but not our validation dataset.
"""
tr_cfg = {'change_intensity': {"scale": [0.9, 1.1],"shift": [-0.1, 0.1]}}
tr_preprocessing_fn = functools.partial(_augment_deformnet, **tr_cfg)
if_seg = True if args.num_seg>0 else False
val_preprocessing_fn = functools.partial(_augment_deformnet)
train_ds_list, val_ds_list = [], []
train_ds_num, val_ds_num = [], []
for data_folder_out, attr in zip(args.im_trains, args.attr_trains):
x_train_filenames_i = buildImageDataset(data_folder_out, args.modality, 41, mode='_train'+attr, ext=args.file_pattern)
train_ds_num.append(len(x_train_filenames_i))
train_ds_i = get_baseline_dataset_deformnet(x_train_filenames_i, preproc_fn=tr_preprocessing_fn, mesh_ids=args.mesh_ids, \
shuffle_buffer=args.shuffle_buffer_size, if_seg=if_seg)
train_ds_list.append(train_ds_i)
for data_val_folder_out, attr in zip(args.im_vals, args.attr_vals):
x_val_filenames_i = buildImageDataset(data_val_folder_out, args.modality, 41, mode='_val'+attr, ext=args.file_pattern)
val_ds_num.append(len(x_val_filenames_i))
val_ds_i = get_baseline_dataset_deformnet(x_val_filenames_i, preproc_fn=val_preprocessing_fn, mesh_ids=args.mesh_ids, \
shuffle_buffer=args.shuffle_buffer_size, if_seg=if_seg)
val_ds_list.append(val_ds_i)
train_data_weights = [w/np.sum(args.train_data_weights) for w in args.train_data_weights]
val_data_weights = [w/np.sum(args.val_data_weights) for w in args.val_data_weights]
print("Sampling probability for train and val datasets: ", train_data_weights, val_data_weights)
train_ds = tf.data.experimental.sample_from_datasets(train_ds_list, weights=train_data_weights)
train_ds = train_ds.batch(args.batch_size)
val_ds = tf.data.experimental.sample_from_datasets(val_ds_list, weights=val_data_weights)
val_ds = val_ds.batch(args.batch_size)
| """# Build the model"""
model = DeformNet(args.batch_size, img_shape, mesh_info, amplify_factor=args.amplify_factor,num_mesh=len(args.mesh_ids), num_seg=args.num_seg)
unet_gcn = model.build_keras()
unet_gcn.summary(line_length=150)
adam = Adam(lr=args.lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=True)
output_keys = [node.op.name.split('/')[0] for node in unet_gcn.outputs]
print("Output Keys: ", output_keys)
if args.num_seg >0:
losses = [ mesh_loss_geometric_cf(mesh_info, 3, args.weights, args.cf_ratio, mesh_info['edge_length_scaled'][(i-1)%len(args.mesh_ids)]) for i in range(1, len(output_keys))]
losses = [binary_bce_dice_loss] + losses
else:
losses = [ mesh_loss_geometric_cf(mesh_info, 3, args.weights, args.cf_ratio, mesh_info['edge_length_scaled'][i%len(args.mesh_ids)]) for i in range(len(output_keys))]
losses = dict(zip(output_keys, losses))
metric_loss, metric_key = [], []
for i in range(1, len(args.mesh_ids)+1):
metric_key.append(output_keys[-i])
metric_loss.append(point_loss_cf)
metrics_losses = dict(zip(metric_key, metric_loss))
metric_loss_weights = list(np.ones(len(args.mesh_ids)))
loss_weights = list(np.ones(len(output_keys)))
if args.num | num_train_examples = train_ds_num[np.argmax(train_data_weights)]/np.max(train_data_weights)
num_val_examples = val_ds_num[np.argmax(val_data_weights)]/np.max(val_data_weights)
print("Number of train, val samples after reweighting: ", num_train_examples, num_val_examples)
| random_line_split |
mod.rs | following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
mod music;
pub use music::MusicPlayer;
use std::{
cell::{Cell, RefCell},
io::{self, BufReader, Cursor, Read},
};
use crate::common::vfs::{Vfs, VfsError};
use cgmath::{InnerSpace, Vector3};
use rodio::{
source::{Buffered, SamplesConverter},
Decoder, OutputStreamHandle, Sink, Source,
};
use thiserror::Error;
use chrono::Duration;
pub const DISTANCE_ATTENUATION_FACTOR: f32 = 0.001;
const MAX_ENTITY_CHANNELS: usize = 128;
#[derive(Error, Debug)]
pub enum SoundError {
#[error("No such music track: {0}")]
NoSuchTrack(String),
#[error("I/O error: {0}")]
Io(#[from] io::Error),
#[error("Virtual filesystem error: {0}")]
Vfs(#[from] VfsError),
#[error("WAV decoder error: {0}")]
Decoder(#[from] rodio::decoder::DecoderError),
}
/// Data needed for sound spatialization.
///
/// This struct is updated every frame.
#[derive(Debug)]
pub struct Listener {
origin: Cell<Vector3<f32>>,
left_ear: Cell<Vector3<f32>>,
right_ear: Cell<Vector3<f32>>,
}
impl Listener {
pub fn new() -> Listener {
Listener {
origin: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
left_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
right_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
}
}
pub fn origin(&self) -> Vector3<f32> {
self.origin.get()
}
pub fn left_ear(&self) -> Vector3<f32> {
self.left_ear.get()
}
pub fn right_ear(&self) -> Vector3<f32> {
self.right_ear.get()
}
pub fn s | &self, new_origin: Vector3<f32>) {
self.origin.set(new_origin);
}
pub fn set_left_ear(&self, new_origin: Vector3<f32>) {
self.left_ear.set(new_origin);
}
pub fn set_right_ear(&self, new_origin: Vector3<f32>) {
self.right_ear.set(new_origin);
}
pub fn attenuate(
&self,
emitter_origin: Vector3<f32>,
base_volume: f32,
attenuation: f32,
) -> f32 {
let decay = (emitter_origin - self.origin.get()).magnitude()
* attenuation
* DISTANCE_ATTENUATION_FACTOR;
let volume = ((1.0 - decay) * base_volume).max(0.0);
volume
}
}
#[derive(Clone)]
pub struct AudioSource(Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>);
impl AudioSource {
pub fn load<S>(vfs: &Vfs, name: S) -> Result<AudioSource, SoundError>
where
S: AsRef<str>,
{
let name = name.as_ref();
let full_path = "sound/".to_owned() + name;
let mut file = vfs.open(&full_path)?;
let mut data = Vec::new();
file.read_to_end(&mut data)?;
let src = Decoder::new(Cursor::new(data))?
.convert_samples()
.buffered();
Ok(AudioSource(src))
}
}
pub struct StaticSound {
origin: Vector3<f32>,
sink: RefCell<Sink>,
volume: f32,
attenuation: f32,
}
impl StaticSound {
pub fn new(
stream: &OutputStreamHandle,
origin: Vector3<f32>,
src: AudioSource,
volume: f32,
attenuation: f32,
listener: &Listener,
) -> StaticSound {
// TODO: handle PlayError once PR accepted
let sink = Sink::try_new(&stream).unwrap();
let infinite = src.0.clone().repeat_infinite();
sink.append(infinite);
sink.set_volume(listener.attenuate(origin, volume, attenuation));
StaticSound {
origin,
sink: RefCell::new(sink),
volume,
attenuation,
}
}
pub fn update(&self, listener: &Listener) {
let sink = self.sink.borrow_mut();
sink.set_volume(listener.attenuate(self.origin, self.volume, self.attenuation));
}
}
/// Represents a single audio channel, capable of playing one sound at a time.
pub struct Channel {
stream: OutputStreamHandle,
sink: RefCell<Option<Sink>>,
master_vol: Cell<f32>,
attenuation: Cell<f32>,
}
impl Channel {
/// Create a new `Channel` backed by the given `Device`.
pub fn new(stream: OutputStreamHandle) -> Channel {
Channel {
stream,
sink: RefCell::new(None),
master_vol: Cell::new(0.0),
attenuation: Cell::new(0.0),
}
}
/// Play a new sound on this channel, cutting off any sound that was previously playing.
pub fn play(
&self,
src: AudioSource,
ent_pos: Vector3<f32>,
listener: &Listener,
volume: f32,
attenuation: f32,
) {
self.master_vol.set(volume);
self.attenuation.set(attenuation);
// stop the old sound
self.sink.replace(None);
// start the new sound
let new_sink = Sink::try_new(&self.stream).unwrap();
new_sink.append(src.0);
new_sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
self.sink.replace(Some(new_sink));
}
pub fn update(&self, ent_pos: Vector3<f32>, listener: &Listener) {
if let Some(ref sink) = *self.sink.borrow_mut() {
// attenuate using quake coordinates since distance is the same either way
sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
};
}
/// Stop the sound currently playing on this channel, if there is one.
pub fn stop(&self) {
self.sink.replace(None);
}
/// Returns whether or not this `Channel` is currently in use.
pub fn in_use(&self) -> bool {
let replace_sink;
match *self.sink.borrow() {
Some(ref sink) => replace_sink = sink.empty(),
None => return false,
}
// if the sink isn't in use, free it
if replace_sink {
self.sink.replace(None);
false
} else {
true
}
}
}
pub struct EntityChannel {
start_time: Duration,
// if None, sound is associated with a temp entity
ent_id: Option<usize>,
ent_channel: i8,
channel: Channel,
}
impl EntityChannel {
pub fn channel(&self) -> &Channel {
&self.channel
}
pub fn entity_id(&self) -> Option<usize> {
self.ent_id
}
}
pub struct EntityMixer {
stream: OutputStreamHandle,
// TODO: replace with an array once const type parameters are implemented
channels: Box<[Option<EntityChannel>]>,
}
impl EntityMixer {
pub fn new(stream: OutputStreamHandle) -> EntityMixer {
let mut channel_vec = Vec::new();
for _ in 0..MAX_ENTITY_CHANNELS {
channel_vec.push(None);
}
EntityMixer {
stream,
channels: channel_vec.into_boxed_slice(),
}
}
fn find_free_channel(&self, ent_id: Option<usize>, ent_channel: i8) -> usize {
let mut oldest = 0;
for (i, channel) in self.channels.iter().enumerate() {
match *channel {
Some(ref chan) => {
// if this channel is free, return it
if !chan.channel.in_use() {
return i;
}
// replace sounds on the same entity channel
if ent_channel != 0
&& chan.ent_id == ent_id
&& (chan.ent_channel == ent_channel || ent_channel == -1)
{
return i;
}
// TODO: don't clobber player sounds with monster sounds
// keep track of which sound started the earliest
match self.channels[oldest] {
Some(ref o) => {
if chan.start_time < o.start_time {
oldest = i;
}
}
None => oldest = i | et_origin( | identifier_name |
mod.rs | following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
mod music;
pub use music::MusicPlayer;
use std::{
cell::{Cell, RefCell},
io::{self, BufReader, Cursor, Read},
};
use crate::common::vfs::{Vfs, VfsError};
| };
use thiserror::Error;
use chrono::Duration;
pub const DISTANCE_ATTENUATION_FACTOR: f32 = 0.001;
const MAX_ENTITY_CHANNELS: usize = 128;
#[derive(Error, Debug)]
pub enum SoundError {
#[error("No such music track: {0}")]
NoSuchTrack(String),
#[error("I/O error: {0}")]
Io(#[from] io::Error),
#[error("Virtual filesystem error: {0}")]
Vfs(#[from] VfsError),
#[error("WAV decoder error: {0}")]
Decoder(#[from] rodio::decoder::DecoderError),
}
/// Data needed for sound spatialization.
///
/// This struct is updated every frame.
#[derive(Debug)]
pub struct Listener {
origin: Cell<Vector3<f32>>,
left_ear: Cell<Vector3<f32>>,
right_ear: Cell<Vector3<f32>>,
}
impl Listener {
pub fn new() -> Listener {
Listener {
origin: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
left_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
right_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
}
}
pub fn origin(&self) -> Vector3<f32> {
self.origin.get()
}
pub fn left_ear(&self) -> Vector3<f32> {
self.left_ear.get()
}
pub fn right_ear(&self) -> Vector3<f32> {
self.right_ear.get()
}
pub fn set_origin(&self, new_origin: Vector3<f32>) {
self.origin.set(new_origin);
}
pub fn set_left_ear(&self, new_origin: Vector3<f32>) {
self.left_ear.set(new_origin);
}
pub fn set_right_ear(&self, new_origin: Vector3<f32>) {
self.right_ear.set(new_origin);
}
pub fn attenuate(
&self,
emitter_origin: Vector3<f32>,
base_volume: f32,
attenuation: f32,
) -> f32 {
let decay = (emitter_origin - self.origin.get()).magnitude()
* attenuation
* DISTANCE_ATTENUATION_FACTOR;
let volume = ((1.0 - decay) * base_volume).max(0.0);
volume
}
}
#[derive(Clone)]
pub struct AudioSource(Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>);
impl AudioSource {
pub fn load<S>(vfs: &Vfs, name: S) -> Result<AudioSource, SoundError>
where
S: AsRef<str>,
{
let name = name.as_ref();
let full_path = "sound/".to_owned() + name;
let mut file = vfs.open(&full_path)?;
let mut data = Vec::new();
file.read_to_end(&mut data)?;
let src = Decoder::new(Cursor::new(data))?
.convert_samples()
.buffered();
Ok(AudioSource(src))
}
}
pub struct StaticSound {
origin: Vector3<f32>,
sink: RefCell<Sink>,
volume: f32,
attenuation: f32,
}
impl StaticSound {
pub fn new(
stream: &OutputStreamHandle,
origin: Vector3<f32>,
src: AudioSource,
volume: f32,
attenuation: f32,
listener: &Listener,
) -> StaticSound {
// TODO: handle PlayError once PR accepted
let sink = Sink::try_new(&stream).unwrap();
let infinite = src.0.clone().repeat_infinite();
sink.append(infinite);
sink.set_volume(listener.attenuate(origin, volume, attenuation));
StaticSound {
origin,
sink: RefCell::new(sink),
volume,
attenuation,
}
}
pub fn update(&self, listener: &Listener) {
let sink = self.sink.borrow_mut();
sink.set_volume(listener.attenuate(self.origin, self.volume, self.attenuation));
}
}
/// Represents a single audio channel, capable of playing one sound at a time.
pub struct Channel {
stream: OutputStreamHandle,
sink: RefCell<Option<Sink>>,
master_vol: Cell<f32>,
attenuation: Cell<f32>,
}
impl Channel {
/// Create a new `Channel` backed by the given `Device`.
pub fn new(stream: OutputStreamHandle) -> Channel {
Channel {
stream,
sink: RefCell::new(None),
master_vol: Cell::new(0.0),
attenuation: Cell::new(0.0),
}
}
/// Play a new sound on this channel, cutting off any sound that was previously playing.
pub fn play(
&self,
src: AudioSource,
ent_pos: Vector3<f32>,
listener: &Listener,
volume: f32,
attenuation: f32,
) {
self.master_vol.set(volume);
self.attenuation.set(attenuation);
// stop the old sound
self.sink.replace(None);
// start the new sound
let new_sink = Sink::try_new(&self.stream).unwrap();
new_sink.append(src.0);
new_sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
self.sink.replace(Some(new_sink));
}
pub fn update(&self, ent_pos: Vector3<f32>, listener: &Listener) {
if let Some(ref sink) = *self.sink.borrow_mut() {
// attenuate using quake coordinates since distance is the same either way
sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
};
}
/// Stop the sound currently playing on this channel, if there is one.
pub fn stop(&self) {
self.sink.replace(None);
}
/// Returns whether or not this `Channel` is currently in use.
pub fn in_use(&self) -> bool {
let replace_sink;
match *self.sink.borrow() {
Some(ref sink) => replace_sink = sink.empty(),
None => return false,
}
// if the sink isn't in use, free it
if replace_sink {
self.sink.replace(None);
false
} else {
true
}
}
}
pub struct EntityChannel {
start_time: Duration,
// if None, sound is associated with a temp entity
ent_id: Option<usize>,
ent_channel: i8,
channel: Channel,
}
impl EntityChannel {
pub fn channel(&self) -> &Channel {
&self.channel
}
pub fn entity_id(&self) -> Option<usize> {
self.ent_id
}
}
pub struct EntityMixer {
stream: OutputStreamHandle,
// TODO: replace with an array once const type parameters are implemented
channels: Box<[Option<EntityChannel>]>,
}
impl EntityMixer {
pub fn new(stream: OutputStreamHandle) -> EntityMixer {
let mut channel_vec = Vec::new();
for _ in 0..MAX_ENTITY_CHANNELS {
channel_vec.push(None);
}
EntityMixer {
stream,
channels: channel_vec.into_boxed_slice(),
}
}
fn find_free_channel(&self, ent_id: Option<usize>, ent_channel: i8) -> usize {
let mut oldest = 0;
for (i, channel) in self.channels.iter().enumerate() {
match *channel {
Some(ref chan) => {
// if this channel is free, return it
if !chan.channel.in_use() {
return i;
}
// replace sounds on the same entity channel
if ent_channel != 0
&& chan.ent_id == ent_id
&& (chan.ent_channel == ent_channel || ent_channel == -1)
{
return i;
}
// TODO: don't clobber player sounds with monster sounds
// keep track of which sound started the earliest
match self.channels[oldest] {
Some(ref o) => {
if chan.start_time < o.start_time {
oldest = i;
}
}
None => oldest = i,
| use cgmath::{InnerSpace, Vector3};
use rodio::{
source::{Buffered, SamplesConverter},
Decoder, OutputStreamHandle, Sink, Source, | random_line_split |
mod.rs | following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
mod music;
pub use music::MusicPlayer;
use std::{
cell::{Cell, RefCell},
io::{self, BufReader, Cursor, Read},
};
use crate::common::vfs::{Vfs, VfsError};
use cgmath::{InnerSpace, Vector3};
use rodio::{
source::{Buffered, SamplesConverter},
Decoder, OutputStreamHandle, Sink, Source,
};
use thiserror::Error;
use chrono::Duration;
pub const DISTANCE_ATTENUATION_FACTOR: f32 = 0.001;
const MAX_ENTITY_CHANNELS: usize = 128;
#[derive(Error, Debug)]
pub enum SoundError {
#[error("No such music track: {0}")]
NoSuchTrack(String),
#[error("I/O error: {0}")]
Io(#[from] io::Error),
#[error("Virtual filesystem error: {0}")]
Vfs(#[from] VfsError),
#[error("WAV decoder error: {0}")]
Decoder(#[from] rodio::decoder::DecoderError),
}
/// Data needed for sound spatialization.
///
/// This struct is updated every frame.
#[derive(Debug)]
pub struct Listener {
origin: Cell<Vector3<f32>>,
left_ear: Cell<Vector3<f32>>,
right_ear: Cell<Vector3<f32>>,
}
impl Listener {
pub fn new() -> Listener {
Listener {
origin: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
left_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
right_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
}
}
pub fn origin(&self) -> Vector3<f32> {
self.origin.get()
}
pub fn left_ear(&self) -> Vector3<f32> {
self.left_ear.get()
}
pub fn right_ear(&self) -> Vector3<f32> {
self.right_ear.get()
}
pub fn set_origin(&self, new_origin: Vector3<f32>) {
self.origin.set(new_origin);
}
pub fn set_left_ear(&self, new_origin: Vector3<f32>) {
self.left_ear.set(new_origin);
}
pub fn set_right_ear(&self, new_origin: Vector3<f32>) {
self.right_ear.set(new_origin);
}
pub fn attenuate(
&self,
emitter_origin: Vector3<f32>,
base_volume: f32,
attenuation: f32,
) -> f32 {
let decay = (emitter_origin - self.origin.get()).magnitude()
* attenuation
* DISTANCE_ATTENUATION_FACTOR;
let volume = ((1.0 - decay) * base_volume).max(0.0);
volume
}
}
#[derive(Clone)]
pub struct AudioSource(Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>);
impl AudioSource {
pub fn load<S>(vfs: &Vfs, name: S) -> Result<AudioSource, SoundError>
where
S: AsRef<str>,
{
let name = name.as_ref();
let full_path = "sound/".to_owned() + name;
let mut file = vfs.open(&full_path)?;
let mut data = Vec::new();
file.read_to_end(&mut data)?;
let src = Decoder::new(Cursor::new(data))?
.convert_samples()
.buffered();
Ok(AudioSource(src))
}
}
pub struct StaticSound {
origin: Vector3<f32>,
sink: RefCell<Sink>,
volume: f32,
attenuation: f32,
}
impl StaticSound {
pub fn new(
stream: &OutputStreamHandle,
origin: Vector3<f32>,
src: AudioSource,
volume: f32,
attenuation: f32,
listener: &Listener,
) -> StaticSound {
// TODO: handle PlayError once PR accepted
let sink = Sink::try_new(&stream).unwrap();
let infinite = src.0.clone().repeat_infinite();
sink.append(infinite);
sink.set_volume(listener.attenuate(origin, volume, attenuation));
StaticSound {
origin,
sink: RefCell::new(sink),
volume,
attenuation,
}
}
pub fn update(&self, listener: &Listener) {
let sink = self.sink.borrow_mut();
sink.set_volume(listener.attenuate(self.origin, self.volume, self.attenuation));
}
}
/// Represents a single audio channel, capable of playing one sound at a time.
pub struct Channel {
stream: OutputStreamHandle,
sink: RefCell<Option<Sink>>,
master_vol: Cell<f32>,
attenuation: Cell<f32>,
}
impl Channel {
/// Create a new `Channel` backed by the given `Device`.
pub fn new(stream: OutputStreamHandle) -> Channel {
Channel {
stream,
sink: RefCell::new(None),
master_vol: Cell::new(0.0),
attenuation: Cell::new(0.0),
}
}
/// Play a new sound on this channel, cutting off any sound that was previously playing.
pub fn play(
&self,
src: AudioSource,
ent_pos: Vector3<f32>,
listener: &Listener,
volume: f32,
attenuation: f32,
) {
self.master_vol.set(volume);
self.attenuation.set(attenuation);
// stop the old sound
self.sink.replace(None);
// start the new sound
let new_sink = Sink::try_new(&self.stream).unwrap();
new_sink.append(src.0);
new_sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
self.sink.replace(Some(new_sink));
}
pub fn update(&self, ent_pos: Vector3<f32>, listener: &Listener) {
if let Some(ref sink) = *self.sink.borrow_mut() {
// attenuate using quake coordinates since distance is the same either way
sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
};
}
/// Stop the sound currently playing on this channel, if there is one.
pub fn stop(&self) {
self.sink.replace(None);
}
/// Returns whether or not this `Channel` is currently in use.
pub fn in_use(&self) -> bool {
let replace_sink;
match *self.sink.borrow() {
Some(ref sink) => replace_sink = sink.empty(),
None => return false,
}
// if the sink isn't in use, free it
if replace_sink {
self.sink.replace(None);
false
} else {
true
}
}
}
pub struct EntityChannel {
start_time: Duration,
// if None, sound is associated with a temp entity
ent_id: Option<usize>,
ent_channel: i8,
channel: Channel,
}
impl EntityChannel {
pub fn channel(&self) -> &Channel {
&self.channel
}
pub fn entity_id(&self) -> Option<usize> {
self.ent_id
}
}
pub struct EntityMixer {
stream: OutputStreamHandle,
// TODO: replace with an array once const type parameters are implemented
channels: Box<[Option<EntityChannel>]>,
}
impl EntityMixer {
pub fn new(stream: OutputStreamHandle) -> EntityMixer {
let mut channel_vec = Vec::new();
for _ in 0..MAX_ENTITY_CHANNELS {
channel_vec.push(None);
}
EntityMixer {
stream,
channels: channel_vec.into_boxed_slice(),
}
}
fn find_free_channel(&self, ent_id: Option<usize>, ent_channel: i8) -> usize { |
// keep track of which sound started the earliest
match self.channels[oldest] {
Some(ref o) => {
if chan.start_time < o.start_time {
oldest = i;
}
}
None => oldest = i,
|
let mut oldest = 0;
for (i, channel) in self.channels.iter().enumerate() {
match *channel {
Some(ref chan) => {
// if this channel is free, return it
if !chan.channel.in_use() {
return i;
}
// replace sounds on the same entity channel
if ent_channel != 0
&& chan.ent_id == ent_id
&& (chan.ent_channel == ent_channel || ent_channel == -1)
{
return i;
}
// TODO: don't clobber player sounds with monster sounds | identifier_body |
mod.rs | following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
mod music;
pub use music::MusicPlayer;
use std::{
cell::{Cell, RefCell},
io::{self, BufReader, Cursor, Read},
};
use crate::common::vfs::{Vfs, VfsError};
use cgmath::{InnerSpace, Vector3};
use rodio::{
source::{Buffered, SamplesConverter},
Decoder, OutputStreamHandle, Sink, Source,
};
use thiserror::Error;
use chrono::Duration;
pub const DISTANCE_ATTENUATION_FACTOR: f32 = 0.001;
const MAX_ENTITY_CHANNELS: usize = 128;
#[derive(Error, Debug)]
pub enum SoundError {
#[error("No such music track: {0}")]
NoSuchTrack(String),
#[error("I/O error: {0}")]
Io(#[from] io::Error),
#[error("Virtual filesystem error: {0}")]
Vfs(#[from] VfsError),
#[error("WAV decoder error: {0}")]
Decoder(#[from] rodio::decoder::DecoderError),
}
/// Data needed for sound spatialization.
///
/// This struct is updated every frame.
#[derive(Debug)]
pub struct Listener {
origin: Cell<Vector3<f32>>,
left_ear: Cell<Vector3<f32>>,
right_ear: Cell<Vector3<f32>>,
}
impl Listener {
pub fn new() -> Listener {
Listener {
origin: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
left_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
right_ear: Cell::new(Vector3::new(0.0, 0.0, 0.0)),
}
}
pub fn origin(&self) -> Vector3<f32> {
self.origin.get()
}
pub fn left_ear(&self) -> Vector3<f32> {
self.left_ear.get()
}
pub fn right_ear(&self) -> Vector3<f32> {
self.right_ear.get()
}
pub fn set_origin(&self, new_origin: Vector3<f32>) {
self.origin.set(new_origin);
}
pub fn set_left_ear(&self, new_origin: Vector3<f32>) {
self.left_ear.set(new_origin);
}
pub fn set_right_ear(&self, new_origin: Vector3<f32>) {
self.right_ear.set(new_origin);
}
pub fn attenuate(
&self,
emitter_origin: Vector3<f32>,
base_volume: f32,
attenuation: f32,
) -> f32 {
let decay = (emitter_origin - self.origin.get()).magnitude()
* attenuation
* DISTANCE_ATTENUATION_FACTOR;
let volume = ((1.0 - decay) * base_volume).max(0.0);
volume
}
}
#[derive(Clone)]
pub struct AudioSource(Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>);
impl AudioSource {
pub fn load<S>(vfs: &Vfs, name: S) -> Result<AudioSource, SoundError>
where
S: AsRef<str>,
{
let name = name.as_ref();
let full_path = "sound/".to_owned() + name;
let mut file = vfs.open(&full_path)?;
let mut data = Vec::new();
file.read_to_end(&mut data)?;
let src = Decoder::new(Cursor::new(data))?
.convert_samples()
.buffered();
Ok(AudioSource(src))
}
}
pub struct StaticSound {
origin: Vector3<f32>,
sink: RefCell<Sink>,
volume: f32,
attenuation: f32,
}
impl StaticSound {
pub fn new(
stream: &OutputStreamHandle,
origin: Vector3<f32>,
src: AudioSource,
volume: f32,
attenuation: f32,
listener: &Listener,
) -> StaticSound {
// TODO: handle PlayError once PR accepted
let sink = Sink::try_new(&stream).unwrap();
let infinite = src.0.clone().repeat_infinite();
sink.append(infinite);
sink.set_volume(listener.attenuate(origin, volume, attenuation));
StaticSound {
origin,
sink: RefCell::new(sink),
volume,
attenuation,
}
}
pub fn update(&self, listener: &Listener) {
let sink = self.sink.borrow_mut();
sink.set_volume(listener.attenuate(self.origin, self.volume, self.attenuation));
}
}
/// Represents a single audio channel, capable of playing one sound at a time.
pub struct Channel {
stream: OutputStreamHandle,
sink: RefCell<Option<Sink>>,
master_vol: Cell<f32>,
attenuation: Cell<f32>,
}
impl Channel {
/// Create a new `Channel` backed by the given `Device`.
pub fn new(stream: OutputStreamHandle) -> Channel {
Channel {
stream,
sink: RefCell::new(None),
master_vol: Cell::new(0.0),
attenuation: Cell::new(0.0),
}
}
/// Play a new sound on this channel, cutting off any sound that was previously playing.
pub fn play(
&self,
src: AudioSource,
ent_pos: Vector3<f32>,
listener: &Listener,
volume: f32,
attenuation: f32,
) {
self.master_vol.set(volume);
self.attenuation.set(attenuation);
// stop the old sound
self.sink.replace(None);
// start the new sound
let new_sink = Sink::try_new(&self.stream).unwrap();
new_sink.append(src.0);
new_sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
self.sink.replace(Some(new_sink));
}
pub fn update(&self, ent_pos: Vector3<f32>, listener: &Listener) {
if let Some(ref sink) = *self.sink.borrow_mut() {
// attenuate using quake coordinates since distance is the same either way
sink.set_volume(listener.attenuate(
ent_pos,
self.master_vol.get(),
self.attenuation.get(),
));
};
}
/// Stop the sound currently playing on this channel, if there is one.
pub fn stop(&self) {
self.sink.replace(None);
}
/// Returns whether or not this `Channel` is currently in use.
pub fn in_use(&self) -> bool {
let replace_sink;
match *self.sink.borrow() {
Some(ref sink) => replace_sink = sink.empty(),
None => return false,
}
// if the sink isn't in use, free it
if replace_sink {
self.sink.replace(None);
false
} else {
true
}
}
}
pub struct EntityChannel {
start_time: Duration,
// if None, sound is associated with a temp entity
ent_id: Option<usize>,
ent_channel: i8,
channel: Channel,
}
impl EntityChannel {
pub fn channel(&self) -> &Channel {
&self.channel
}
pub fn entity_id(&self) -> Option<usize> {
self.ent_id
}
}
pub struct EntityMixer {
stream: OutputStreamHandle,
// TODO: replace with an array once const type parameters are implemented
channels: Box<[Option<EntityChannel>]>,
}
impl EntityMixer {
pub fn new(stream: OutputStreamHandle) -> EntityMixer {
let mut channel_vec = Vec::new();
for _ in 0..MAX_ENTITY_CHANNELS {
channel_vec.push(None);
}
EntityMixer {
stream,
channels: channel_vec.into_boxed_slice(),
}
}
fn find_free_channel(&self, ent_id: Option<usize>, ent_channel: i8) -> usize {
let mut oldest = 0;
for (i, channel) in self.channels.iter().enumerate() {
match *channel {
Some(ref chan) => {
// if this channel is free, return it
if !chan.channel.in_use() { |
// replace sounds on the same entity channel
if ent_channel != 0
&& chan.ent_id == ent_id
&& (chan.ent_channel == ent_channel || ent_channel == -1)
{
return i;
}
// TODO: don't clobber player sounds with monster sounds
// keep track of which sound started the earliest
match self.channels[oldest] {
Some(ref o) => {
if chan.start_time < o.start_time {
oldest = i;
}
}
None => oldest = i |
return i;
}
| conditional_block |
dropck.rs | cx, self_type_node_id);
tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| {
let tcx = infcx.tcx;
let mut fulfillment_cx = traits::FulfillmentContext::new();
let named_type = tcx.lookup_item_type(self_type_did).ty;
let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
let fresh_impl_substs =
infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did);
let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs);
if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span),
named_type, fresh_impl_self_ty) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0366,
"Implementations of Drop cannot be specialized")
.span_note(item_span,
"Use same sequence of generic type and region \
parameters that is on the struct/enum definition")
.emit();
return Err(());
}
if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) {
// this could be reached when we get lazy normalization
infcx.report_fulfillment_errors(errors);
return Err(());
}
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id);
Ok(())
})
}
/// Confirms that every predicate imposed by dtor_predicates is
/// implied by assuming the predicates attached to self_type_did.
fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
dtor_predicates: &ty::GenericPredicates<'tcx>,
self_type_did: DefId,
self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> {
// Here is an example, analogous to that from
// `compare_impl_method`.
//
// Consider a struct type:
//
// struct Type<'c, 'b:'c, 'a> {
// x: &'a Contents // (contents are irrelevant;
// y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.)
// }
//
// and a Drop impl:
//
// impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> {
// fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y)
// }
//
// We start out with self_to_impl_substs, that maps the generic
// parameters of Type to that of the Drop impl.
// | // definition yields the instantiated assumptions:
//
// ['y : 'z]
//
// We then check all of the predicates of the Drop impl:
//
// ['y:'z, 'x:'y]
//
// and ensure each is in the list of instantiated
// assumptions. Here, `'y:'z` is present, but `'x:'y` is
// absent. So we report an error that the Drop impl injected a
// predicate that is not present on the struct definition.
let tcx = ccx.tcx;
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
// We can assume the predicates attached to struct/enum definition
// hold.
let generic_assumptions = tcx.lookup_predicates(self_type_did);
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
// An earlier version of this code attempted to do this checking
// via the traits::fulfill machinery. However, it ran into trouble
// since the fulfill machinery merely turns outlives-predicates
// 'a:'b and T:'b into region inference constraints. It is simpler
// just to look for all the predicates directly.
assert_eq!(dtor_predicates.parent, None);
for predicate in &dtor_predicates.predicates {
// (We do not need to worry about deep analysis of type
// expressions etc because the Drop impls are already forced
// to take on a structure that is roughly an alpha-renaming of
// the generic parameters of the item definition.)
// This path now just checks *all* predicates via the direct
// lookup, rather than using fulfill machinery.
//
// However, it may be more efficient in the future to batch
// the analysis together via the fulfill , rather than the
// repeated `contains` calls.
if !assumptions_in_impl_context.contains(&predicate) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0367,
"The requirement `{}` is added only by the Drop impl.", predicate)
.span_note(item_span,
"The same requirement must be part of \
the struct/enum definition")
.emit();
}
}
if tcx.sess.has_errors() {
return Err(());
}
Ok(())
}
/// check_safety_of_destructor_if_necessary confirms that the type
/// expression `typ` conforms to the "Drop Check Rule" from the Sound
/// Generic Drop (RFC 769).
///
/// ----
///
/// The simplified (*) Drop Check Rule is the following:
///
/// Let `v` be some value (either temporary or named) and 'a be some
/// lifetime (scope). If the type of `v` owns data of type `D`, where
///
/// * (1.) `D` has a lifetime- or type-parametric Drop implementation,
/// (where that `Drop` implementation does not opt-out of
/// this check via the `unsafe_destructor_blind_to_params`
/// attribute), and
/// * (2.) the structure of `D` can reach a reference of type `&'a _`,
///
/// then 'a must strictly outlive the scope of v.
///
/// ----
///
/// This function is meant to by applied to the type for every
/// expression in the program.
///
/// ----
///
/// (*) The qualifier "simplified" is attached to the above
/// definition of the Drop Check Rule, because it is a simplification
/// of the original Drop Check rule, which attempted to prove that
/// some `Drop` implementations could not possibly access data even if
/// it was technically reachable, due to parametricity.
///
/// However, (1.) parametricity on its own turned out to be a
/// necessary but insufficient condition, and (2.) future changes to
/// the language are expected to make it impossible to ensure that a
/// `Drop` implementation is actually parametric with respect to any
/// particular type parameter. (In particular, impl specialization is
/// expected to break the needed parametricity property beyond
/// repair.)
///
/// Therefore we have scaled back Drop-Check to a more conservative
/// rule that does not attempt to deduce whether a `Drop`
/// implementation could not possible access data of a given lifetime;
/// instead Drop-Check now simply assumes that if a destructor has
/// access (direct or indirect) to a lifetime parameter, then that
/// lifetime must be forced to outlive that destructor's dynamic
/// extent. We then provide the `unsafe_destructor_blind_to_params`
/// attribute as a way for destructor implementations to opt-out of
/// this conservative assumption (and thus assume the obligation of
/// ensuring that they do not access data nor invoke methods of
/// values that have been previously dropped).
///
pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
typ: ty::Ty<'tcx>,
span: Span,
scope: region::CodeExtent)
{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
});
let result = iterate_over_potentially_unsafe_regions_in_type(
&mut DropckContext {
rcx: rcx,
span: span,
parent_scope: parent_scope,
breadcrumbs: FnvHashSet()
},
TypeContext::Root,
typ,
0);
match result {
Ok(()) => {}
Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => {
| // self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x}
//
// Applying this to the predicates (i.e. assumptions) provided by the item | random_line_split |
dropck.rs | if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span),
named_type, fresh_impl_self_ty) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0366,
"Implementations of Drop cannot be specialized")
.span_note(item_span,
"Use same sequence of generic type and region \
parameters that is on the struct/enum definition")
.emit();
return Err(());
}
if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) {
// this could be reached when we get lazy normalization
infcx.report_fulfillment_errors(errors);
return Err(());
}
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id);
Ok(())
})
}
/// Confirms that every predicate imposed by dtor_predicates is
/// implied by assuming the predicates attached to self_type_did.
fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
dtor_predicates: &ty::GenericPredicates<'tcx>,
self_type_did: DefId,
self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> {
// Here is an example, analogous to that from
// `compare_impl_method`.
//
// Consider a struct type:
//
// struct Type<'c, 'b:'c, 'a> {
// x: &'a Contents // (contents are irrelevant;
// y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.)
// }
//
// and a Drop impl:
//
// impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> {
// fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y)
// }
//
// We start out with self_to_impl_substs, that maps the generic
// parameters of Type to that of the Drop impl.
//
// self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x}
//
// Applying this to the predicates (i.e. assumptions) provided by the item
// definition yields the instantiated assumptions:
//
// ['y : 'z]
//
// We then check all of the predicates of the Drop impl:
//
// ['y:'z, 'x:'y]
//
// and ensure each is in the list of instantiated
// assumptions. Here, `'y:'z` is present, but `'x:'y` is
// absent. So we report an error that the Drop impl injected a
// predicate that is not present on the struct definition.
let tcx = ccx.tcx;
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
// We can assume the predicates attached to struct/enum definition
// hold.
let generic_assumptions = tcx.lookup_predicates(self_type_did);
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
// An earlier version of this code attempted to do this checking
// via the traits::fulfill machinery. However, it ran into trouble
// since the fulfill machinery merely turns outlives-predicates
// 'a:'b and T:'b into region inference constraints. It is simpler
// just to look for all the predicates directly.
assert_eq!(dtor_predicates.parent, None);
for predicate in &dtor_predicates.predicates {
// (We do not need to worry about deep analysis of type
// expressions etc because the Drop impls are already forced
// to take on a structure that is roughly an alpha-renaming of
// the generic parameters of the item definition.)
// This path now just checks *all* predicates via the direct
// lookup, rather than using fulfill machinery.
//
// However, it may be more efficient in the future to batch
// the analysis together via the fulfill , rather than the
// repeated `contains` calls.
if !assumptions_in_impl_context.contains(&predicate) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0367,
"The requirement `{}` is added only by the Drop impl.", predicate)
.span_note(item_span,
"The same requirement must be part of \
the struct/enum definition")
.emit();
}
}
if tcx.sess.has_errors() {
return Err(());
}
Ok(())
}
/// check_safety_of_destructor_if_necessary confirms that the type
/// expression `typ` conforms to the "Drop Check Rule" from the Sound
/// Generic Drop (RFC 769).
///
/// ----
///
/// The simplified (*) Drop Check Rule is the following:
///
/// Let `v` be some value (either temporary or named) and 'a be some
/// lifetime (scope). If the type of `v` owns data of type `D`, where
///
/// * (1.) `D` has a lifetime- or type-parametric Drop implementation,
/// (where that `Drop` implementation does not opt-out of
/// this check via the `unsafe_destructor_blind_to_params`
/// attribute), and
/// * (2.) the structure of `D` can reach a reference of type `&'a _`,
///
/// then 'a must strictly outlive the scope of v.
///
/// ----
///
/// This function is meant to by applied to the type for every
/// expression in the program.
///
/// ----
///
/// (*) The qualifier "simplified" is attached to the above
/// definition of the Drop Check Rule, because it is a simplification
/// of the original Drop Check rule, which attempted to prove that
/// some `Drop` implementations could not possibly access data even if
/// it was technically reachable, due to parametricity.
///
/// However, (1.) parametricity on its own turned out to be a
/// necessary but insufficient condition, and (2.) future changes to
/// the language are expected to make it impossible to ensure that a
/// `Drop` implementation is actually parametric with respect to any
/// particular type parameter. (In particular, impl specialization is
/// expected to break the needed parametricity property beyond
/// repair.)
///
/// Therefore we have scaled back Drop-Check to a more conservative
/// rule that does not attempt to deduce whether a `Drop`
/// implementation could not possible access data of a given lifetime;
/// instead Drop-Check now simply assumes that if a destructor has
/// access (direct or indirect) to a lifetime parameter, then that
/// lifetime must be forced to outlive that destructor's dynamic
/// extent. We then provide the `unsafe_destructor_blind_to_params`
/// attribute as a way for destructor implementations to opt-out of
/// this conservative assumption (and thus assume the obligation of
/// ensuring that they do not access data nor invoke methods of
/// values that have been previously dropped).
///
pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
typ: ty::Ty<'tcx>,
span: Span,
scope: region::CodeExtent)
{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
});
let result = | {
let tcx = ccx.tcx;
let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap();
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
// check that the impl type can be made to match the trait type.
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id);
tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| {
let tcx = infcx.tcx;
let mut fulfillment_cx = traits::FulfillmentContext::new();
let named_type = tcx.lookup_item_type(self_type_did).ty;
let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
let fresh_impl_substs =
infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did);
let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs);
| identifier_body |
|
dropck.rs | specialized")
.span_note(item_span,
"Use same sequence of generic type and region \
parameters that is on the struct/enum definition")
.emit();
return Err(());
}
if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) {
// this could be reached when we get lazy normalization
infcx.report_fulfillment_errors(errors);
return Err(());
}
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id);
Ok(())
})
}
/// Confirms that every predicate imposed by dtor_predicates is
/// implied by assuming the predicates attached to self_type_did.
fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>(
ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
dtor_predicates: &ty::GenericPredicates<'tcx>,
self_type_did: DefId,
self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> {
// Here is an example, analogous to that from
// `compare_impl_method`.
//
// Consider a struct type:
//
// struct Type<'c, 'b:'c, 'a> {
// x: &'a Contents // (contents are irrelevant;
// y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.)
// }
//
// and a Drop impl:
//
// impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> {
// fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y)
// }
//
// We start out with self_to_impl_substs, that maps the generic
// parameters of Type to that of the Drop impl.
//
// self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x}
//
// Applying this to the predicates (i.e. assumptions) provided by the item
// definition yields the instantiated assumptions:
//
// ['y : 'z]
//
// We then check all of the predicates of the Drop impl:
//
// ['y:'z, 'x:'y]
//
// and ensure each is in the list of instantiated
// assumptions. Here, `'y:'z` is present, but `'x:'y` is
// absent. So we report an error that the Drop impl injected a
// predicate that is not present on the struct definition.
let tcx = ccx.tcx;
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP);
// We can assume the predicates attached to struct/enum definition
// hold.
let generic_assumptions = tcx.lookup_predicates(self_type_did);
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
// An earlier version of this code attempted to do this checking
// via the traits::fulfill machinery. However, it ran into trouble
// since the fulfill machinery merely turns outlives-predicates
// 'a:'b and T:'b into region inference constraints. It is simpler
// just to look for all the predicates directly.
assert_eq!(dtor_predicates.parent, None);
for predicate in &dtor_predicates.predicates {
// (We do not need to worry about deep analysis of type
// expressions etc because the Drop impls are already forced
// to take on a structure that is roughly an alpha-renaming of
// the generic parameters of the item definition.)
// This path now just checks *all* predicates via the direct
// lookup, rather than using fulfill machinery.
//
// However, it may be more efficient in the future to batch
// the analysis together via the fulfill , rather than the
// repeated `contains` calls.
if !assumptions_in_impl_context.contains(&predicate) {
let item_span = tcx.map.span(self_type_node_id);
struct_span_err!(tcx.sess, drop_impl_span, E0367,
"The requirement `{}` is added only by the Drop impl.", predicate)
.span_note(item_span,
"The same requirement must be part of \
the struct/enum definition")
.emit();
}
}
if tcx.sess.has_errors() {
return Err(());
}
Ok(())
}
/// check_safety_of_destructor_if_necessary confirms that the type
/// expression `typ` conforms to the "Drop Check Rule" from the Sound
/// Generic Drop (RFC 769).
///
/// ----
///
/// The simplified (*) Drop Check Rule is the following:
///
/// Let `v` be some value (either temporary or named) and 'a be some
/// lifetime (scope). If the type of `v` owns data of type `D`, where
///
/// * (1.) `D` has a lifetime- or type-parametric Drop implementation,
/// (where that `Drop` implementation does not opt-out of
/// this check via the `unsafe_destructor_blind_to_params`
/// attribute), and
/// * (2.) the structure of `D` can reach a reference of type `&'a _`,
///
/// then 'a must strictly outlive the scope of v.
///
/// ----
///
/// This function is meant to by applied to the type for every
/// expression in the program.
///
/// ----
///
/// (*) The qualifier "simplified" is attached to the above
/// definition of the Drop Check Rule, because it is a simplification
/// of the original Drop Check rule, which attempted to prove that
/// some `Drop` implementations could not possibly access data even if
/// it was technically reachable, due to parametricity.
///
/// However, (1.) parametricity on its own turned out to be a
/// necessary but insufficient condition, and (2.) future changes to
/// the language are expected to make it impossible to ensure that a
/// `Drop` implementation is actually parametric with respect to any
/// particular type parameter. (In particular, impl specialization is
/// expected to break the needed parametricity property beyond
/// repair.)
///
/// Therefore we have scaled back Drop-Check to a more conservative
/// rule that does not attempt to deduce whether a `Drop`
/// implementation could not possible access data of a given lifetime;
/// instead Drop-Check now simply assumes that if a destructor has
/// access (direct or indirect) to a lifetime parameter, then that
/// lifetime must be forced to outlive that destructor's dynamic
/// extent. We then provide the `unsafe_destructor_blind_to_params`
/// attribute as a way for destructor implementations to opt-out of
/// this conservative assumption (and thus assume the obligation of
/// ensuring that they do not access data nor invoke methods of
/// values that have been previously dropped).
///
pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
typ: ty::Ty<'tcx>,
span: Span,
scope: region::CodeExtent)
{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
});
let result = iterate_over_potentially_unsafe_regions_in_type(
&mut DropckContext {
rcx: rcx,
span: span,
parent_scope: parent_scope,
breadcrumbs: FnvHashSet()
},
TypeContext::Root,
typ,
0);
match result {
Ok(()) => {}
Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => {
let tcx = rcx.tcx;
let mut err = struct_span_err!(tcx.sess, span, E0320,
"overflow while adding drop-check rules for {}", typ);
match *ctxt {
TypeContext::Root => {
// no need for an additional note if the overflow
// was somehow on the root.
}
TypeContext::ADT { def_id, variant, field } => {
let adt = tcx.lookup_adt_def(def_id);
let variant_name = match adt.adt_kind() {
AdtKind::Enum => format!("enum {} variant {}",
tcx.item_path_str(def_id),
variant),
AdtKind::Struct => format!("struct {}",
tcx.item_path_str(def_id)),
AdtKind::Union => format!("union {}",
tcx.item_path_str(def_id)),
};
span_note!(
&mut err,
span,
"overflowed on {} field {} type: {}",
variant_name,
field,
detected_on_typ);
}
}
err.emit();
}
}
}
enum | Error | identifier_name |
|
dropck.rs | _necessary<'a, 'gcx, 'tcx>(
rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
typ: ty::Ty<'tcx>,
span: Span,
scope: region::CodeExtent)
{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
});
let result = iterate_over_potentially_unsafe_regions_in_type(
&mut DropckContext {
rcx: rcx,
span: span,
parent_scope: parent_scope,
breadcrumbs: FnvHashSet()
},
TypeContext::Root,
typ,
0);
match result {
Ok(()) => {}
Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => {
let tcx = rcx.tcx;
let mut err = struct_span_err!(tcx.sess, span, E0320,
"overflow while adding drop-check rules for {}", typ);
match *ctxt {
TypeContext::Root => {
// no need for an additional note if the overflow
// was somehow on the root.
}
TypeContext::ADT { def_id, variant, field } => {
let adt = tcx.lookup_adt_def(def_id);
let variant_name = match adt.adt_kind() {
AdtKind::Enum => format!("enum {} variant {}",
tcx.item_path_str(def_id),
variant),
AdtKind::Struct => format!("struct {}",
tcx.item_path_str(def_id)),
AdtKind::Union => format!("union {}",
tcx.item_path_str(def_id)),
};
span_note!(
&mut err,
span,
"overflowed on {} field {} type: {}",
variant_name,
field,
detected_on_typ);
}
}
err.emit();
}
}
}
enum Error<'tcx> {
Overflow(TypeContext, ty::Ty<'tcx>),
}
#[derive(Copy, Clone)]
enum TypeContext {
Root,
ADT {
def_id: DefId,
variant: ast::Name,
field: ast::Name,
}
}
struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> {
rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>,
/// types that have already been traversed
breadcrumbs: FnvHashSet<Ty<'tcx>>,
/// span for error reporting
span: Span,
/// the scope reachable dtorck types must outlive
parent_scope: region::CodeExtent
}
// `context` is used for reporting overflow errors
fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>(
cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>,
context: TypeContext,
ty: Ty<'tcx>,
depth: usize) -> Result<(), Error<'tcx>>
{
let tcx = cx.rcx.tcx;
// Issue #22443: Watch out for overflow. While we are careful to
// handle regular types properly, non-regular ones cause problems.
let recursion_limit = tcx.sess.recursion_limit.get();
if depth / 4 >= recursion_limit {
// This can get into rather deep recursion, especially in the
// presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T.
// use a higher recursion limit to avoid errors.
return Err(Error::Overflow(context, ty))
}
// canoncialize the regions in `ty` before inserting - infinitely many
// region variables can refer to the same region.
let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty);
if !cx.breadcrumbs.insert(ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - cached",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
return Ok(()); // we already visited this type
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?}",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// If `typ` has a destructor, then we must ensure that all
// borrowed data reachable via `typ` must outlive the parent
// of `scope`. This is handled below.
//
// However, there is an important special case: for any Drop
// impl that is tagged as "blind" to their parameters,
// we assume that data borrowed via such type parameters
// remains unreachable via that Drop impl.
//
// For example, consider:
//
// ```rust
// #[unsafe_destructor_blind_to_params]
// impl<T> Drop for Vec<T> { ... }
// ```
//
// which does have to be able to drop instances of `T`, but
// otherwise cannot read data from `T`.
//
// Of course, for the type expression passed in for any such
// unbounded type parameter `T`, we must resume the recursive
// analysis on `T` (since it would be ignored by
// type_must_outlive).
if has_dtor_of_interest(tcx, ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} - is a dtorck type!",
(0..depth).map(|_| ' ').collect::<String>(),
ty);
cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span),
ty, tcx.mk_region(ty::ReScope(cx.parent_scope)));
return Ok(());
}
debug!("iterate_over_potentially_unsafe_regions_in_type \
{}ty: {} scope: {:?} - checking interior",
(0..depth).map(|_| ' ').collect::<String>(),
ty, cx.parent_scope);
// We still need to ensure all referenced data is safe.
match ty.sty {
ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
ty::TyFloat(_) | ty::TyStr | ty::TyNever => {
// primitive - definitely safe
Ok(())
}
ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => {
// single-element containers, behave like their element
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) if def.is_phantom_data() => {
// PhantomData<T> - behaves identically to T
let ity = substs.type_at(0);
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
ty::TyAdt(def, substs) => {
let did = def.did;
for variant in &def.variants {
for field in variant.fields.iter() {
let fty = field.ty(tcx, substs);
let fty = cx.rcx.fcx.resolve_type_vars_with_obligations(
cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
iterate_over_potentially_unsafe_regions_in_type(
cx,
TypeContext::ADT {
def_id: did,
field: field.name,
variant: variant.name,
},
fty,
depth+1)?
}
}
Ok(())
}
ty::TyTuple(tys) |
ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => {
for ty in tys {
iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
}
Ok(())
}
ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => {
// these always come with a witness of liveness (references
// explicitly, pointers implicitly, parameters by the
// caller).
Ok(())
}
ty::TyFnDef(..) | ty::TyFnPtr(_) => {
// FIXME(#26656): this type is always destruction-safe, but
// it implicitly witnesses Self: Fn, which can be false.
Ok(())
}
ty::TyInfer(..) | ty::TyError => {
tcx.sess.delay_span_bug(cx.span, "unresolved type in regionck");
Ok(())
}
// these are always dtorck
ty::TyTrait(..) | ty::TyProjection(_) | ty::TyAnon(..) => bug!(),
}
}
fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyAdt(def, _) => | {
def.is_dtorck(tcx)
} | conditional_block |
|
views.py | .objects.filter(username=username).first() # 当前对象
print('user', user)
    if not user:  # check whether this user actually exists
return render(request, 'not_exit.html', locals())
    # the current user's personal site (Blog) object
blog = user.blog
    # each category title of this site together with its article count
c_articles = Category.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values(
'title', 'c')
    # each tag title of this site together with its article count
t_articles = Tag.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values(
'title', 'c')
    # each year-month archive of this site together with its article count
c_t_articles = Article.objects.filter(user=user). \
extra(select={"c_date": "date_format(create_time,'%%Y-%%m')"}). \
values('c_date').annotate(c=Count('nid')).values('c_date', 'c')
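    # NOTE: the extra()/date_format() query above is MySQL-specific. A hedged,
    # database-agnostic sketch (assuming Django >= 1.10 and the same `create_time`
    # field) would use TruncMonth instead; kept commented out so the original
    # query stays authoritative:
    # from django.db.models.functions import TruncMonth
    # c_t_articles = Article.objects.filter(user=user) \
    #     .annotate(c_date=TruncMonth('create_time')) \
    #     .values('c_date').annotate(c=Count('nid')).values('c_date', 'c')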
articles = Article.objects.filter(user=user)
    return user, c_articles, t_articles, c_t_articles, articles, blog
@login_required
def home_site(request, username, **kwargs):  # **kwargs receives the optional filter parameters as a dict
"""
    Personal site: the article list of one user, optionally filtered.
:param request:
:param username:
:return:
"""
if username == request.user.username:
user, c_articles, t_articles, c_t_articles, articles, blog = get_data(request, username)
        if kwargs:  # navigation inside the personal site (category / tag / archive filters)
condition = kwargs['condition']
param = kwargs['param']
if condition == 'category':
articles = Article.objects.filter(user=user).filter(category__title=param)
if condition == 'tag':
articles = Article.objects.filter(user=user).filter(tags__title=param)
if condition == 'archive':
year, month = param.split('-')
articles = Article.objects.filter(user=user)\
                    .filter(create_time__year=year, create_time__month=month)  # relies on USE_TZ = False in settings
return render(request, 'home_site.html', locals())
else:
return redirect('/login/')
def article_detail(request, username, article_id):
"""
    Article detail page.
:param request:
:param username:
:param article_id:
:return:
"""
article_obj = Article.objects.filter(nid=article_id).first()
comments = Comment.objects.filter(article_id=article_id)
return render(request, 'article_detail.html', locals())
def digg(request):
"""
点赞
:param request:
:return:
"""
is_up = json.loads(request.POST.get('is_up')) # 反序列化
user_id = request.user.pk
article_id = request.POST.get('article_id')
obj = ArticleUpDown.objects.filter(user_id=user_id, article_id=article_id).first()
response = {'state': False, 'msg': None}
if not obj: # 该用户没对本文章进行操作
ArticleUpDown.objects.create(is_up=is_up, article_id=article_id, user_id=user_id)
queryset = Article.objects.filter(pk=article_id)
if is_up: # 更新文章的数据
queryset.update(up_count=F('up_count')+1)
else:
queryset.update(down_count=F('down_count')+1)
else:
response['state'] = True
if obj.is_up:
response['msg'] = '您已经点赞过!'
else:
response['msg'] = '您已经点踩过!'
return JsonResponse(response)
def comment(request):
article_id = request.POST.get('article_id')
pid = request.POST.get('pid')
content = request.POST.get('content')
user_id = request.user.pk
# 事务操作,必须同时成功,同时失败
with transaction.atomic():
ret = Comment.objects.create(user_id=user_id, content=content,
article_id=article_id, parent_comment_id=pid)
Article.objects.filter(nid=article_id).update(comment_count=F('comment_count')+1)
# 构件根评论添加时所需数据
response = {}
response['create_time'] = ret.create_time.strftime("%Y-%m-%d %X")
response['username'] = request.user.username
response['content'] = ret.content
article_obj = Article.objects.filter(nid=article_id).first()
# 给该文章作者发送邮件,通知其有人评论
from cnblog.settings import EMAIL_HOST_USER
import threading
# send_mail(
# "您的文章%s新增了一条评论内容"%article_obj.title, # 提示信息
# content, # 邮件内容
# EMAIL_HOST_USER, # 发送方
# ['[email protected]'] # 接收方
# )
t = threading.Thread(target=send_mail, args=( # 开启线程,节省时间
"您的文章%s新增了一条评论内容" % article_obj.title,
content,
EMAIL_HOST_USER,
['[email protected]']
))
t.start()
return JsonResponse(response)
def get_comment_tree(request):
article_id = request.GET.get('article_id')
# 转换成数组
ret = list(Comment.objects.filter(article_id=article_id).values('pk', 'content', 'parent_comment__nid'))
return JsonResponse(ret, safe=False) # 传列表,需改成false
def backend(request, username):
# 当前用户文章列表
username = username
print(username+'456789')
article_list = Article.objects.filter(user__username=username)
return render(request, 'backend.html', locals())
def article_del(request):
"""
删除文章
:param request:
:return:
"""
username = request.POST.get('username')
article_id = request.POST.get('article_id')
Article.objects.filter(pk=article_id).delete()
Comment.objects.filter(article_id=article_id).delete()
return HttpResponse('删除成功!')
def article_edit(request, article_id):
"""
编辑修改某一篇文章
:param request:
:return:
"""
article_id = article_id
article_obj = Article.objects.filter(nid=article_id).first()
return render(request, 'article_edit.html', locals())
def article_update(request):
username = request.user.username
if request.method == 'POST':
article_id = request.POST.get('article_id')
title = request.POST.get('title')
content = request.POST.get('content')
print('content', content)
# 提取文章描述信息desc
soup = BeautifulSoup(content, 'html.parser')
for tag in soup.find_all():
if tag.name == 'script':
tag.decompose() # 删除非法信息,防止xss攻击
desc = soup.text[0:150] +'' # 只提取150个字节的文本信息
Article.objects.filter(nid=article_id).update(title=title, content=content, user=request.user, desc=desc)
return redirect('/%s/backend/' % username)
return render(request, 'add_article.html', locals())
@login_required
def add_article(request):
username = request.user.username
if request.method == 'POST':
title = request.POST.get('title')
content = request.POST.get('content')
print('content', content)
# 提取文章描述信息desc
soup = BeautifulSoup(content, 'html.parser')
for tag in soup.find_all():
if tag.name == 'script':
tag.decompose() # 删除非法信息,防止xss攻击
desc = soup.text[0:150] +'' # 只提取150个字节的文本信息
Article.objects.create(title=title, content=content, user=request.user, desc=desc)
return redirect('/%s/backend/' % username)
return render(request, 'add_article.html', locals())
def upload(request):
"""
文章的图片上传
:param request:
:return:
"""
img = request.FILES.get('upload_img') # 读取上传的文件
path = os.path.join(MEDIA_ROOT, 'article_imgs', img.name) # 保存到的路径
with open(path, 'wb') as f: # 保存
for i in img:
f.write(i)
response = {
'error': 0,
'url': '/media/article_imgs/%s' % img.name # 返回图片地址,可以在编辑框预览
}
return HttpResponse(json.dumps(response))
def logout(request): # 注销
auth.logout(request)
return redirect('/index/')
def login(request): # 登录
if request.method == 'POST':
user = request.POST.get('user')
pwd = request.POST.get('pwd')
validcode = request.POST.get('validcode') # 浏览器提交的
valid_code = request.session.get('valid_code') # 保存在服务器的
resopnse = {'user': None, 'msg': None} | if validcode.upper() == valid_code.upper(): # 首先校验验证码,验证码不区分大小写
ret = auth.authenticate(username=user, password=pwd) | random_line_split |
|
views.py | _articles,t_articles,c_t_articles,articles
:param request:
:param username:
:return:
"""
user = UserInfo.objects.filter(username=username).first() # 当前对象
print('user', user)
if not user: # 判断是否已经存在
return render(request, 'not_exit.html', locals())
# 当前站点对象
blog = user.blog
# 查询当前站点的每一个分类名称以及对应文章数
c_articles = Category.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values(
'title', 'c')
# 查询当前站点的每一个标签名称以及对应文章数
t_articles = Tag.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values(
'title', 'c')
# 查询当前站点每一个年月以及对应文章数
c_t_articles = Article.objects.filter(user=user). \
extra(select={"c_date": "date_format(create_time,'%%Y-%%m')"}). \
values('c_date').annotate(c=Count('nid')).values('c_date', 'c')
articles = Article.objects.filter(user=user)
return user,c_articles,t_articles,c_t_articles,articles,blog
@login_required
def home_site(request, username, **kwargs): # 第三个形参是以字典形式接受多个参数
"""
个人站点
:param request:
:param username:
:return:
"""
if username == request.user.username:
user, c_articles, t_articles, c_t_articles, articles, blog = get_data(request, username)
if kwargs: # 个人站点跳转
condition = kwargs['condition']
param = kwargs['param']
if condition == 'category':
articles = Article.objects.filter(user=user).filter(category__title=param)
if condition == 'tag':
articles = Article.objects.filter(user=user).filter(tags__title=param)
if condition == 'archive':
year, month = param.split('-')
articles = Article.objects.filter(user=user)\
.filter(create_time__year=year, create_time__month=month) # USE_TZ = False
return render(request, 'home_site.html', locals())
else:
return redirect('/login/')
def article_detail(request, username, article_id):
"""
文章详情页
:param request:
:param username:
:param article_id:
:return:
"""
article_obj = Article.objects.filter(nid=article_id).first()
comments = Comment.objects.filter(article_id=article_id)
return | .filter(user_id=user_id, article_id=article_id).first()
response = {'state': False, 'msg': None}
if not obj: # 该用户没对本文章进行操作
ArticleUpDown.objects.create(is_up=is_up, article_id=article_id, user_id=user_id)
queryset = Article.objects.filter(pk=article_id)
if is_up: # 更新文章的数据
queryset.update(up_count=F('up_count')+1)
else:
queryset.update(down_count=F('down_count')+1)
else:
response['state'] = True
if obj.is_up:
response['msg'] = '您已经点赞过!'
else:
response['msg'] = '您已经点踩过!'
return JsonResponse(response)
def comment(request):
article_id = request.POST.get('article_id')
pid = request.POST.get('pid')
content = request.POST.get('content')
user_id = request.user.pk
# 事务操作,必须同时成功,同时失败
with transaction.atomic():
ret = Comment.objects.create(user_id=user_id, content=content,
article_id=article_id, parent_comment_id=pid)
Article.objects.filter(nid=article_id).update(comment_count=F('comment_count')+1)
# 构件根评论添加时所需数据
response = {}
response['create_time'] = ret.create_time.strftime("%Y-%m-%d %X")
response['username'] = request.user.username
response['content'] = ret.content
article_obj = Article.objects.filter(nid=article_id).first()
# 给该文章作者发送邮件,通知其有人评论
from cnblog.settings import EMAIL_HOST_USER
import threading
# send_mail(
# "您的文章%s新增了一条评论内容"%article_obj.title, # 提示信息
# content, # 邮件内容
# EMAIL_HOST_USER, # 发送方
# ['[email protected]'] # 接收方
# )
t = threading.Thread(target=send_mail, args=( # 开启线程,节省时间
"您的文章%s新增了一条评论内容" % article_obj.title,
content,
EMAIL_HOST_USER,
['[email protected]']
))
t.start()
return JsonResponse(response)
def get_comment_tree(request):
article_id = request.GET.get('article_id')
# 转换成数组
ret = list(Comment.objects.filter(article_id=article_id).values('pk', 'content', 'parent_comment__nid'))
return JsonResponse(ret, safe=False) # 传列表,需改成false
def backend(request, username):
# 当前用户文章列表
username = username
print(username+'456789')
article_list = Article.objects.filter(user__username=username)
return render(request, 'backend.html', locals())
def article_del(request):
"""
删除文章
:param request:
:return:
"""
username = request.POST.get('username')
article_id = request.POST.get('article_id')
Article.objects.filter(pk=article_id).delete()
Comment.objects.filter(article_id=article_id).delete()
return HttpResponse('删除成功!')
def article_edit(request, article_id):
"""
编辑修改某一篇文章
:param request:
:return:
"""
article_id = article_id
article_obj = Article.objects.filter(nid=article_id).first()
return render(request, 'article_edit.html', locals())
def article_update(request):
username = request.user.username
if request.method == 'POST':
article_id = request.POST.get('article_id')
title = request.POST.get('title')
content = request.POST.get('content')
print('content', content)
# 提取文章描述信息desc
soup = BeautifulSoup(content, 'html.parser')
for tag in soup.find_all():
if tag.name == 'script':
tag.decompose() # 删除非法信息,防止xss攻击
desc = soup.text[0:150] +'' # 只提取150个字节的文本信息
Article.objects.filter(nid=article_id).update(title=title, content=content, user=request.user, desc=desc)
return redirect('/%s/backend/' % username)
return render(request, 'add_article.html', locals())
@login_required
def add_article(request):
username = request.user.username
if request.method == 'POST':
title = request.POST.get('title')
content = request.POST.get('content')
print('content', content)
# 提取文章描述信息desc
soup = BeautifulSoup(content, 'html.parser')
for tag in soup.find_all():
if tag.name == 'script':
tag.decompose() # 删除非法信息,防止xss攻击
desc = soup.text[0:150] +'' # 只提取150个字节的文本信息
Article.objects.create(title=title, content=content, user=request.user, desc=desc)
return redirect('/%s/backend/' % username)
return render(request, 'add_article.html', locals())
def upload(request):
"""
文章的图片上传
:param request:
:return:
"""
img = request.FILES.get('upload_img') # 读取上传的文件
path = os.path.join(MEDIA_ROOT, 'article_imgs', img.name) # 保存到的路径
with open(path, 'wb') as f: # 保存
for i in img:
f.write(i)
response = {
'error': 0,
'url': '/media/article_imgs/%s' % img.name # 返回图片地址,可以在编辑框预览
}
return HttpResponse(json.dumps(response))
def logout(request): # 注销
auth.logout(request)
return redirect('/index/')
def login(request): # 登录
if request.method == 'POST':
user = request.POST.get('user')
pwd = request.POST.get('pwd')
validcode = request.POST.get('validcode') # 浏览器提交的
valid_code = request.session.get('valid_code') # 保存在服务器的
resopnse = {'user': None, 'msg': None}
if validcode.upper() == valid_code | render(request, 'article_detail.html', locals())
def digg(request):
"""
点赞
:param request:
:return:
"""
is_up = json.loads(request.POST.get('is_up')) # 反序列化
user_id = request.user.pk
article_id = request.POST.get('article_id')
obj = ArticleUpDown.objects | identifier_body |
views.py | ,t_articles,c_t_articles,articles
:param request:
:param username:
:return:
"""
user = UserInfo.objects.filter(username=username).first() # 当前对象
print('user', user)
if not user: # 判断是否已经存在
return render(request, 'not_exit.html', locals())
# 当前站点对象
blog = user.blog
# 查询当前站点的每一个分类名称以及对应文章数
c_articles = Category.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values(
'title', 'c')
# 查询当前站点的每一个标签名称以及对应文章数
t_articles = Tag.objects.filter(blog_id=blog.nid).values('title').annotate(c=Count('article__title')).values(
'title', 'c')
# 查询当前站点每一个年月以及对应文章数
c_t_articles = Article.objects.filter(user=user). \
extra(select={"c_date": "date_format(create_time,'%%Y-%%m')"}). \
values('c_date').annotate(c=Count('nid')).values('c_date', 'c')
articles = Article.objects.filter(user=user)
return user,c_articles,t_articles,c_t_articles,articles,blog
@login_required
def home_site(request, username, **kwargs): # 第三个形参是以字典形式接受多个参数
"""
个人站点
:param request:
:param username:
:return:
"""
if username == request.user.username:
user, c_articles, t_articles, c_t_articles, articles, blog = get_data(request, username)
if kwargs: # 个人站点跳转
condition = kwargs['condition']
param = kwargs['param']
if condition == 'category':
articles = Article.objects.filter(user=user).filter(category__title=param)
if condition == 'tag':
articles = Article.objects.filter(user=user).filter(tags__title=param)
if condition == 'archive':
year, month = param.split('-')
articles = Article.objects.filter(user=user)\
.filter(create_time__year=year, create_time__month=month) # USE_TZ = False
return render(request, 'home_site.html', locals())
else:
return redirect('/login/')
def article_detail(request, username, article_id):
"""
文章详情页
:param request:
:param username:
:param article_id:
:return:
"""
article_obj = Article.objects.filter(nid=article_id).first()
comments = Comment.objects.filter(article_id=article_id)
return render(request, 'article_detail.html', locals())
def digg(request):
"""
点赞
:param request:
:return:
"""
is_up = json.loads(request.POST.get('is_up')) # 反序列化
user_id = request.user.pk
article_id = request.POST.get('article_id')
obj = ArticleUpDown.objects.filter(user_id=user_id, article_id=article_id).first()
response = {'state': False, 'msg': None}
if not obj: # 该用户没对本文章进行操作
ArticleUpDown.objects.create(is_up=is_up, article_id=article_id, user_id=user_id)
queryset = Article.objects.filter(pk=article_id)
if is_up: # 更新文章的数据
queryset.update(up_count=F('up_count')+1)
else:
queryset.update(down_count=F('down_count')+1)
else:
response['state'] = True
if obj.is_up:
response['msg'] = '您已经点赞过!'
else:
response['msg'] = '您已经点踩过!'
return JsonResponse(response)
def comment(request):
article_id = request.POST.get('article_id')
pid = request.POST.get('pid')
content = request.POST.get('content')
user_id = request.user.pk
# 事务操作,必须同时成功,同时失败
with transaction.atomic():
ret = Comment.objects.create(user_id=user_id, content=content,
article_id=article_id, parent_comment_id=pid)
Article.objects.filter(nid=article_id).update(comment_count=F('comment_count')+1)
# 构件根评论添加时所需数据
response = {}
response['create_time'] = ret.create_time.strftime("%Y-%m-%d %X")
response['username'] = request.user.username
response['content'] = ret.content
article_obj = Article.objects.filter(nid=article_id).first()
# 给该文章作者发送邮件,通知其有人评论
from cnblog.settings import EMAIL_HOST_USER
import threading
# send_mail(
# "您的文章%s新增了一条评论内容"%article_obj.title, # 提示信息
# content, # 邮件内容
# EMAIL_HOST_USER, # 发送方
# ['[email protected]'] # 接收方
# )
t = threading.Thread(target=send_mail, args=( # 开启线程,节省时间
"您的文章%s新增了一条评论内容" % article_obj.title,
content,
EMAIL_HOST_USER,
['[email protected]']
))
t.start()
return JsonResponse(response)
def get_comment_tree(request):
article_id = request.GET.get('article_id')
# 转换成数组
ret = list(Comment.objects.filter(article_id=article_id).values('pk', 'content', 'parent_comment__nid'))
return JsonResponse(ret, safe=False) # 传列表,需改成false
def backend(request, username):
# 当前用户文章列表
username = username
print(username+'456789')
article_list = Article.objects.filter(user__username=username)
return render(request, 'backend.html', locals())
def article_del(request):
"""
删除文章
:param request:
:return:
"""
username = request.POST.get('username')
article_id = request.POST.get('article_id')
Article.objects.filter(pk=article_id).delete()
Comment.objects.filter(article_id=article_id).delete()
return HttpResponse('删除成功!')
| ticle_edit(request, article_id):
"""
编辑修改某一篇文章
:param request:
:return:
"""
article_id = article_id
article_obj = Article.objects.filter(nid=article_id).first()
return render(request, 'article_edit.html', locals())
def article_update(request):
username = request.user.username
if request.method == 'POST':
article_id = request.POST.get('article_id')
title = request.POST.get('title')
content = request.POST.get('content')
print('content', content)
# 提取文章描述信息desc
soup = BeautifulSoup(content, 'html.parser')
for tag in soup.find_all():
if tag.name == 'script':
tag.decompose() # 删除非法信息,防止xss攻击
desc = soup.text[0:150] +'' # 只提取150个字节的文本信息
Article.objects.filter(nid=article_id).update(title=title, content=content, user=request.user, desc=desc)
return redirect('/%s/backend/' % username)
return render(request, 'add_article.html', locals())
@login_required
def add_article(request):
username = request.user.username
if request.method == 'POST':
title = request.POST.get('title')
content = request.POST.get('content')
print('content', content)
# 提取文章描述信息desc
soup = BeautifulSoup(content, 'html.parser')
for tag in soup.find_all():
if tag.name == 'script':
tag.decompose() # 删除非法信息,防止xss攻击
desc = soup.text[0:150] +'' # 只提取150个字节的文本信息
Article.objects.create(title=title, content=content, user=request.user, desc=desc)
return redirect('/%s/backend/' % username)
return render(request, 'add_article.html', locals())
def upload(request):
"""
文章的图片上传
:param request:
:return:
"""
img = request.FILES.get('upload_img') # 读取上传的文件
path = os.path.join(MEDIA_ROOT, 'article_imgs', img.name) # 保存到的路径
with open(path, 'wb') as f: # 保存
for i in img:
f.write(i)
response = {
'error': 0,
'url': '/media/article_imgs/%s' % img.name # 返回图片地址,可以在编辑框预览
}
return HttpResponse(json.dumps(response))
def logout(request): # 注销
auth.logout(request)
return redirect('/index/')
def login(request): # 登录
if request.method == 'POST':
user = request.POST.get('user')
pwd = request.POST.get('pwd')
validcode = request.POST.get('validcode') # 浏览器提交的
valid_code = request.session.get('valid_code') # 保存在服务器的
resopnse = {'user': None, 'msg': None}
if validcode.upper() == valid |
def ar | identifier_name |
views.py | if condition == 'category':
articles = Article.objects.filter(user=user).filter(category__title=param)
if condition == 'tag':
articles = Article.objects.filter(user=user).filter(tags__title=param)
if condition == 'archive':
year, month = param.split('-')
articles = Article.objects.filter(user=user)\
.filter(create_time__year=year, create_time__month=month) # USE_TZ = False
return render(request, 'home_site.html', locals())
else:
return redirect('/login/')
def article_detail(request, username, article_id):
"""
文章详情页
:param request:
:param username:
:param article_id:
:return:
"""
article_obj = Article.objects.filter(nid=article_id).first()
comments = Comment.objects.filter(article_id=article_id)
return render(request, 'article_detail.html', locals())
def digg(request):
"""
点赞
:param request:
:return:
"""
is_up = json.loads(request.POST.get('is_up')) # 反序列化
user_id = request.user.pk
article_id = request.POST.get('article_id')
obj = ArticleUpDown.objects.filter(user_id=user_id, article_id=article_id).first()
response = {'state': False, 'msg': None}
if not obj: # 该用户没对本文章进行操作
ArticleUpDown.objects.create(is_up=is_up, article_id=article_id, user_id=user_id)
queryset = Article.objects.filter(pk=article_id)
if is_up: # 更新文章的数据
queryset.update(up_count=F('up_count')+1)
else:
queryset.update(down_count=F('down_count')+1)
else:
response['state'] = True
if obj.is_up:
response['msg'] = '您已经点赞过!'
else:
response['msg'] = '您已经点踩过!'
return JsonResponse(response)
def comment(request):
article_id = request.POST.get('article_id')
pid = request.POST.get('pid')
content = request.POST.get('content')
user_id = request.user.pk
# 事务操作,必须同时成功,同时失败
with transaction.atomic():
ret = Comment.objects.create(user_id=user_id, content=content,
article_id=article_id, parent_comment_id=pid)
Article.objects.filter(nid=article_id).update(comment_count=F('comment_count')+1)
# 构件根评论添加时所需数据
response = {}
response['create_time'] = ret.create_time.strftime("%Y-%m-%d %X")
response['username'] = request.user.username
response['content'] = ret.content
article_obj = Article.objects.filter(nid=article_id).first()
# 给该文章作者发送邮件,通知其有人评论
from cnblog.settings import EMAIL_HOST_USER
import threading
# send_mail(
# "您的文章%s新增了一条评论内容"%article_obj.title, # 提示信息
# content, # 邮件内容
# EMAIL_HOST_USER, # 发送方
# ['[email protected]'] # 接收方
# )
t = threading.Thread(target=send_mail, args=( # 开启线程,节省时间
"您的文章%s新增了一条评论内容" % article_obj.title,
content,
EMAIL_HOST_USER,
['[email protected]']
))
t.start()
return JsonResponse(response)
def get_comment_tree(request):
article_id = request.GET.get('article_id')
# 转换成数组
ret = list(Comment.objects.filter(article_id=article_id).values('pk', 'content', 'parent_comment__nid'))
return JsonResponse(ret, safe=False) # 传列表,需改成false
def backend(request, username):
# 当前用户文章列表
username = username
print(username+'456789')
article_list = Article.objects.filter(user__username=username)
return render(request, 'backend.html', locals())
def article_del(request):
"""
删除文章
:param request:
:return:
"""
username = request.POST.get('username')
article_id = request.POST.get('article_id')
Article.objects.filter(pk=article_id).delete()
Comment.objects.filter(article_id=article_id).delete()
return HttpResponse('删除成功!')
def article_edit(request, article_id):
"""
编辑修改某一篇文章
:param request:
:return:
"""
article_id = article_id
article_obj = Article.objects.filter(nid=article_id).first()
return render(request, 'article_edit.html', locals())
def article_update(request):
username = request.user.username
if request.method == 'POST':
article_id = request.POST.get('article_id')
title = request.POST.get('title')
content = request.POST.get('content')
print('content', content)
# 提取文章描述信息desc
soup = BeautifulSoup(content, 'html.parser')
for tag in soup.find_all():
if tag.name == 'script':
tag.decompose() # 删除非法信息,防止xss攻击
desc = soup.text[0:150] +'' # 只提取150个字节的文本信息
Article.objects.filter(nid=article_id).update(title=title, content=content, user=request.user, desc=desc)
return redirect('/%s/backend/' % username)
return render(request, 'add_article.html', locals())
@login_required
def add_article(request):
username = request.user.username
if request.method == 'POST':
title = request.POST.get('title')
content = request.POST.get('content')
print('content', content)
# 提取文章描述信息desc
soup = BeautifulSoup(content, 'html.parser')
for tag in soup.find_all():
if tag.name == 'script':
tag.decompose() # 删除非法信息,防止xss攻击
desc = soup.text[0:150] +'' # 只提取150个字节的文本信息
Article.objects.create(title=title, content=content, user=request.user, desc=desc)
return redirect('/%s/backend/' % username)
return render(request, 'add_article.html', locals())
def upload(request):
"""
文章的图片上传
:param request:
:return:
"""
img = request.FILES.get('upload_img') # 读取上传的文件
path = os.path.join(MEDIA_ROOT, 'article_imgs', img.name) # 保存到的路径
with open(path, 'wb') as f: # 保存
for i in img:
f.write(i)
response = {
'error': 0,
'url': '/media/article_imgs/%s' % img.name # 返回图片地址,可以在编辑框预览
}
return HttpResponse(json.dumps(response))
def logout(request): # 注销
auth.logout(request)
return redirect('/index/')
def login(request): # 登录
if request.method == 'POST':
user = request.POST.get('user')
pwd = request.POST.get('pwd')
validcode = request.POST.get('validcode') # 浏览器提交的
valid_code = request.session.get('valid_code') # 保存在服务器的
resopnse = {'user': None, 'msg': None}
if validcode.upper() == valid_code.upper(): # 首先校验验证码,验证码不区分大小写
ret = auth.authenticate(username=user, password=pwd)
if ret: # 用户存在
auth.login(request, ret) # 当前登录对象
resopnse['user'] = user
else:
resopnse['msg'] = 'username or password is wromg!'
else:
resopnse['msg'] = 'valid code error!'
return JsonResponse(resopnse)
return render(request, 'login.html')
def get_validcode_img(request): # 生成随机验证码
data = get_validCode_img(request)
return HttpResponse(data)
def register(request):
"""
注册页面
:param request:
:return:
"""
if request.method == 'POST': # 或者if request.is_ajax 进行判断
form = User(request.POST) # 验证是否合要求
response = {'user': None, 'msg': None}
if form.is_valid(): # 信息正确,增加注册用户
response['user'] = form.cleaned_data.get('user')
user = form.cleaned_data.get('user')
pwd = form.cleaned_data.get('pwd')
email = form.cleaned_data.get('email')
head_obj = request.FILES.get('avatar') # 文件提取
extra = {} # 额外传的数据都打包成字典
if head_obj:
extra['avatar'] = head_obj
UserInfo.objects.create_user(username=user, password=pwd, email=email, **extra)
else:
response['msg'] = form.errors
return JsonResponse(response) # Ajax接受JSON文件
else:
form = User()
return render(request, 'reg.html', locals())
| conditional_block |
||
ui.rs | : i32);
fn set_progress_enabled(&mut self, enabled: bool);
// Environment information
fn program_name(&self) -> &str;
// Write/Print interface
fn will_print(&self, verbosity: i32) -> bool;
fn print(&self, verbosity: i32, message: &str) -> Fallible<()>;
fn print_error(&self, err: &Error) -> Fallible<()>;
fn println_interactive(&self, message: &str) -> Fallible<()>;
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>;
fn println(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.print(verbosity, &format!("{}\n", message))
}
// Read interface
fn can_read(&self) -> bool;
fn read_prompt(&self, prompt: &str) -> Fallible<String>;
fn set_stdin_echo(&self, enable: bool);
fn read_prompt_bool(
&self,
verbosity: i32,
prompt: &str,
default: bool,
) -> Fallible<Option<bool>> {
if !self.can_read() || !self.will_print(verbosity) {
return Ok(None);
}
let yn_helper = if default { "[Y/n]" } else { "[y/N]" };
let prompt = format!("{} {}: ", prompt, yn_helper);
loop {
match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() {
"y" | "yes" => return Ok(Some(true)),
"n" | "no" => return Ok(Some(false)),
"" => return Ok(Some(default)),
_ => {
self.println_interactive("Invalid input, please enter 'y' or 'n'.")?;
}
}
}
}
fn read_password(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
self.set_stdin_echo(false);
let res = self.read_prompt(prompt);
self.set_stdin_echo(true);
// With echo off we don't get the newline character from input; we need to output it ourselves.
self.println_interactive("")?;
res
}
}
pub struct BasicUI {
program_name: String,
input: Rc<RefCell<Option<Reader>>>,
output: RefCell<Writer>,
input_is_tty: bool,
output_is_tty: bool,
verbosity: i32,
progress_enabled: bool,
}
impl BasicUI {
pub fn new(
program_name: String,
input: Reader,
input_is_tty: bool,
output: Writer,
output_is_tty: bool,
) -> BasicUI {
BasicUI {
program_name,
input: Rc::new(RefCell::new(Some(input))),
input_is_tty,
output: RefCell::new(output),
output_is_tty,
verbosity: 0,
progress_enabled: true,
}
}
// Create a function that extracts input stream from this struct, returning it to the caller.
// After returned function is called, this struct loses input stream and with it the ability to
// prompt user for input/passwords.
pub fn input_stream_extractor(&mut self) -> ReaderFactory {
let input = Rc::clone(&self.input);
Box::new(move || Ok(input.borrow_mut().take().unwrap()))
}
}
impl UI for BasicUI {
fn set_verbosity(&mut self, verbosity: i32) {
self.verbosity = verbosity;
}
fn set_progress_enabled(&mut self, enabled: bool) {
self.progress_enabled = enabled;
}
fn program_name(&self) -> &str {
&self.program_name
}
// Write interface
fn will_print(&self, verbosity: i32) -> bool {
verbosity <= self.verbosity
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
if self.will_print(verbosity) {
self.output.borrow_mut().write_all(message.as_bytes())?;
}
Ok(())
}
fn print_error(&self, err: &Error) -> Fallible<()> {
if self.will_print(ERROR_VERBOSITY) |
Ok(())
}
fn println_interactive(&self, message: &str) -> Fallible<()> {
if self.will_print(INTERACTIVE_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}", message)?;
}
Ok(())
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
if self.progress_enabled {
let last_char = if finish { "\n" } else { "\r" };
let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char);
self.print(verbosity, &message)?;
}
Ok(())
}
// Read interface
fn can_read(&self) -> bool {
self.input.borrow().is_some()
&& self.input_is_tty
&& self.output_is_tty
&& self.will_print(INTERACTIVE_VERBOSITY)
}
fn read_prompt(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
let mut output = self.output.borrow_mut();
let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap());
write!(output, "{}", prompt)?;
// Read from stdin byte-by-byte and convert them to utf8 characters, stopping at '\n'.
let mut char_bytes = vec![];
let mut res = String::new();
for byte in input.by_ref().bytes() {
char_bytes.push(byte?);
match std::str::from_utf8(&char_bytes) {
Ok(valid_char) => {
match valid_char {
"\n" => {
if res.ends_with('\r') {
res.pop(); // Handle Windows CRLF.
}
return Ok(res);
}
valid_char => res.push_str(valid_char),
}
char_bytes.clear();
}
Err(utf_err) => match utf_err.error_len() {
None => (), // Incomplete character - get more bytes.
Some(_) => bail!(
"Error reading from stdin: Non-UTF8 byte sequence encountered: {}",
to_hex_string(char_bytes)
),
},
}
}
Err(format_err!("Error reading from stdin: EOF"))
}
fn set_stdin_echo(&self, enable: bool) {
set_stdin_echo(enable);
}
}
#[cfg(test)]
pub mod test_helpers {
use super::*;
use std::collections::VecDeque;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PrintType {
Log { verbosity: i32 },
Error,
Interactive,
Progress { verbosity: i32, finish: bool },
}
#[derive(Default)]
pub struct TestUI {
pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>,
pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>,
}
impl TestUI {
pub fn new() -> TestUI {
TestUI {
..Default::default()
}
}
pub fn expect_prompt(
self,
matcher: impl AsRef<str>,
reply: Result<impl AsRef<str>, Error>,
) -> Self {
self.prompt_replies.borrow_mut().push_back((
Some(matcher.as_ref().to_string()),
reply.map(|s| s.as_ref().to_string()),
));
self
}
pub fn expect_all_prompts_asked(&self) {
assert_eq!(self.prompt_replies.borrow_mut().len(), 0);
}
fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> {
let message = message.as_ref();
let lines = message.lines().collect::<Vec<_>>();
let lines_len = lines.len();
let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| {
let line_finished = idx < lines_len - 1 || message.ends_with('\n');
(typ, line.to_string(), line_finished)
});
let mut printed_lines = self.printed_lines.borrow_mut();
// Append to last line if it has the same type
if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() {
if *last_typ == typ && !*last_line_finished {
if let Some((_, line, finished)) = line_tuples.next() {
last_line.push_str(&line);
*last_line_finished = finished;
}
}
}
printed_lines.extend(line_tuples);
Ok(())
}
}
impl UI for TestUI {
fn set_verbosity(&mut self, _verbosity: i32) {}
fn set_progress_enabled(&mut self, _enabled: bool) {}
fn program_name(&self) -> &str {
"rypt"
}
// Write interface
fn will_print(&self, _verbosity: i32) -> bool {
true
}
fn print(& | {
writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?;
} | conditional_block |
ui.rs | : i32);
fn set_progress_enabled(&mut self, enabled: bool);
// Environment information
fn program_name(&self) -> &str;
// Write/Print interface
fn will_print(&self, verbosity: i32) -> bool;
fn print(&self, verbosity: i32, message: &str) -> Fallible<()>;
fn print_error(&self, err: &Error) -> Fallible<()>;
fn println_interactive(&self, message: &str) -> Fallible<()>;
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>;
fn println(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.print(verbosity, &format!("{}\n", message))
}
// Read interface
fn can_read(&self) -> bool;
fn read_prompt(&self, prompt: &str) -> Fallible<String>;
fn set_stdin_echo(&self, enable: bool);
fn read_prompt_bool(
&self,
verbosity: i32,
prompt: &str,
default: bool,
) -> Fallible<Option<bool>> {
if !self.can_read() || !self.will_print(verbosity) {
return Ok(None);
}
let yn_helper = if default { "[Y/n]" } else { "[y/N]" };
let prompt = format!("{} {}: ", prompt, yn_helper);
loop {
match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() {
"y" | "yes" => return Ok(Some(true)),
"n" | "no" => return Ok(Some(false)),
"" => return Ok(Some(default)),
_ => {
self.println_interactive("Invalid input, please enter 'y' or 'n'.")?;
}
}
}
}
fn read_password(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
self.set_stdin_echo(false);
let res = self.read_prompt(prompt);
self.set_stdin_echo(true);
// With echo off we don't get the newline character from input; we need to output it ourselves.
self.println_interactive("")?;
res
}
}
pub struct BasicUI {
program_name: String,
input: Rc<RefCell<Option<Reader>>>,
output: RefCell<Writer>,
input_is_tty: bool,
output_is_tty: bool,
verbosity: i32,
progress_enabled: bool,
}
impl BasicUI {
pub fn new(
program_name: String,
input: Reader,
input_is_tty: bool,
output: Writer,
output_is_tty: bool,
) -> BasicUI {
BasicUI {
program_name,
input: Rc::new(RefCell::new(Some(input))),
input_is_tty,
output: RefCell::new(output),
output_is_tty,
verbosity: 0,
progress_enabled: true,
}
}
// Create a function that extracts input stream from this struct, returning it to the caller.
// After returned function is called, this struct loses input stream and with it the ability to
// prompt user for input/passwords.
pub fn input_stream_extractor(&mut self) -> ReaderFactory {
let input = Rc::clone(&self.input);
Box::new(move || Ok(input.borrow_mut().take().unwrap()))
}
}
impl UI for BasicUI {
fn set_verbosity(&mut self, verbosity: i32) {
self.verbosity = verbosity;
}
fn set_progress_enabled(&mut self, enabled: bool) {
self.progress_enabled = enabled;
}
fn program_name(&self) -> &str {
&self.program_name
}
// Write interface
fn will_print(&self, verbosity: i32) -> bool {
verbosity <= self.verbosity
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
if self.will_print(verbosity) {
self.output.borrow_mut().write_all(message.as_bytes())?;
}
Ok(())
}
fn print_error(&self, err: &Error) -> Fallible<()> {
if self.will_print(ERROR_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?;
}
Ok(())
}
fn println_interactive(&self, message: &str) -> Fallible<()> {
if self.will_print(INTERACTIVE_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}", message)?;
}
Ok(())
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
if self.progress_enabled {
let last_char = if finish { "\n" } else { "\r" };
let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char);
self.print(verbosity, &message)?;
}
Ok(())
}
// Read interface
fn can_read(&self) -> bool {
self.input.borrow().is_some()
&& self.input_is_tty
&& self.output_is_tty
&& self.will_print(INTERACTIVE_VERBOSITY)
}
fn read_prompt(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
let mut output = self.output.borrow_mut();
let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap());
write!(output, "{}", prompt)?;
// Read from stdin byte-by-byte and convert them to utf8 characters, stopping at '\n'.
let mut char_bytes = vec![];
let mut res = String::new();
for byte in input.by_ref().bytes() {
char_bytes.push(byte?);
match std::str::from_utf8(&char_bytes) {
Ok(valid_char) => {
match valid_char {
"\n" => {
if res.ends_with('\r') {
res.pop(); // Handle Windows CRLF.
}
return Ok(res);
}
valid_char => res.push_str(valid_char),
}
char_bytes.clear();
}
Err(utf_err) => match utf_err.error_len() {
None => (), // Incomplete character - get more bytes.
Some(_) => bail!(
"Error reading from stdin: Non-UTF8 byte sequence encountered: {}",
to_hex_string(char_bytes)
),
},
}
}
Err(format_err!("Error reading from stdin: EOF"))
}
fn set_stdin_echo(&self, enable: bool) {
set_stdin_echo(enable);
}
}
#[cfg(test)]
pub mod test_helpers {
use super::*;
use std::collections::VecDeque;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PrintType {
Log { verbosity: i32 },
Error,
Interactive,
Progress { verbosity: i32, finish: bool },
}
#[derive(Default)]
pub struct TestUI {
pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>,
pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>,
}
impl TestUI {
pub fn new() -> TestUI |
pub fn expect_prompt(
self,
matcher: impl AsRef<str>,
reply: Result<impl AsRef<str>, Error>,
) -> Self {
self.prompt_replies.borrow_mut().push_back((
Some(matcher.as_ref().to_string()),
reply.map(|s| s.as_ref().to_string()),
));
self
}
pub fn expect_all_prompts_asked(&self) {
assert_eq!(self.prompt_replies.borrow_mut().len(), 0);
}
fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> {
let message = message.as_ref();
let lines = message.lines().collect::<Vec<_>>();
let lines_len = lines.len();
let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| {
let line_finished = idx < lines_len - 1 || message.ends_with('\n');
(typ, line.to_string(), line_finished)
});
let mut printed_lines = self.printed_lines.borrow_mut();
// Append to last line if it has the same type
if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() {
if *last_typ == typ && !*last_line_finished {
if let Some((_, line, finished)) = line_tuples.next() {
last_line.push_str(&line);
*last_line_finished = finished;
}
}
}
printed_lines.extend(line_tuples);
Ok(())
}
}
impl UI for TestUI {
fn set_verbosity(&mut self, _verbosity: i32) {}
fn set_progress_enabled(&mut self, _enabled: bool) {}
fn program_name(&self) -> &str {
"rypt"
}
// Write interface
fn will_print(&self, _verbosity: i32) -> bool {
true
}
fn print(& | {
TestUI {
..Default::default()
}
} | identifier_body |
ui.rs | : i32);
fn set_progress_enabled(&mut self, enabled: bool);
// Environment information
fn program_name(&self) -> &str;
// Write/Print interface
fn will_print(&self, verbosity: i32) -> bool;
fn print(&self, verbosity: i32, message: &str) -> Fallible<()>;
fn print_error(&self, err: &Error) -> Fallible<()>;
fn println_interactive(&self, message: &str) -> Fallible<()>;
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>;
fn println(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.print(verbosity, &format!("{}\n", message))
}
// Read interface
fn can_read(&self) -> bool;
fn read_prompt(&self, prompt: &str) -> Fallible<String>;
fn set_stdin_echo(&self, enable: bool);
fn read_prompt_bool(
&self,
verbosity: i32,
prompt: &str,
default: bool,
) -> Fallible<Option<bool>> {
if !self.can_read() || !self.will_print(verbosity) {
return Ok(None);
}
let yn_helper = if default { "[Y/n]" } else { "[y/N]" };
let prompt = format!("{} {}: ", prompt, yn_helper);
loop {
match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() {
"y" | "yes" => return Ok(Some(true)),
"n" | "no" => return Ok(Some(false)),
"" => return Ok(Some(default)),
_ => {
self.println_interactive("Invalid input, please enter 'y' or 'n'.")?;
}
}
}
}
fn read_password(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
self.set_stdin_echo(false);
let res = self.read_prompt(prompt);
self.set_stdin_echo(true);
// With echo off we don't get the newline character from input; we need to output it ourselves.
self.println_interactive("")?;
res
}
}
pub struct BasicUI {
program_name: String,
input: Rc<RefCell<Option<Reader>>>,
output: RefCell<Writer>,
input_is_tty: bool,
output_is_tty: bool,
verbosity: i32,
progress_enabled: bool,
}
impl BasicUI {
pub fn new(
program_name: String,
input: Reader,
input_is_tty: bool,
output: Writer,
output_is_tty: bool,
) -> BasicUI {
BasicUI {
program_name,
input: Rc::new(RefCell::new(Some(input))),
input_is_tty,
output: RefCell::new(output),
output_is_tty,
verbosity: 0,
progress_enabled: true,
}
}
// Create a function that extracts input stream from this struct, returning it to the caller.
// After returned function is called, this struct loses input stream and with it the ability to
// prompt user for input/passwords.
pub fn input_stream_extractor(&mut self) -> ReaderFactory {
let input = Rc::clone(&self.input);
Box::new(move || Ok(input.borrow_mut().take().unwrap()))
}
}
impl UI for BasicUI {
fn set_verbosity(&mut self, verbosity: i32) {
self.verbosity = verbosity;
}
fn set_progress_enabled(&mut self, enabled: bool) {
self.progress_enabled = enabled;
}
fn program_name(&self) -> &str {
&self.program_name
}
// Write interface
fn will_print(&self, verbosity: i32) -> bool {
verbosity <= self.verbosity
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
if self.will_print(verbosity) {
self.output.borrow_mut().write_all(message.as_bytes())?;
}
Ok(())
}
fn print_error(&self, err: &Error) -> Fallible<()> {
if self.will_print(ERROR_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?;
}
Ok(())
}
fn println_interactive(&self, message: &str) -> Fallible<()> {
if self.will_print(INTERACTIVE_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}", message)?;
}
Ok(())
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
if self.progress_enabled {
let last_char = if finish { "\n" } else { "\r" };
let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char);
self.print(verbosity, &message)?;
}
Ok(())
}
// Read interface
fn can_read(&self) -> bool {
self.input.borrow().is_some()
&& self.input_is_tty
&& self.output_is_tty
&& self.will_print(INTERACTIVE_VERBOSITY)
}
fn read_prompt(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
let mut output = self.output.borrow_mut();
let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap());
write!(output, "{}", prompt)?;
// Read from stdin byte-by-byte and convert them to utf8 characters, stopping at '\n'.
let mut char_bytes = vec![];
let mut res = String::new();
for byte in input.by_ref().bytes() {
char_bytes.push(byte?);
match std::str::from_utf8(&char_bytes) {
Ok(valid_char) => {
match valid_char {
"\n" => {
if res.ends_with('\r') {
res.pop(); // Handle Windows CRLF.
}
return Ok(res);
}
valid_char => res.push_str(valid_char),
}
char_bytes.clear();
}
Err(utf_err) => match utf_err.error_len() {
None => (), // Incomplete character - get more bytes.
Some(_) => bail!(
"Error reading from stdin: Non-UTF8 byte sequence encountered: {}",
to_hex_string(char_bytes)
),
},
}
}
Err(format_err!("Error reading from stdin: EOF"))
}
fn set_stdin_echo(&self, enable: bool) {
set_stdin_echo(enable);
}
}
#[cfg(test)]
pub mod test_helpers {
use super::*;
use std::collections::VecDeque;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PrintType {
Log { verbosity: i32 },
Error,
Interactive,
Progress { verbosity: i32, finish: bool },
}
#[derive(Default)]
pub struct TestUI {
pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>,
pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>,
}
impl TestUI {
pub fn new() -> TestUI {
TestUI {
..Default::default()
}
}
pub fn expect_prompt(
self,
matcher: impl AsRef<str>,
reply: Result<impl AsRef<str>, Error>,
) -> Self {
self.prompt_replies.borrow_mut().push_back((
Some(matcher.as_ref().to_string()),
reply.map(|s| s.as_ref().to_string()),
));
self
}
pub fn expect_all_prompts_asked(&self) {
assert_eq!(self.prompt_replies.borrow_mut().len(), 0);
}
fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> {
let message = message.as_ref();
let lines = message.lines().collect::<Vec<_>>();
let lines_len = lines.len();
let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| {
let line_finished = idx < lines_len - 1 || message.ends_with('\n');
(typ, line.to_string(), line_finished)
});
let mut printed_lines = self.printed_lines.borrow_mut();
// Append to last line if it has the same type
if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() {
if *last_typ == typ && !*last_line_finished {
if let Some((_, line, finished)) = line_tuples.next() {
last_line.push_str(&line);
*last_line_finished = finished;
}
} | }
}
impl UI for TestUI {
fn set_verbosity(&mut self, _verbosity: i32) {}
fn set_progress_enabled(&mut self, _enabled: bool) {}
fn program_name(&self) -> &str {
"rypt"
}
// Write interface
fn will_print(&self, _verbosity: i32) -> bool {
true
}
fn print(&self | }
printed_lines.extend(line_tuples);
Ok(()) | random_line_split |
ui.rs | : i32);
fn set_progress_enabled(&mut self, enabled: bool);
// Environment information
fn program_name(&self) -> &str;
// Write/Print interface
fn will_print(&self, verbosity: i32) -> bool;
fn print(&self, verbosity: i32, message: &str) -> Fallible<()>;
fn print_error(&self, err: &Error) -> Fallible<()>;
fn println_interactive(&self, message: &str) -> Fallible<()>;
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()>;
fn println(&self, verbosity: i32, message: &str) -> Fallible<()> {
self.print(verbosity, &format!("{}\n", message))
}
// Read interface
fn can_read(&self) -> bool;
fn read_prompt(&self, prompt: &str) -> Fallible<String>;
fn set_stdin_echo(&self, enable: bool);
fn read_prompt_bool(
&self,
verbosity: i32,
prompt: &str,
default: bool,
) -> Fallible<Option<bool>> {
if !self.can_read() || !self.will_print(verbosity) {
return Ok(None);
}
let yn_helper = if default { "[Y/n]" } else { "[y/N]" };
let prompt = format!("{} {}: ", prompt, yn_helper);
loop {
match self.read_prompt(&prompt)?.to_ascii_lowercase().as_str() {
"y" | "yes" => return Ok(Some(true)),
"n" | "no" => return Ok(Some(false)),
"" => return Ok(Some(default)),
_ => {
self.println_interactive("Invalid input, please enter 'y' or 'n'.")?;
}
}
}
}
fn read_password(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
self.set_stdin_echo(false);
let res = self.read_prompt(prompt);
self.set_stdin_echo(true);
// With echo off we don't get the newline character from input; we need to output it ourselves.
self.println_interactive("")?;
res
}
}
pub struct BasicUI {
program_name: String,
input: Rc<RefCell<Option<Reader>>>,
output: RefCell<Writer>,
input_is_tty: bool,
output_is_tty: bool,
verbosity: i32,
progress_enabled: bool,
}
impl BasicUI {
pub fn new(
program_name: String,
input: Reader,
input_is_tty: bool,
output: Writer,
output_is_tty: bool,
) -> BasicUI {
BasicUI {
program_name,
input: Rc::new(RefCell::new(Some(input))),
input_is_tty,
output: RefCell::new(output),
output_is_tty,
verbosity: 0,
progress_enabled: true,
}
}
// Create a function that extracts input stream from this struct, returning it to the caller.
// After returned function is called, this struct loses input stream and with it the ability to
// prompt user for input/passwords.
pub fn input_stream_extractor(&mut self) -> ReaderFactory {
let input = Rc::clone(&self.input);
Box::new(move || Ok(input.borrow_mut().take().unwrap()))
}
}
impl UI for BasicUI {
fn set_verbosity(&mut self, verbosity: i32) {
self.verbosity = verbosity;
}
fn set_progress_enabled(&mut self, enabled: bool) {
self.progress_enabled = enabled;
}
fn program_name(&self) -> &str {
&self.program_name
}
// Write interface
fn will_print(&self, verbosity: i32) -> bool {
verbosity <= self.verbosity
}
fn print(&self, verbosity: i32, message: &str) -> Fallible<()> {
if self.will_print(verbosity) {
self.output.borrow_mut().write_all(message.as_bytes())?;
}
Ok(())
}
fn print_error(&self, err: &Error) -> Fallible<()> {
if self.will_print(ERROR_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}: {}", self.program_name, err)?;
}
Ok(())
}
fn println_interactive(&self, message: &str) -> Fallible<()> {
if self.will_print(INTERACTIVE_VERBOSITY) {
writeln!(self.output.borrow_mut(), "{}", message)?;
}
Ok(())
}
fn println_progress(&self, verbosity: i32, message: &str, finish: bool) -> Fallible<()> {
if self.progress_enabled {
let last_char = if finish { "\n" } else { "\r" };
let message = format!("{}{}{}", TERMINAL_CLEAR_LINE, message, last_char);
self.print(verbosity, &message)?;
}
Ok(())
}
// Read interface
fn | (&self) -> bool {
self.input.borrow().is_some()
&& self.input_is_tty
&& self.output_is_tty
&& self.will_print(INTERACTIVE_VERBOSITY)
}
fn read_prompt(&self, prompt: &str) -> Fallible<String> {
ensure!(self.can_read(), "Can't read from a non-TTY input");
let mut output = self.output.borrow_mut();
let mut input = RefMut::map(self.input.borrow_mut(), |i| i.as_mut().unwrap());
write!(output, "{}", prompt)?;
// Read from stdin byte-by-byte and convert them to utf8 characters, stopping at '\n'.
let mut char_bytes = vec![];
let mut res = String::new();
for byte in input.by_ref().bytes() {
char_bytes.push(byte?);
match std::str::from_utf8(&char_bytes) {
Ok(valid_char) => {
match valid_char {
"\n" => {
if res.ends_with('\r') {
res.pop(); // Handle Windows CRLF.
}
return Ok(res);
}
valid_char => res.push_str(valid_char),
}
char_bytes.clear();
}
Err(utf_err) => match utf_err.error_len() {
None => (), // Incomplete character - get more bytes.
Some(_) => bail!(
"Error reading from stdin: Non-UTF8 byte sequence encountered: {}",
to_hex_string(char_bytes)
),
},
}
}
Err(format_err!("Error reading from stdin: EOF"))
}
fn set_stdin_echo(&self, enable: bool) {
set_stdin_echo(enable);
}
}
#[cfg(test)]
pub mod test_helpers {
use super::*;
use std::collections::VecDeque;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PrintType {
Log { verbosity: i32 },
Error,
Interactive,
Progress { verbosity: i32, finish: bool },
}
#[derive(Default)]
pub struct TestUI {
pub prompt_replies: RefCell<VecDeque<(Option<String>, Result<String, Error>)>>,
pub printed_lines: RefCell<VecDeque<(PrintType, String, bool)>>,
}
impl TestUI {
pub fn new() -> TestUI {
TestUI {
..Default::default()
}
}
pub fn expect_prompt(
self,
matcher: impl AsRef<str>,
reply: Result<impl AsRef<str>, Error>,
) -> Self {
self.prompt_replies.borrow_mut().push_back((
Some(matcher.as_ref().to_string()),
reply.map(|s| s.as_ref().to_string()),
));
self
}
pub fn expect_all_prompts_asked(&self) {
assert_eq!(self.prompt_replies.borrow_mut().len(), 0);
}
fn append_printed_lines(&self, typ: PrintType, message: impl AsRef<str>) -> Fallible<()> {
let message = message.as_ref();
let lines = message.lines().collect::<Vec<_>>();
let lines_len = lines.len();
let mut line_tuples = lines.into_iter().enumerate().map(|(idx, line)| {
let line_finished = idx < lines_len - 1 || message.ends_with('\n');
(typ, line.to_string(), line_finished)
});
let mut printed_lines = self.printed_lines.borrow_mut();
// Append to last line if it has the same type
if let Some((last_typ, last_line, last_line_finished)) = printed_lines.back_mut() {
if *last_typ == typ && !*last_line_finished {
if let Some((_, line, finished)) = line_tuples.next() {
last_line.push_str(&line);
*last_line_finished = finished;
}
}
}
printed_lines.extend(line_tuples);
Ok(())
}
}
impl UI for TestUI {
fn set_verbosity(&mut self, _verbosity: i32) {}
fn set_progress_enabled(&mut self, _enabled: bool) {}
fn program_name(&self) -> &str {
"rypt"
}
// Write interface
fn will_print(&self, _verbosity: i32) -> bool {
true
}
fn print(&self | can_read | identifier_name |
electronics.py | -connector.length
for z in posses:
self.surface.blit(con2,(z,0))#top
self.surface.blit(con4,(z,self.dis))#bottom
self.surface.blit(con3, (0, z))#left
self.surface.blit(con1, (self.dis, z))#right
        self.interfaces = posses#attachment nodes for the circuit
    def get_interfaces(self, x, y):
        """Return the absolute connector positions on each side of a chip whose top-left corner is at (x, y)."""
        sides = {}
sides["left"] = [vec2d(x, y+z) for z in self.interfaces]
sides["right"] = [vec2d(self.dis+x, y+z) for z in self.interfaces]
sides["top"] = [vec2d(x+z, y) for z in self.interfaces]
sides["bottom"] = [vec2d(x+z, y+self.dis) for z in self.interfaces]
return sides
class Fizzle():
    """electric fizzle on the Grid (earlier draft; superseded by the Fizzle class defined below)"""
    def __init__(self, surface, connection, speed = 1):
        self.connection = connection
        self.surface = surface
        self.pos = connection.start
        self.direction = connection.end - connection.start
        self.time = connection.time
class AnimFizzle():
    """Animates a swarm of Fizzle sparks travelling along the connections of a Grid."""
    def __init__(self, grid, amount, speed, color = (250,250,100)):
self.grid = grid
connections = []
for node in grid.nodes.values():
connections.extend(node.connections)
for c in connections:
c.direction.length = speed
c.scale_time(speed)
fizimage = P.image.load(join("Circuit","blib.png"))
blitter = P.Surface(fizimage.get_size())
blitter.fill(color)
fizimage.blit(blitter, (0,0), special_flags = P.BLEND_MULT)
self.fizzles = [Fizzle(choice(connections),fizimage) for _ in range(amount)]
def render(self,surface):
copy = self.grid.surface.copy()
rects = [f.render(copy) for f in self.fizzles]
surface.blit(copy, (0,0))
return rects
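# Illustrative usage sketch (not part of the original module): how AnimFizzle would
# typically be driven from a pygame main loop. The names `grid`, `screen` and `running`
# are assumptions for the example only.
#
#   anim = AnimFizzle(grid, amount=30, speed=2)
#   clock = P.time.Clock()
#   while running:
#       anim.render(screen)   # draws the board copy plus the moving sparks
#       P.display.flip()
#       clock.tick(60)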
Fi = 0  # module-level counter giving each Fizzle instance a unique id (self.fi)
class Fizzle():
"""electric fizzle on the Grid"""
def __init__(self, connection, surface):
global Fi
self.follow(connection)
self.surface = surface
self.fi = Fi
Fi += 1
def follow(self, connection):
self.connection = connection
self.direction = connection.direction
self.pos = vec2d(connection.start)
self.time = connection.time
    def render(self, target):
        self.time -= 1
        if self.time <= 0:
            # reached the end of this connection - hop onto a random connection of the node
            self.follow(choice(self.connection.node.connections))
        self.pos += self.direction
        return target.blit(self.surface, self.pos)  # return the blit Rect so callers can collect dirty rects
class Grid():
    """Circuit board: renders the chips and their traces and builds the node/connection
    graph that Fizzle sparks travel along."""
    delta = vec2d(-1,-1)  # offset applied to every connection start/end point
class Node():
def __init__(self, position):
self.position = position
self.connections = []
def __repr__(self):
return "Node(%s,%s)" % self.position
class Connection():
def __init__(self, start, end, node):
self.start = start+Grid.delta
self.end = end+Grid.delta
self.direction = end-start
self.node = node
def scale_time(self, speed):
self.time = (self.end-self.start).length/self.direction.length
def __init__(self, size, chip, connector, positions, tilemap):
self.size = size
self.chip = chip
self.length = connector.width+connector.spacing
self.surface = P.Surface(size)
self.surface.fill(tilemap.basecolor)
chiplength = self.chip.surface.get_width()
xshift = chiplength//2
self.chipposs = []
levels = set()
rows = set()
barrows = []
barlines = []
self.nodes = {}
interfaces = {}
outsidenode = self.Node((None,None))
for x,y in positions:
x,y = pos = (x-xshift, y-xshift)
self.chipposs.append(pos)
self.nodes[(x,y)] = self.Node((x,y))
interfaces[(x,y)] = chip.get_interfaces(x,y)
if y not in levels:
levels.add(y)
barlines += (y+interface for interface in chip.interfaces)
if x not in rows:
rows.add(x)
barrows += (x+interface for interface in chip.interfaces)
        minstraight = 21  # straight run (px) from a chip edge before a trace bends toward the board edge
#####Diagonals#####
X = min(rows)
XR = max(rows)+chiplength
for y in levels:#left and right endconnectors
ys = chip.interfaces
if y == min(levels):spec = X-minstraight
elif y == max(levels):spec = -X+minstraight
else:spec = 0
for yl in ys:
yt = yl+y+spec
tilemap.draw_line(self.surface, (X-minstraight,yl+y+1), (0,yt+1))
tilemap.draw_line(self.surface, (XR+minstraight,yl+y+1), (size[0],yt+1))
Y = min(levels)
for x in rows:
if x == min(rows):spec = 0#spec = Y-minstraight
elif x == max(rows):spec = 0#spec = -Y+minstraight
else:
if x > size[0]//2:spec = Y-minstraight
else:spec = -Y+minstraight
for xl in ys:
xt = xl+x+spec
tilemap.draw_line(self.surface, (x+xl+1, Y-minstraight),(xt+1, 0))
######Straight Connections######
xs = min(rows)
xe = max(rows)+chiplength
bar = repeat(tilemap["h"], (xe-xs+2*minstraight, 5))
for y in barlines:
|
ys = min(levels)
ye = size[1]#max(levels)+chiplength
bar = repeat(tilemap["v"], (5, ye-ys+minstraight))
        for x in barrows:  # vertical traces sit at the x-positions collected in barrows (barlines holds the y-positions)
self.surface.blit(bar, (x-1, ys-minstraight))
######Chips######
[self.surface.blit(chip.surface, pos) for pos in self.chipposs]
##################Fizzle Logic######################
xs = list(rows)
xs.sort()
ys = list(levels)
ys.sort()
for x in rows:
cons = [False, False]
if xs[0] != x:#not left end
leftx = xs[xs.index(x)-1]
cons[0] = True
if xs[-1] != x:#not right end
rightx = xs[xs.index(x)+1]
cons[1] = True
for y in levels:
localnode = self.nodes[(x,y)]
if cons[0]:
leftnode = self.nodes[(leftx, y)]
for left, right in zip(interfaces[(leftx, y)]["right"],
interfaces[(x,y)]["left"]):
localnode.connections.append(self.Connection(right, left, leftnode))
if cons[1]:
rightnode = self.nodes[(rightx, y)]
for right, left in zip(interfaces[(rightx, y)]["left"],
interfaces[(x,y)]["right"]):
localnode.connections.append(self.Connection(left, right, rightnode))
if ys[-1] != y:
downy = ys[ys.index(y)+1]
downnode = self.nodes[(x, downy)]
for down, up in zip(interfaces[(x,downy)]["top"],
interfaces[(x,y)]["bottom"]):
localnode.connections.append(self.Connection(up,down, downnode))
if ys[0] != y:
upy = ys[ys.index(y)-1]
upnode = self.nodes[(x, upy)]
for up, down in zip(interfaces[(x,upy)]["bottom"],
interfaces[(x,y)]["top"]):
localnode.connections.append(self.Connection(down,up, upnode))
class TileMap():
def __init__(self, outercolor = (10,10,150), innercolor = (250,250,250)):
self.m = m = P.Color(*[(x+y)//2 for x,y in zip(innercolor, outercolor)])
self.o = o = P.Color(*outercolor)
self.i = i = P.Color(*innercolor)
self.basecolor = outercolor
size = 5,5
l = 5
self.tiles = {}
S = P.Surface(size)
PA = P.PixelArray(S)
for x,color in zip(range(5), (o,m,i,m,o)):
PA[x] = color
self.tiles["v"] = S#vertical
S = P.Surface(size)
PA = P.PixelArray(S)
for x,color in zip(range(5), (o,m,i,m,o)):
PA[:, x] = color
self.tiles["h"] = S#horizontal
def __getitem__(self, key):
return self.tiles[key]
def save_images(self):
for name, surface in self.tiles.items():
P.image.save(surface, | self.surface.blit(bar, (xs-minstraight, y-1)) | conditional_block |
electronics.py | -connector.length
for z in posses:
self.surface.blit(con2,(z,0))#top
self.surface.blit(con4,(z,self.dis))#bottom
self.surface.blit(con3, (0, z))#left
self.surface.blit(con1, (self.dis, z))#right
self.interfaces = posses#attachment nodes for circuit
def get_interfaces(self, x,y):
sides = {}
sides["left"] = [vec2d(x, y+z) for z in self.interfaces]
sides["right"] = [vec2d(self.dis+x, y+z) for z in self.interfaces]
sides["top"] = [vec2d(x+z, y) for z in self.interfaces]
sides["bottom"] = [vec2d(x+z, y+self.dis) for z in self.interfaces]
return sides
class Fizzle():
"""electric fizzle on the Grid"""
def __init__(self, surface, connection, speed = 1):
self.connection = connection
self.surface = surface
self.pos = connection.start
self.direction = self.end-self.start
self.time = connection.time
class AnimFizzle():
def __init__(self, grid,amount, speed, color = (250,250,100)):
self.grid = grid
connections = []
for node in grid.nodes.values():
connections.extend(node.connections)
for c in connections:
c.direction.length = speed
c.scale_time(speed)
fizimage = P.image.load(join("Circuit","blib.png"))
blitter = P.Surface(fizimage.get_size())
blitter.fill(color)
fizimage.blit(blitter, (0,0), special_flags = P.BLEND_MULT)
self.fizzles = [Fizzle(choice(connections),fizimage) for _ in range(amount)]
def render(self,surface):
copy = self.grid.surface.copy()
rects = [f.render(copy) for f in self.fizzles]
surface.blit(copy, (0,0))
return rects
Fi = 0
class Fizzle():
"""electric fizzle on the Grid"""
def __init__(self, connection, surface):
global Fi
self.follow(connection)
self.surface = surface
self.fi = Fi
Fi += 1
def follow(self, connection):
self.connection = connection
self.direction = connection.direction
self.pos = vec2d(connection.start)
self.time = connection.time
def render(self, target):
self.time -= 1
if self.time <= 0:
self.follow(choice(self.connection.node.connections))
self.pos += self.direction
target.blit(self.surface, self.pos)
class Grid():
delta = vec2d(-1,-1)
class Node():
def __init__(self, position):
self.position = position
self.connections = []
def __repr__(self):
return "Node(%s,%s)" % self.position
class Connection():
def __init__(self, start, end, node):
self.start = start+Grid.delta
self.end = end+Grid.delta
self.direction = end-start
self.node = node
def scale_time(self, speed):
self.time = (self.end-self.start).length/self.direction.length
def __init__(self, size, chip, connector, positions, tilemap):
self.size = size
self.chip = chip
self.length = connector.width+connector.spacing
self.surface = P.Surface(size)
self.surface.fill(tilemap.basecolor)
chiplength = self.chip.surface.get_width()
xshift = chiplength//2
self.chipposs = []
levels = set()
rows = set()
barrows = []
barlines = []
self.nodes = {}
interfaces = {}
outsidenode = self.Node((None,None))
for x,y in positions:
x,y = pos = (x-xshift, y-xshift)
self.chipposs.append(pos)
self.nodes[(x,y)] = self.Node((x,y))
interfaces[(x,y)] = chip.get_interfaces(x,y)
if y not in levels:
levels.add(y)
barlines += (y+interface for interface in chip.interfaces)
if x not in rows:
rows.add(x)
barrows += (x+interface for interface in chip.interfaces)
minstraight = 21
#####Diagonals#####
X = min(rows)
XR = max(rows)+chiplength
for y in levels:#left and right endconnectors
ys = chip.interfaces
if y == min(levels):spec = X-minstraight
elif y == max(levels):spec = -X+minstraight
else:spec = 0
for yl in ys:
yt = yl+y+spec
tilemap.draw_line(self.surface, (X-minstraight,yl+y+1), (0,yt+1))
tilemap.draw_line(self.surface, (XR+minstraight,yl+y+1), (size[0],yt+1))
Y = min(levels)
for x in rows:
if x == min(rows):spec = 0#spec = Y-minstraight
elif x == max(rows):spec = 0#spec = -Y+minstraight
else:
if x > size[0]//2:spec = Y-minstraight
else:spec = -Y+minstraight
for xl in ys:
xt = xl+x+spec
tilemap.draw_line(self.surface, (x+xl+1, Y-minstraight),(xt+1, 0))
######Straight Connections######
xs = min(rows)
xe = max(rows)+chiplength
bar = repeat(tilemap["h"], (xe-xs+2*minstraight, 5))
for y in barlines:
self.surface.blit(bar, (xs-minstraight, y-1))
ys = min(levels)
ye = size[1]#max(levels)+chiplength
bar = repeat(tilemap["v"], (5, ye-ys+minstraight))
for x in barlines:
self.surface.blit(bar, (x-1, ys-minstraight))
######Chips######
[self.surface.blit(chip.surface, pos) for pos in self.chipposs]
##################Fizzle Logic######################
xs = list(rows)
xs.sort()
ys = list(levels)
ys.sort()
for x in rows:
cons = [False, False]
if xs[0] != x:#not left end
leftx = xs[xs.index(x)-1]
cons[0] = True
if xs[-1] != x:#not right end
rightx = xs[xs.index(x)+1]
cons[1] = True
for y in levels:
localnode = self.nodes[(x,y)]
if cons[0]:
leftnode = self.nodes[(leftx, y)]
for left, right in zip(interfaces[(leftx, y)]["right"],
interfaces[(x,y)]["left"]):
localnode.connections.append(self.Connection(right, left, leftnode))
if cons[1]:
rightnode = self.nodes[(rightx, y)]
for right, left in zip(interfaces[(rightx, y)]["left"],
interfaces[(x,y)]["right"]):
localnode.connections.append(self.Connection(left, right, rightnode))
if ys[-1] != y:
downy = ys[ys.index(y)+1]
downnode = self.nodes[(x, downy)]
for down, up in zip(interfaces[(x,downy)]["top"],
interfaces[(x,y)]["bottom"]):
localnode.connections.append(self.Connection(up,down, downnode))
if ys[0] != y:
upy = ys[ys.index(y)-1]
upnode = self.nodes[(x, upy)]
for up, down in zip(interfaces[(x,upy)]["bottom"],
interfaces[(x,y)]["top"]):
localnode.connections.append(self.Connection(down,up, upnode))
class TileMap():
def __init__(self, outercolor = (10,10,150), innercolor = (250,250,250)):
self.m = m = P.Color(*[(x+y)//2 for x,y in zip(innercolor, outercolor)])
self.o = o = P.Color(*outercolor)
self.i = i = P.Color(*innercolor)
self.basecolor = outercolor
size = 5,5
l = 5
self.tiles = {}
S = P.Surface(size)
PA = P.PixelArray(S)
for x,color in zip(range(5), (o,m,i,m,o)):
PA[x] = color
self.tiles["v"] = S#vertical
S = P.Surface(size)
PA = P.PixelArray(S)
for x,color in zip(range(5), (o,m,i,m,o)):
PA[:, x] = color
self.tiles["h"] = S#horizontal | def save_images(self):
for name, surface in self.tiles.items():
P.image.save(surface, "_ |
def __getitem__(self, key):
return self.tiles[key]
| random_line_split |
electronics.py |
con1,con2,con3,con4 = connector.surfaces
posses = tuple(range(connector.indent+rest//2, connector.indent+innerlength-rest//2, ele))
self.dis = length-connector.length
for z in posses:
self.surface.blit(con2,(z,0))#top
self.surface.blit(con4,(z,self.dis))#bottom
self.surface.blit(con3, (0, z))#left
self.surface.blit(con1, (self.dis, z))#right
self.interfaces = posses#attachment nodes for circuit
def get_interfaces(self, x,y):
sides = {}
sides["left"] = [vec2d(x, y+z) for z in self.interfaces]
sides["right"] = [vec2d(self.dis+x, y+z) for z in self.interfaces]
sides["top"] = [vec2d(x+z, y) for z in self.interfaces]
sides["bottom"] = [vec2d(x+z, y+self.dis) for z in self.interfaces]
return sides
class Fizzle():
"""electric fizzle on the Grid"""
def __init__(self, surface, connection, speed = 1):
self.connection = connection
self.surface = surface
self.pos = connection.start
self.direction = self.end-self.start
self.time = connection.time
class AnimFizzle():
def __init__(self, grid,amount, speed, color = (250,250,100)):
self.grid = grid
connections = []
for node in grid.nodes.values():
connections.extend(node.connections)
for c in connections:
c.direction.length = speed
c.scale_time(speed)
fizimage = P.image.load(join("Circuit","blib.png"))
blitter = P.Surface(fizimage.get_size())
blitter.fill(color)
fizimage.blit(blitter, (0,0), special_flags = P.BLEND_MULT)
self.fizzles = [Fizzle(choice(connections),fizimage) for _ in range(amount)]
def render(self,surface):
copy = self.grid.surface.copy()
rects = [f.render(copy) for f in self.fizzles]
surface.blit(copy, (0,0))
return rects
Fi = 0
class Fizzle():
"""electric fizzle on the Grid"""
def __init__(self, connection, surface):
global Fi
self.follow(connection)
self.surface = surface
self.fi = Fi
Fi += 1
def follow(self, connection):
self.connection = connection
self.direction = connection.direction
self.pos = vec2d(connection.start)
self.time = connection.time
def render(self, target):
self.time -= 1
if self.time <= 0:
self.follow(choice(self.connection.node.connections))
self.pos += self.direction
target.blit(self.surface, self.pos)
class Grid():
delta = vec2d(-1,-1)
class Node():
def __init__(self, position):
self.position = position
self.connections = []
def __repr__(self):
return "Node(%s,%s)" % self.position
class Connection():
def __init__(self, start, end, node):
self.start = start+Grid.delta
self.end = end+Grid.delta
self.direction = end-start
self.node = node
def scale_time(self, speed):
self.time = (self.end-self.start).length/self.direction.length
def __init__(self, size, chip, connector, positions, tilemap):
self.size = size
self.chip = chip
self.length = connector.width+connector.spacing
self.surface = P.Surface(size)
self.surface.fill(tilemap.basecolor)
chiplength = self.chip.surface.get_width()
xshift = chiplength//2
self.chipposs = []
levels = set()
rows = set()
barrows = []
barlines = []
self.nodes = {}
interfaces = {}
outsidenode = self.Node((None,None))
for x,y in positions:
x,y = pos = (x-xshift, y-xshift)
self.chipposs.append(pos)
self.nodes[(x,y)] = self.Node((x,y))
interfaces[(x,y)] = chip.get_interfaces(x,y)
if y not in levels:
levels.add(y)
barlines += (y+interface for interface in chip.interfaces)
if x not in rows:
rows.add(x)
barrows += (x+interface for interface in chip.interfaces)
minstraight = 21
#####Diagonals#####
X = min(rows)
XR = max(rows)+chiplength
for y in levels:#left and right endconnectors
ys = chip.interfaces
if y == min(levels):spec = X-minstraight
elif y == max(levels):spec = -X+minstraight
else:spec = 0
for yl in ys:
yt = yl+y+spec
tilemap.draw_line(self.surface, (X-minstraight,yl+y+1), (0,yt+1))
tilemap.draw_line(self.surface, (XR+minstraight,yl+y+1), (size[0],yt+1))
Y = min(levels)
for x in rows:
if x == min(rows):spec = 0#spec = Y-minstraight
elif x == max(rows):spec = 0#spec = -Y+minstraight
else:
if x > size[0]//2:spec = Y-minstraight
else:spec = -Y+minstraight
for xl in ys:
xt = xl+x+spec
tilemap.draw_line(self.surface, (x+xl+1, Y-minstraight),(xt+1, 0))
######Straight Connections######
xs = min(rows)
xe = max(rows)+chiplength
bar = repeat(tilemap["h"], (xe-xs+2*minstraight, 5))
for y in barlines:
self.surface.blit(bar, (xs-minstraight, y-1))
ys = min(levels)
ye = size[1]#max(levels)+chiplength
bar = repeat(tilemap["v"], (5, ye-ys+minstraight))
for x in barlines:
self.surface.blit(bar, (x-1, ys-minstraight))
######Chips######
[self.surface.blit(chip.surface, pos) for pos in self.chipposs]
##################Fizzle Logic######################
xs = list(rows)
xs.sort()
ys = list(levels)
ys.sort()
for x in rows:
cons = [False, False]
if xs[0] != x:#not left end
leftx = xs[xs.index(x)-1]
cons[0] = True
if xs[-1] != x:#not right end
rightx = xs[xs.index(x)+1]
cons[1] = True
for y in levels:
localnode = self.nodes[(x,y)]
if cons[0]:
leftnode = self.nodes[(leftx, y)]
for left, right in zip(interfaces[(leftx, y)]["right"],
interfaces[(x,y)]["left"]):
localnode.connections.append(self.Connection(right, left, leftnode))
if cons[1]:
rightnode = self.nodes[(rightx, y)]
for right, left in zip(interfaces[(rightx, y)]["left"],
interfaces[(x,y)]["right"]):
localnode.connections.append(self.Connection(left, right, rightnode))
if ys[-1] != y:
downy = ys[ys.index(y)+1]
downnode = self.nodes[(x, downy)]
for down, up in zip(interfaces[(x,downy)]["top"],
interfaces[(x,y)]["bottom"]):
localnode.connections.append(self.Connection(up,down, downnode))
if ys[0] != y:
upy = ys[ys.index(y)-1]
upnode = self.nodes[(x, upy)]
for up | def __init__(self, length, connector, innercolor = P.Color(50,50,50), deviation = 3, bordercolor = P.Color(150,150,150)):
size = length,length
rect = P.Rect((0,0), size)
innerrect = rect.inflate(-connector.indent*2,-connector.indent*2)
self.surface = P.Surface(size, flags = P.SRCALPHA)
self.surface.fill((0,0,0,0))
insurface = self.surface.subsurface(innerrect)#P.Surface(self.innerrect.size)
for x in range(insurface.get_width()):
for y in range(insurface.get_height()):
insurface.set_at((x,y),[z+randint(-deviation, deviation) for z in innercolor[:3]])
innerlength = innerrect.width
ele = (connector.spacing+connector.width)
slots = innerlength//ele-2
filled = slots*ele-connector.spacing
rest = innerlength - filled
if rest%2:
print("Warning|electronics.py:could not center chip connectors, change chip size.") | identifier_body |
|
electronics.py | -connector.length
for z in posses:
self.surface.blit(con2,(z,0))#top
self.surface.blit(con4,(z,self.dis))#bottom
self.surface.blit(con3, (0, z))#left
self.surface.blit(con1, (self.dis, z))#right
self.interfaces = posses#attachment nodes for circuit
def get_interfaces(self, x,y):
sides = {}
sides["left"] = [vec2d(x, y+z) for z in self.interfaces]
sides["right"] = [vec2d(self.dis+x, y+z) for z in self.interfaces]
sides["top"] = [vec2d(x+z, y) for z in self.interfaces]
sides["bottom"] = [vec2d(x+z, y+self.dis) for z in self.interfaces]
return sides
class Fizzle():
"""electric fizzle on the Grid"""
def __init__(self, surface, connection, speed = 1):
self.connection = connection
self.surface = surface
self.pos = connection.start
self.direction = self.end-self.start
self.time = connection.time
class | ():
def __init__(self, grid,amount, speed, color = (250,250,100)):
self.grid = grid
connections = []
for node in grid.nodes.values():
connections.extend(node.connections)
for c in connections:
c.direction.length = speed
c.scale_time(speed)
fizimage = P.image.load(join("Circuit","blib.png"))
blitter = P.Surface(fizimage.get_size())
blitter.fill(color)
fizimage.blit(blitter, (0,0), special_flags = P.BLEND_MULT)
self.fizzles = [Fizzle(choice(connections),fizimage) for _ in range(amount)]
def render(self,surface):
copy = self.grid.surface.copy()
rects = [f.render(copy) for f in self.fizzles]
surface.blit(copy, (0,0))
return rects
Fi = 0
class Fizzle():
"""electric fizzle on the Grid"""
def __init__(self, connection, surface):
global Fi
self.follow(connection)
self.surface = surface
self.fi = Fi
Fi += 1
def follow(self, connection):
self.connection = connection
self.direction = connection.direction
self.pos = vec2d(connection.start)
self.time = connection.time
def render(self, target):
self.time -= 1
if self.time <= 0:
self.follow(choice(self.connection.node.connections))
self.pos += self.direction
target.blit(self.surface, self.pos)
class Grid():
delta = vec2d(-1,-1)
class Node():
def __init__(self, position):
self.position = position
self.connections = []
def __repr__(self):
return "Node(%s,%s)" % self.position
class Connection():
def __init__(self, start, end, node):
self.start = start+Grid.delta
self.end = end+Grid.delta
self.direction = end-start
self.node = node
def scale_time(self, speed):
self.time = (self.end-self.start).length/self.direction.length
def __init__(self, size, chip, connector, positions, tilemap):
self.size = size
self.chip = chip
self.length = connector.width+connector.spacing
self.surface = P.Surface(size)
self.surface.fill(tilemap.basecolor)
chiplength = self.chip.surface.get_width()
xshift = chiplength//2
self.chipposs = []
levels = set()
rows = set()
barrows = []
barlines = []
self.nodes = {}
interfaces = {}
outsidenode = self.Node((None,None))
for x,y in positions:
x,y = pos = (x-xshift, y-xshift)
self.chipposs.append(pos)
self.nodes[(x,y)] = self.Node((x,y))
interfaces[(x,y)] = chip.get_interfaces(x,y)
if y not in levels:
levels.add(y)
barlines += (y+interface for interface in chip.interfaces)
if x not in rows:
rows.add(x)
barrows += (x+interface for interface in chip.interfaces)
minstraight = 21
#####Diagonals#####
X = min(rows)
XR = max(rows)+chiplength
for y in levels:#left and right endconnectors
ys = chip.interfaces
if y == min(levels):spec = X-minstraight
elif y == max(levels):spec = -X+minstraight
else:spec = 0
for yl in ys:
yt = yl+y+spec
tilemap.draw_line(self.surface, (X-minstraight,yl+y+1), (0,yt+1))
tilemap.draw_line(self.surface, (XR+minstraight,yl+y+1), (size[0],yt+1))
Y = min(levels)
for x in rows:
if x == min(rows):spec = 0#spec = Y-minstraight
elif x == max(rows):spec = 0#spec = -Y+minstraight
else:
if x > size[0]//2:spec = Y-minstraight
else:spec = -Y+minstraight
for xl in ys:
xt = xl+x+spec
tilemap.draw_line(self.surface, (x+xl+1, Y-minstraight),(xt+1, 0))
######Straight Connections######
xs = min(rows)
xe = max(rows)+chiplength
bar = repeat(tilemap["h"], (xe-xs+2*minstraight, 5))
for y in barlines:
self.surface.blit(bar, (xs-minstraight, y-1))
ys = min(levels)
ye = size[1]#max(levels)+chiplength
bar = repeat(tilemap["v"], (5, ye-ys+minstraight))
for x in barlines:
self.surface.blit(bar, (x-1, ys-minstraight))
######Chips######
[self.surface.blit(chip.surface, pos) for pos in self.chipposs]
##################Fizzle Logic######################
xs = list(rows)
xs.sort()
ys = list(levels)
ys.sort()
for x in rows:
cons = [False, False]
if xs[0] != x:#not left end
leftx = xs[xs.index(x)-1]
cons[0] = True
if xs[-1] != x:#not right end
rightx = xs[xs.index(x)+1]
cons[1] = True
for y in levels:
localnode = self.nodes[(x,y)]
if cons[0]:
leftnode = self.nodes[(leftx, y)]
for left, right in zip(interfaces[(leftx, y)]["right"],
interfaces[(x,y)]["left"]):
localnode.connections.append(self.Connection(right, left, leftnode))
if cons[1]:
rightnode = self.nodes[(rightx, y)]
for right, left in zip(interfaces[(rightx, y)]["left"],
interfaces[(x,y)]["right"]):
localnode.connections.append(self.Connection(left, right, rightnode))
if ys[-1] != y:
downy = ys[ys.index(y)+1]
downnode = self.nodes[(x, downy)]
for down, up in zip(interfaces[(x,downy)]["top"],
interfaces[(x,y)]["bottom"]):
localnode.connections.append(self.Connection(up,down, downnode))
if ys[0] != y:
upy = ys[ys.index(y)-1]
upnode = self.nodes[(x, upy)]
for up, down in zip(interfaces[(x,upy)]["bottom"],
interfaces[(x,y)]["top"]):
localnode.connections.append(self.Connection(down,up, upnode))
class TileMap():
def __init__(self, outercolor = (10,10,150), innercolor = (250,250,250)):
self.m = m = P.Color(*[(x+y)//2 for x,y in zip(innercolor, outercolor)])
self.o = o = P.Color(*outercolor)
self.i = i = P.Color(*innercolor)
self.basecolor = outercolor
size = 5,5
l = 5
self.tiles = {}
S = P.Surface(size)
PA = P.PixelArray(S)
for x,color in zip(range(5), (o,m,i,m,o)):
PA[x] = color
self.tiles["v"] = S#vertical
S = P.Surface(size)
PA = P.PixelArray(S)
for x,color in zip(range(5), (o,m,i,m,o)):
PA[:, x] = color
self.tiles["h"] = S#horizontal
def __getitem__(self, key):
return self.tiles[key]
def save_images(self):
for name, surface in self.tiles.items():
P.image.save(surface, | AnimFizzle | identifier_name |
SL1_ImportData.py | -----------<<< Setting constant values that are to be used inside function >>>----------- #
DatasetName = config['BigQueryConfig']['DatasetName']
SIDs = ast.literal_eval(config['DomainConfig']['SIDs'])
DataGrabMethodology = config['DomainConfig']['UseStaticOrDynamicCurrentDay']
LevBasedPrint('Inside "'+GenerateTableNames.__name__+'" function and configurations for this has been set.',3,1)
LevBasedPrint('Data collection methodology that has been selected : ' + str(DataGrabMethodology),3)
if DataGrabMethodology == 'static':
Dates = ast.literal_eval(config['IfStatic']['Date'])
StaDataWindow = ast.literal_eval(config['IfStatic']['DataGrabWindow_Days'])
elif DataGrabMethodology == 'dynamic':
DynDataWindow = int(ast.literal_eval(config['IfDynamic']['DataGrabWindow_Hr']))
else:
txt = 'Exception: Wrong Configuration has been passed in "UseStaticOrDynamicCurrentDay".'
AddRecommendation(txt, config)
raise Exception(txt)
# -----------------------------<<< Generating Table Names >>>------------------------------ #
## Generating Table Names
if DataGrabMethodology == 'static':
if StaDataWindow != '-':
CustomDate = date(2000 + int(Dates[0][4:6]), int(Dates[0][2:4]), int(Dates[0][0:2]))
format = '%d%m%y'
Dates = [ (CustomDate + timedelta(days=i)).strftime(format) for i in range(int(StaDataWindow)) ]
TableToInclude = ''
for i in range(len(SIDs)):
for j in range(len(Dates)):
TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + Dates[j] + '_%"\'),'
elif DataGrabMethodology == 'dynamic':
CurrentTime = datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday, time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec) ## UTC
TableDateToTake = []
while DynDataWindow >= -1: ## -1 to even include the current hour table
tempDate = CurrentTime - timedelta(days = 0, hours = DynDataWindow, minutes = 0)
TableDateToTake.append(tempDate.strftime(format = '%d%m%y_%H'))
DynDataWindow -= 1
TableToInclude, TableCnt = '', 0
for i in range(len(SIDs)):
for j in range(len(TableDateToTake)):
TableCnt += 1
TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + TableDateToTake[j] + '%"\'),'
LevBasedPrint('Total number of tables accessed : '+str(TableCnt),3)
# ---------------------------------------<<< xyz >>>--------------------------------------- #
LevBasedPrint('',3,1)
return TableToInclude
# ------------------------------------------------------------------------------------------- #
def GrabAnySizeDatafromGoogleBQ(config):
| for i in range(1000): ##even if the bin size is as small as an hour, BQ has a limitation of accessing up to a max of 1000 tables, so this is the max possible limit
ll_insec = int(i*BinSizeBasedOnPeriod_Hr *3600)
ul_insec = int((i+1)*BinSizeBasedOnPeriod_Hr *3600 - 1)
GroupsToInclude += '\n\tWHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN {low} AND {upp} THEN "Bin_{WhichBin}"'.format(low= ll_insec,upp= ul_insec, WhichBin= i)
# ------------------------<<< Reading Query From External File >>>------------------------- #
LevBasedPrint('Read from a locally saved Query File', 2)
queryfile = open(BQ_QueryFile, 'r')
query = queryfile.read()
queryfile.close()
# --------------------<<< Importing Data in Max possible batch size >>>-------------------- #
## looping over the limit and offset to grab the maximum possible bite in terms of observation that can be gathered
## GP
start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract
ratio = 1/LimitDecreaseFactor
limit = 1000 ## until which point to keep trying to gather the data ## Hardcoded
length = 1000
# query='''SELECT 1 limit {lim} offset {off}'''
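# Illustration of the limit back-off below (numbers are hypothetical, not taken from config):
# with LimitToStartWith = 1000000 and LimitDecreaseFactor = 2, the list comprehension tries
# LIMIT values 1000000, 500000, 250000, ... and excludes anything that falls to 1000 or less;
# the first LIMIT that pages through every OFFSET without a BQ error is the one that fills DF,
# and later (smaller) candidates are skipped because DF is no longer empty.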
DF = pd.DataFrame()
##GP
for i in [ int(start * ratio ** (n - 1)) for n in range(1, length + 1) if start * ratio ** (n - 1) > limit ]:
if DF.shape == (0, 0):
try:
offcurr = 0
while offcurr < start:
LevBasedPrint('Setting used in extracting data from BQ:\tNo. of obs. extracted per cycle (limit) = ' + str(i) + '\tOffset = ' + str(offcurr),2)
QueryToUse = query.format(BinToUse = GroupsToInclude, TableToInclude = TableToInclude, lim = str(i), off = str(offcurr))
tempDF = Exec_BQ(QueryToUse, BQ_Cred)
DF = DF.append(tempDF, ignore_index = True)
offcurr += i
except Exception as error:
txt = 'Exception: In importing data from BQ was thrown!\nLimit used: ' + str(i) + '\n' + str(error)
LevBasedPrint(txt, 2)
AddRecommendation(txt, config)
# raise Exception(txt)
# ---------------------------------------<<< xyz >>>--------------------------------------- #
LevBasedPrint('',2,1)
return DF
# ------------------------------------------------------------------------------------------- #
# -------------------------------------------------- ImportData --------------------------------------------------- #
def ImportData(config):
"""
Can be used to import data from either storage or BQ
Extracts any size data from any SID of any number of days.
Works in two configurations (config['IterationAim']['CycleType']), namely 'TrainTest' and 'GlTest'.
'TrainTest' is for model training; that dataset is split later to make its size adequate for training using sampling.
'GlTest' is purely for prediction, i.e. it will be used as a test set only and will consume the saved model to provide labels to observations.
"""
# -----------<<< Setting constant values that are to be used inside function >>>----------- #
AccessDataFrom = config['DataCollection']['GetDataFrom']
if AccessDataFrom == 'BQ':
SettingToUse = config['IterationAim']['CycleType']
if SettingToUse: GlTestDataSize = int(config['IterationAim']['GlTest_DataGrabWindow_Hr'])
FileLocalSavingName = config['InputPaths']['BQ_RawDataStoringName'].format(SettingToUse)
GetNewCopy = config['DomainConfig']['BQ_GetNewCopyOfData']
elif AccessDataFrom == 'Storage':
FileName = config['InputPaths']['Storage_RawData']
else:
print('Wrong setting in "GetDataFrom", current value is {}'.format(AccessDataFrom))
txt = 'Exception: Wrong Configuration has been passed in "GetDataFrom".'
AddRecommendation(txt, config)
raise Exception(txt)
LevBasedPrint('Inside "'+ImportData.__name__+'" function and configurations for this has been set.',1,1)
LevBasedPrint('Accessing data from {}'.format(AccessDataFrom), 1)
# ----------------------------<<< Accessing Data from BQ >>>------------------------------- #
if AccessDataFrom == 'BQ':
# -----------------------<<< Setting Configuration for GlTest >>>-------------------------- #
if(SettingToUse == 'GlTest'):
| '''
In case the dataset size is too large, this function enables extraction of the whole dataset by getting the data in chunks.
'''
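# Minimal sketch of the strategy implemented below (based only on the calls visible in this file):
# 1. build the TABLE_QUERY list via GenerateTableNames(config)
# 2. for the ICLSSTA rule, build the CASE ... WHEN expression that bins rows by (CurrentTimeStamp - CurrentHitTimeStamp)
# 3. read the query template from InputPaths/BQ_DataImportQuery and fill {BinToUse}/{TableToInclude}/{lim}/{off}
# 4. page through the result with LIMIT/OFFSET, shrinking the LIMIT by BQ_LimitDecreaseFactor whenever BQ errors out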
# -----------<<< Setting constant values that are to be used inside function >>>----------- #
ModuleSetting = config['Config']['ModuleSettingRuleName']
BQ_Cred = config['BigQueryConfig']['ProjectID']
if ModuleSetting == 'ICLSSTA': BinSizeBasedOnPeriod_Hr = int(config['Config']['ICLSSTA_BinSizeBasedOnPeriod_Hr'])
BQ_QueryFile = config['InputPaths']['BQ_DataImportQuery']
LimitToStartWith = config['BigQueryConfig']['BQ_LimitToStart']
LimitDecreaseFactor = float(config['BigQueryConfig']['BQ_LimitDecreaseFactor'])
LevBasedPrint('Inside "'+GrabAnySizeDatafromGoogleBQ.__name__+'" function and configurations for this has been set.',2,1)
# -------------------------<<< Generating Tables Name To Query >>>------------------------- #
TableToInclude = GenerateTableNames(config)
#print(TableToInclude)
# -------------------------<<< Creating Bin Setting For ICLSSTA >>>------------------------ #
## Getting the string that will be used to create bins for grouping based on a certain TimePeriod
GroupsToInclude = ''
if ModuleSetting == 'ICLSSTA': | identifier_body |
SL1_ImportData.py | -----------<<< Setting constant values that are to be used inside function >>>----------- #
DatasetName = config['BigQueryConfig']['DatasetName']
SIDs = ast.literal_eval(config['DomainConfig']['SIDs']) | StaDataWindow = ast.literal_eval(config['IfStatic']['DataGrabWindow_Days'])
elif DataGrabMethodology == 'dynamic':
DynDataWindow = int(ast.literal_eval(config['IfDynamic']['DataGrabWindow_Hr']))
else:
txt = 'Exception: Wrong Configuration has been passed in "UseStaticOrDynamicCurrentDay".'
AddRecommendation(txt, config)
raise Exception(txt)
# -----------------------------<<< Generating Table Names >>>------------------------------ #
## Generating Table Names
if DataGrabMethodology == 'static':
if StaDataWindow != '-':
CustomDate = date(2000 + int(Dates[0][4:6]), int(Dates[0][2:4]), int(Dates[0][0:2]))
format = '%d%m%y'
Dates = [ (CustomDate + timedelta(days=i)).strftime(format) for i in range(int(StaDataWindow)) ]
TableToInclude = ''
for i in range(len(SIDs)):
for j in range(len(Dates)):
TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + Dates[j] + '_%"\'),'
elif DataGrabMethodology == 'dynamic':
CurrentTime = datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday, time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec) ## UTC
TableDateToTake = []
while DynDataWindow >= -1: ## -1 to even include the current hour table
tempDate = CurrentTime - timedelta(days = 0, hours = DynDataWindow, minutes = 0)
TableDateToTake.append(tempDate.strftime(format = '%d%m%y_%H'))
DynDataWindow -= 1
TableToInclude, TableCnt = '', 0
for i in range(len(SIDs)):
for j in range(len(TableDateToTake)):
TableCnt += 1
TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + TableDateToTake[j] + '%"\'),'
LevBasedPrint('Total number of tables accessed : '+str(TableCnt),3)
# ---------------------------------------<<< xyz >>>--------------------------------------- #
LevBasedPrint('',3,1)
return TableToInclude
# ------------------------------------------------------------------------------------------- #
def GrabAnySizeDatafromGoogleBQ(config):
'''
In case the dataset size is too large, this function enables extraction of the whole dataset by getting the data in chunks.
'''
# -----------<<< Setting constant values that are to be used inside function >>>----------- #
ModuleSetting = config['Config']['ModuleSettingRuleName']
BQ_Cred = config['BigQueryConfig']['ProjectID']
if ModuleSetting == 'ICLSSTA': BinSizeBasedOnPeriod_Hr = int(config['Config']['ICLSSTA_BinSizeBasedOnPeriod_Hr'])
BQ_QueryFile = config['InputPaths']['BQ_DataImportQuery']
LimitToStartWith = config['BigQueryConfig']['BQ_LimitToStart']
LimitDecreaseFactor = float(config['BigQueryConfig']['BQ_LimitDecreaseFactor'])
LevBasedPrint('Inside "'+GrabAnySizeDatafromGoogleBQ.__name__+'" function and configurations for this has been set.',2,1)
# -------------------------<<< Generating Tables Name To Query >>>------------------------- #
TableToInclude = GenerateTableNames(config)
#print(TableToInclude)
# -------------------------<<< Creating Bin Setting For ICLSSTA >>>------------------------ #
## Getting the string that will be used to create bins for grouping based on a certain TimePeriod
GroupsToInclude = ''
if ModuleSetting == 'ICLSSTA':
for i in range(1000): ##even if the bin size is as small as an hour, BQ has a limitation of accessing up to a max of 1000 tables, so this is the max possible limit
ll_insec = int(i*BinSizeBasedOnPeriod_Hr *3600)
ul_insec = int((i+1)*BinSizeBasedOnPeriod_Hr *3600 - 1)
GroupsToInclude += '\n\tWHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN {low} AND {upp} THEN "Bin_{WhichBin}"'.format(low= ll_insec,upp= ul_insec, WhichBin= i)
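# For example, with BinSizeBasedOnPeriod_Hr = 1 (a hypothetical setting) the generated clauses start as:
# WHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN 0 AND 3599 THEN "Bin_0"
# WHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN 3600 AND 7199 THEN "Bin_1" ... and so on.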
# ------------------------<<< Reading Query From External File >>>------------------------- #
LevBasedPrint('Read from a locally saved Query File', 2)
queryfile = open(BQ_QueryFile, 'r')
query = queryfile.read()
queryfile.close()
# --------------------<<< Importing Data in Max possible batch size >>>-------------------- #
## looping over the limit and offset to grab the maximum possible bite in terms of observation that can be gathered
## GP
start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract
ratio = 1/LimitDecreaseFactor
limit = 1000 ## until which point to keep trying to gather the data ## Hardcoded
length = 1000
# query='''SELECT 1 limit {lim} offset {off}'''
DF = pd.DataFrame()
##GP
for i in [ int(start * ratio ** (n - 1)) for n in range(1, length + 1) if start * ratio ** (n - 1) > limit ]:
if DF.shape == (0, 0):
try:
offcurr = 0
while offcurr < start:
LevBasedPrint('Setting used in extracting data from BQ:\tNo. of obs. extracted per cycle (limit) = ' + str(i) + '\tOffset = ' + str(offcurr),2)
QueryToUse = query.format(BinToUse = GroupsToInclude, TableToInclude = TableToInclude, lim = str(i), off = str(offcurr))
tempDF = Exec_BQ(QueryToUse, BQ_Cred)
DF = DF.append(tempDF, ignore_index = True)
offcurr += i
except Exception as error:
txt = 'Exception: In importing data from BQ was thrown!\nLimit used: ' + str(i) + '\n' + str(error)
LevBasedPrint(txt, 2)
AddRecommendation(txt, config)
# raise Exception(txt)
# ---------------------------------------<<< xyz >>>--------------------------------------- #
LevBasedPrint('',2,1)
return DF
# ------------------------------------------------------------------------------------------- #
# -------------------------------------------------- ImportData --------------------------------------------------- #
def ImportData(config):
"""
Can be used to import data from either storage or BQ
Extracts any size data from any SID of any number of days.
Works in two configurations (config['IterationAim']['CycleType']), namely 'TrainTest' and 'GlTest'.
'TrainTest' is for model training; that dataset is split later to make its size adequate for training using sampling.
'GlTest' is purely for prediction, i.e. it will be used as a test set only and will consume the saved model to provide labels to observations.
"""
# -----------<<< Setting constant values that are to be used inside function >>>----------- #
AccessDataFrom = config['DataCollection']['GetDataFrom']
if AccessDataFrom == 'BQ':
SettingToUse = config['IterationAim']['CycleType']
if SettingToUse: GlTestDataSize = int(config['IterationAim']['GlTest_DataGrabWindow_Hr'])
FileLocalSavingName = config['InputPaths']['BQ_RawDataStoringName'].format(SettingToUse)
GetNewCopy = config['DomainConfig']['BQ_GetNewCopyOfData']
elif AccessDataFrom == 'Storage':
FileName = config['InputPaths']['Storage_RawData']
else:
print('Wrong setting in "GetDataFrom", current value is {}'.format(AccessDataFrom))
txt = 'Exception: Wrong Configuration has been passed in "GetDataFrom".'
AddRecommendation(txt, config)
raise Exception(txt)
LevBasedPrint('Inside "'+ImportData.__name__+'" function and configurations for this has been set.',1,1)
LevBasedPrint('Accessing data from {}'.format(AccessDataFrom), 1)
# ----------------------------<<< Accessing Data from BQ >>>------------------------------- #
if AccessDataFrom == 'BQ':
# -----------------------<<< Setting Configuration for GlTest >>>-------------------------- #
if(SettingToUse == 'GlTest'):
| DataGrabMethodology = config['DomainConfig']['UseStaticOrDynamicCurrentDay']
LevBasedPrint('Inside "'+GenerateTableNames.__name__+'" function and configurations for this has been set.',3,1)
LevBasedPrint('Data collection methodology that has been selected : ' + str(DataGrabMethodology),3)
if DataGrabMethodology == 'static':
Dates = ast.literal_eval(config['IfStatic']['Date']) | random_line_split |
SL1_ImportData.py | -----------<<< Setting constant values that are to be used inside function >>>----------- #
DatasetName = config['BigQueryConfig']['DatasetName']
SIDs = ast.literal_eval(config['DomainConfig']['SIDs'])
DataGrabMethodology = config['DomainConfig']['UseStaticOrDynamicCurrentDay']
LevBasedPrint('Inside "'+GenerateTableNames.__name__+'" function and configurations for this has been set.',3,1)
LevBasedPrint('Data collection methodology that has been selected : ' + str(DataGrabMethodology),3)
if DataGrabMethodology == 'static':
Dates = ast.literal_eval(config['IfStatic']['Date'])
StaDataWindow = ast.literal_eval(config['IfStatic']['DataGrabWindow_Days'])
elif DataGrabMethodology == 'dynamic':
DynDataWindow = int(ast.literal_eval(config['IfDynamic']['DataGrabWindow_Hr']))
else:
txt = 'Exception: Wrong Configuration has been passed in "UseStaticOrDynamicCurrentDay".'
AddRecommendation(txt, config)
raise Exception(txt)
# -----------------------------<<< Generating Table Names >>>------------------------------ #
## Generating Table Names
if DataGrabMethodology == 'static':
if StaDataWindow != '-':
CustomDate = date(2000 + int(Dates[0][4:6]), int(Dates[0][2:4]), int(Dates[0][0:2]))
format = '%d%m%y'
Dates = [ (CustomDate + timedelta(days=i)).strftime(format) for i in range(int(StaDataWindow)) ]
TableToInclude = ''
for i in range(len(SIDs)):
for j in range(len(Dates)):
TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + Dates[j] + '_%"\'),'
elif DataGrabMethodology == 'dynamic':
CurrentTime = datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday, time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec) ## UTC
TableDateToTake = []
while DynDataWindow >= -1: ## -1 to even include the current hour table
tempDate = CurrentTime - timedelta(days = 0, hours = DynDataWindow, minutes = 0)
TableDateToTake.append(tempDate.strftime(format = '%d%m%y_%H'))
DynDataWindow -= 1
TableToInclude, TableCnt = '', 0
for i in range(len(SIDs)):
for j in range(len(TableDateToTake)):
TableCnt += 1
TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + TableDateToTake[j] + '%"\'),'
LevBasedPrint('Total number of tables accessed : '+str(TableCnt),3)
# ---------------------------------------<<< xyz >>>--------------------------------------- #
LevBasedPrint('',3,1)
return TableToInclude
# ------------------------------------------------------------------------------------------- #
def GrabAnySizeDatafromGoogleBQ(config):
'''
In case the dataset size is too large, this function enables extraction of the whole dataset by getting the data in chunks.
'''
# -----------<<< Setting constant values that are to be used inside function >>>----------- #
ModuleSetting = config['Config']['ModuleSettingRuleName']
BQ_Cred = config['BigQueryConfig']['ProjectID']
if ModuleSetting == 'ICLSSTA': BinSizeBasedOnPeriod_Hr = int(config['Config']['ICLSSTA_BinSizeBasedOnPeriod_Hr'])
BQ_QueryFile = config['InputPaths']['BQ_DataImportQuery']
LimitToStartWith = config['BigQueryConfig']['BQ_LimitToStart']
LimitDecreaseFactor = float(config['BigQueryConfig']['BQ_LimitDecreaseFactor'])
LevBasedPrint('Inside "'+GrabAnySizeDatafromGoogleBQ.__name__+'" function and configurations for this has been set.',2,1)
# -------------------------<<< Generating Tables Name To Query >>>------------------------- #
TableToInclude = GenerateTableNames(config)
#print(TableToInclude)
# -------------------------<<< Creating Bin Setting For ICLSSTA >>>------------------------ #
## Getting the string that will be used to create bins for grouping based on a certain TimePeriod
GroupsToInclude = ''
if ModuleSetting == 'ICLSSTA':
for i in range(1000): ##even if the bin size is as small as an hour, BQ has a limitation of accessing up to a max of 1000 tables, so this is the max possible limit
ll_insec = int(i*BinSizeBasedOnPeriod_Hr *3600)
ul_insec = int((i+1)*BinSizeBasedOnPeriod_Hr *3600 - 1)
GroupsToInclude += '\n\tWHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN {low} AND {upp} THEN "Bin_{WhichBin}"'.format(low= ll_insec,upp= ul_insec, WhichBin= i)
# ------------------------<<< Reading Query From External File >>>------------------------- #
LevBasedPrint('Read from a locally saved Query File', 2)
queryfile = open(BQ_QueryFile, 'r')
query = queryfile.read()
queryfile.close()
# --------------------<<< Importing Data in Max possible batch size >>>-------------------- #
## looping over the limit and offset to grab the maximum possible bite in terms of observation that can be gathered
## GP
start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract
ratio = 1/LimitDecreaseFactor
limit = 1000 ## until which point to keep trying to gather the data ## Hardcoded
length = 1000
# query='''SELECT 1 limit {lim} offset {off}'''
DF = pd.DataFrame()
##GP
for i in [ int(start * ratio ** (n - 1)) for n in range(1, length + 1) if start * ratio ** (n - 1) > limit ]:
if DF.shape == (0, 0):
try:
offcurr = 0
while offcurr < start:
LevBasedPrint('Setting used in extracting data from BQ:\tNo. of obs. extracted per cycle (limit) = ' + str(i) + '\tOffset = ' + str(offcurr),2)
QueryToUse = query.format(BinToUse = GroupsToInclude, TableToInclude = TableToInclude, lim = str(i), off = str(offcurr))
tempDF = Exec_BQ(QueryToUse, BQ_Cred)
DF = DF.append(tempDF, ignore_index = True)
offcurr += i
except Exception as error:
txt = 'Exception: In importing data from BQ was thrown!\nLimit used: ' + str(i) + '\n' + str(error)
LevBasedPrint(txt, 2)
AddRecommendation(txt, config)
# raise Exception(txt)
# ---------------------------------------<<< xyz >>>--------------------------------------- #
LevBasedPrint('',2,1)
return DF
# ------------------------------------------------------------------------------------------- #
# -------------------------------------------------- ImportData --------------------------------------------------- #
def | (config):
"""
Can be used to import data from either storage or BQ
Extracts any size data from any SID of any number of days.
Works in two configurations (config['IterationAim']['CycleType']), namely 'TrainTest' and 'GlTest'.
'TrainTest' is for model training; that dataset is split later to make its size adequate for training using sampling.
'GlTest' is purely for prediction, i.e. it will be used as a test set only and will consume the saved model to provide labels to observations.
"""
# -----------<<< Setting constant values that are to be used inside function >>>----------- #
AccessDataFrom = config['DataCollection']['GetDataFrom']
if AccessDataFrom == 'BQ':
SettingToUse = config['IterationAim']['CycleType']
if SettingToUse: GlTestDataSize = int(config['IterationAim']['GlTest_DataGrabWindow_Hr'])
FileLocalSavingName = config['InputPaths']['BQ_RawDataStoringName'].format(SettingToUse)
GetNewCopy = config['DomainConfig']['BQ_GetNewCopyOfData']
elif AccessDataFrom == 'Storage':
FileName = config['InputPaths']['Storage_RawData']
else:
print('Wrong setting in "GetDataFrom", current value is {}'.format(AccessDataFrom))
txt = 'Exception: Wrong Configuration has been passed in "GetDataFrom".'
AddRecommendation(txt, config)
raise Exception(txt)
LevBasedPrint('Inside "'+ImportData.__name__+'" function and configurations for this has been set.',1,1)
LevBasedPrint('Accessing data from {}'.format(AccessDataFrom), 1)
# ----------------------------<<< Accessing Data from BQ >>>------------------------------- #
if AccessDataFrom == 'BQ':
# -----------------------<<< Setting Configuration for GlTest >>>-------------------------- #
if(SettingToUse == 'GlTest | ImportData | identifier_name |
SL1_ImportData.py | -----------<<< Setting constant values that are to be used inside function >>>----------- #
DatasetName = config['BigQueryConfig']['DatasetName']
SIDs = ast.literal_eval(config['DomainConfig']['SIDs'])
DataGrabMethodology = config['DomainConfig']['UseStaticOrDynamicCurrentDay']
LevBasedPrint('Inside "'+GenerateTableNames.__name__+'" function and configurations for this has been set.',3,1)
LevBasedPrint('Data collection methodology that has been selected : ' + str(DataGrabMethodology),3)
if DataGrabMethodology == 'static':
Dates = ast.literal_eval(config['IfStatic']['Date'])
StaDataWindow = ast.literal_eval(config['IfStatic']['DataGrabWindow_Days'])
elif DataGrabMethodology == 'dynamic':
DynDataWindow = int(ast.literal_eval(config['IfDynamic']['DataGrabWindow_Hr']))
else:
txt = 'Exception: Wrong Configuration has been passed in "UseStaticOrDynamicCurrentDay".'
AddRecommendation(txt, config)
raise Exception(txt)
# -----------------------------<<< Generating Table Names >>>------------------------------ #
## Generating Table Names
if DataGrabMethodology == 'static':
if StaDataWindow != '-':
CustomDate = date(2000 + int(Dates[0][4:6]), int(Dates[0][2:4]), int(Dates[0][0:2]))
format = '%d%m%y'
Dates = [ (CustomDate + timedelta(days=i)).strftime(format) for i in range(int(StaDataWindow)) ]
TableToInclude = ''
for i in range(len(SIDs)):
for j in range(len(Dates)):
TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + Dates[j] + '_%"\'),'
elif DataGrabMethodology == 'dynamic':
CurrentTime = datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday, time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec) ## UTC
TableDateToTake = []
while DynDataWindow >= -1: ## -1 to even include the current hour table
tempDate = CurrentTime - timedelta(days = 0, hours = DynDataWindow, minutes = 0)
TableDateToTake.append(tempDate.strftime(format = '%d%m%y_%H'))
DynDataWindow -= 1
TableToInclude, TableCnt = '', 0
for i in range(len(SIDs)):
for j in range(len(TableDateToTake)):
TableCnt += 1
TableToInclude += '\n\tTABLE_QUERY([{}.Citadel_Stream],\'table_id like "'.format(DatasetName) + SIDs[i] + '_' + TableDateToTake[j] + '%"\'),'
LevBasedPrint('Total number of tables accessed : '+str(TableCnt),3)
# ---------------------------------------<<< xyz >>>--------------------------------------- #
LevBasedPrint('',3,1)
return TableToInclude
# ------------------------------------------------------------------------------------------- #
def GrabAnySizeDatafromGoogleBQ(config):
'''
In case the dataset size is too large, this function enables extraction of the whole dataset by getting the data in chunks.
'''
# -----------<<< Setting constant values that are to be used inside function >>>----------- #
ModuleSetting = config['Config']['ModuleSettingRuleName']
BQ_Cred = config['BigQueryConfig']['ProjectID']
if ModuleSetting == 'ICLSSTA': BinSizeBasedOnPeriod_Hr = int(config['Config']['ICLSSTA_BinSizeBasedOnPeriod_Hr'])
BQ_QueryFile = config['InputPaths']['BQ_DataImportQuery']
LimitToStartWith = config['BigQueryConfig']['BQ_LimitToStart']
LimitDecreaseFactor = float(config['BigQueryConfig']['BQ_LimitDecreaseFactor'])
LevBasedPrint('Inside "'+GrabAnySizeDatafromGoogleBQ.__name__+'" function and configurations for this has been set.',2,1)
# -------------------------<<< Generating Tables Name To Query >>>------------------------- #
TableToInclude = GenerateTableNames(config)
#print(TableToInclude)
# -------------------------<<< Creating Bin Setting For ICLSSTA >>>------------------------ #
## Getting the string that will be used to create bins for grouping based on a certain TimePeriod
GroupsToInclude = ''
if ModuleSetting == 'ICLSSTA':
for i in range(1000): ##even if the bin size is as small as an hour, BQ has a limitation of accessing up to a max of 1000 tables, so this is the max possible limit
ll_insec = int(i*BinSizeBasedOnPeriod_Hr *3600)
ul_insec = int((i+1)*BinSizeBasedOnPeriod_Hr *3600 - 1)
GroupsToInclude += '\n\tWHEN (CurrentTimeStamp - CurrentHitTimeStamp) BETWEEN {low} AND {upp} THEN "Bin_{WhichBin}"'.format(low= ll_insec,upp= ul_insec, WhichBin= i)
# ------------------------<<< Reading Query From External File >>>------------------------- #
LevBasedPrint('Read from a locally saved Query File', 2)
queryfile = open(BQ_QueryFile, 'r')
query = queryfile.read()
queryfile.close()
# --------------------<<< Importing Data in Max possible batch size >>>-------------------- #
## looping over the limit and offset to grab the maximum possible bite in terms of observation that can be gathered
## GP
start = int(LimitToStartWith) # should be equal to the maximum number of observation that you want to extract
ratio = 1/LimitDecreaseFactor
limit = 1000 ## until which point to keep trying to gather the data ## Hardcoded
length = 1000
# query='''SELECT 1 limit {lim} offset {off}'''
DF = pd.DataFrame()
##GP
for i in [ int(start * ratio ** (n - 1)) for n in range(1, length + 1) if start * ratio ** (n - 1) > limit ]:
if DF.shape == (0, 0):
try:
offcurr = 0
while offcurr < start:
|
except Exception as error:
txt = 'Exception: In importing data from BQ was thrown!\nLimit used: ' + str(i) + '\n' + str(error)
LevBasedPrint(txt, 2)
AddRecommendation(txt, config)
# raise Exception(txt)
# ---------------------------------------<<< xyz >>>--------------------------------------- #
LevBasedPrint('',2,1)
return DF
# ------------------------------------------------------------------------------------------- #
# -------------------------------------------------- ImportData --------------------------------------------------- #
def ImportData(config):
"""
Can be used to import data from either storage or BQ
Extracts any size data from any SID of any number of days.
Works in two configurations (config['IterationAim']['CycleType']), namely 'TrainTest' and 'GlTest'.
'TrainTest' is for model training; that dataset is split later to make its size adequate for training using sampling.
'GlTest' is purely for prediction, i.e. it will be used as a test set only and will consume the saved model to provide labels to observations.
"""
# -----------<<< Setting constant values that are to be used inside function >>>----------- #
AccessDataFrom = config['DataCollection']['GetDataFrom']
if AccessDataFrom == 'BQ':
SettingToUse = config['IterationAim']['CycleType']
if SettingToUse: GlTestDataSize = int(config['IterationAim']['GlTest_DataGrabWindow_Hr'])
FileLocalSavingName = config['InputPaths']['BQ_RawDataStoringName'].format(SettingToUse)
GetNewCopy = config['DomainConfig']['BQ_GetNewCopyOfData']
elif AccessDataFrom == 'Storage':
FileName = config['InputPaths']['Storage_RawData']
else:
print('Wrong setting in "GetDataFrom", current value is {}'.format(AccessDataFrom))
txt = 'Exception: Wrong Configuration has been passed in "GetDataFrom".'
AddRecommendation(txt, config)
raise Exception(txt)
LevBasedPrint('Inside "'+ImportData.__name__+'" function and configurations for this has been set.',1,1)
LevBasedPrint('Accessing data from {}'.format(AccessDataFrom), 1)
# ----------------------------<<< Accessing Data from BQ >>>------------------------------- #
if AccessDataFrom == 'BQ':
# -----------------------<<< Setting Configuration for GlTest >>>-------------------------- #
if(SettingToUse == 'GlTest | LevBasedPrint('Setting used in extracting data from BQ:\tNo. of obs. extracted per cycle (limit) = ' + str(i) + '\tOffset = ' + str(offcurr),2)
QueryToUse = query.format(BinToUse = GroupsToInclude, TableToInclude = TableToInclude, lim = str(i), off = str(offcurr))
tempDF = Exec_BQ(QueryToUse, BQ_Cred)
DF = DF.append(tempDF, ignore_index = True)
offcurr += i | conditional_block |
resnet.rs | , osize, osize], x!(bias(i0)));
// let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
// b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
// let (b_final, add) = if add != 0 { // add-relu
// let add = f.buf("ADD", F32, In, x![oc, osize, osize]);
// (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add))
// } else {
// (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None)
// };
// let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
// b_init.before(b, 3).before(b_final, 3);
// b_init.store(buf_b);
// b.store_at(buf_b, x![i0, i1, i2]);
// b_final.store(buf_b);
//
// let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap();
//
// static mut ID: u32 = 0;
// let id = unsafe { (ID, ID += 1).0 };
// let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
// let b1 = *b;
// (move |i, add| {
// if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
// }, b1)
// }
fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) {
let f = Func::new("maxpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
a_pad.set_inline(true);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]);
let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的
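// (This shortcut relies on the relu that precedes every maxpool in this network; with inputs that
// could be negative, the running max would instead have to start from f32::MIN rather than 0.)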
let b = f.comp("B", x![chan, osize, osize, kern, kern],
x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2))));
b_init.before(b, 3);
b_init.store(buf_b);
b.store_at(buf_b, x![i0, i1, i2]);
b.tag(0, Parallel);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) {
let f = Func::new("avgpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let buf_b = f.buf("B", F32, Out, x![chan,]);
let b_init = f.comp("B_init", x![chan,], x!(0));
let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0)));
let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size))));
b_init.before(b, 1).before(b_final, 1);
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_final.store(buf_b);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn gemv(m: u32, n: u32) -> (impl Fn(M), M) {
let f = Func::new("gemv");
let a = f.buf("A", F32, In, x![n,]);
let w = f.buf("W", F32, In, x![m, n]);
let c = f.buf("C", F32, In, x![m,]);
let buf_b = f.buf("B", F32, Out, x![m,]);
let b_init = f.comp("B_init", x![m,], x!(c(i0)));
let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0)));
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_init.before(b, 1);
b.tag(0, Parallel);
let lib = f.codegen(&[a, w, c, buf_b]).unwrap();
let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1)
}
fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) {
let expansion = if bottleneck { 4 } else { 1 };
let downsample = stride != 1 || inplanes != planes * expansion;
if bottleneck {
let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1);
let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1);
let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f4, _)) = &f4 { f4(i, None); }
f1(i, None);
f2(b1, None);
f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i }));
}), b3)
} else {
let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1);
let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f3, _)) = &f3 { f3(i, None); }
f1(i, None);
f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i }));
}), b2)
}
}
fn layer(inplanes: u32, planes: u32, blocks: u32, size: u32, stride: u32, bottleneck: bool) - | > (im | identifier_name |
|
resnet.rs | _i, ff_o_i, ff_i, yy_o_o_o, yy_o_o_i, yy_o_i, yy_i, xx_o_o_o, xx_o_o_i, xx_o_i, xx_i
b_final.reorder_n(&[(0, 0), (1, 4), (2, 8), (3, 1), (4, 5), (5, 9), (6, 2), (7, 6), (8, 10), (9, 3), (10, 7), (11, 11), ]);
// ff_o_o_o, yy_o_o_o, xx_o_o_o, ff_o_o_i, yy_o_o_i, xx_o_o_i, ff_o_i, yy_o_i, xx_o_i, ff_i, yy_i, xx_i
b.tags(0..=(if oc / ff0 / ff1 / ff2 < 32 { 5 } else { 0 }), Parallel);
if yy0 > 1 && yy0 < 32 { b.tag(17, Vectorize); }
let (ff_local, xx_local, yy_local) = (ff0 * ff1, xx0 * xx1, yy0 * yy1);
let b_local = f.buf("b_local", F32, Temp, x![ff_local, xx_local, yy_local])
.set_loc(Local).set_zero_init(true);
b_local.alloc_at(b, 5);
b.before(b_final, 6);
b.store_at(b_local, x![i0 % ff_local, i1 % xx_local, i2 % yy_local]);
b_final.store(buf_b);
if pad_buf != a { pad_buf.alloc_at_func(); }
f.compile_arg("-mprefer-vector-width=512");
let lib = Rc::new(if let Some(x) = buf_add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap());
lib_cache.push(([ic, oc, size, kern, stride, pad, add, relu], lib.clone()));
lib
};
static mut ID: u32 = 0;
let id = unsafe { (ID, ID += 1).0 };
let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i, add| {
if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
}, b1)
}
// naive version: it runs, but is very slow
// fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32)
// -> (impl Fn(M, Option<M>), M) {
// println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad);
//
// let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
// let f = Func::new(&name);
// let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
// let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
// let bias = f.buf("BIAS", F32, In, x![oc,]);
// let osize = (size - kern + 2 * pad) / stride + 1;
// let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
// let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad],
// x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
// a_pad.set_inline(true);
//
// let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0)));
// let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
// b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
// let (b_final, add) = if add != 0 { // add-relu
// let add = f.buf("ADD", F32, In, x![oc, osize, osize]);
// (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add))
// } else {
// (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None)
// };
// let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
// b_init.before(b, 3).before(b_final, 3);
// b_init.store(buf_b);
// b.store_at(buf_b, x![i0, i1, i2]);
// b_final.store(buf_b);
//
// let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap();
//
// static mut ID: u32 = 0;
// let id = unsafe { (ID, ID += 1).0 };
// let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
// let b1 = *b;
// (move |i, add| {
// if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
// }, b1)
// }
fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) {
let f = Func::new("maxpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
a_pad.set_inline(true);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]);
let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的
let b = f.comp("B", x![chan, osize, osize, kern, kern],
x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2))));
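// running max over each kern x kern pooling window, accumulated in place into B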
b_init.before(b, 3);
b_init.store(buf_b);
b.store_at(buf_b, x![i0, i1, i2]);
b.tag(0, Parallel);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) { | let f = Func::new("avgpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let buf_b = f.buf("B", F32, Out, x![chan,]); | random_line_split |
|
resnet.rs | bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
}, b1)
}
// naive version: it runs, but is very slow
// fn conv(ic: u32, oc: u32, size: u32, kern: u32, stride: u32, pad: u32, add: u32, relu: u32)
// -> (impl Fn(M, Option<M>), M) {
// println!("ic: {}, oc: {}, size: {}, kern: {}, stride: {}, pad: {}", ic, oc, size, kern, stride, pad);
//
// let name = format!("ic{}_oc{}_size{}_kern{}_stride{}_pad{}_add{}_relu{}", ic, oc, size, kern, stride, pad, add, relu);
// let f = Func::new(&name);
// let a = f.buf("A", F32, In, x![ic, size, size]); // NCHW
// let w = f.buf("W", F32, In, x![oc, ic, kern, kern]); // OIHW
// let bias = f.buf("BIAS", F32, In, x![oc,]);
// let osize = (size - kern + 2 * pad) / stride + 1;
// let buf_b = f.buf("B", F32, Out, x![oc, osize, osize]); // NCHW
// let a_pad = f.comp("A_pad", x![ic, size + 2 * pad, size + 2 * pad],
// x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
// a_pad.set_inline(true);
//
// let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0)));
// let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
// b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
// let (b_final, add) = if add != 0 { // add-relu
// let add = f.buf("ADD", F32, In, x![oc, osize, osize]);
// (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add))
// } else {
// (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None)
// };
// let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
// b_init.before(b, 3).before(b_final, 3);
// b_init.store(buf_b);
// b.store_at(buf_b, x![i0, i1, i2]);
// b_final.store(buf_b);
//
// let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap();
//
// static mut ID: u32 = 0;
// let id = unsafe { (ID, ID += 1).0 };
// let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
// let b1 = *b;
// (move |i, add| {
// if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
// }, b1)
// }
fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) {
let f = Func::new("maxpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
a_pad.set_inline(true);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]);
let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的
let b = f.comp("B", x![chan, osize, osize, kern, kern],
x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2))));
b_init.before(b, 3);
b_init.store(buf_b);
b.store_at(buf_b, x![i0, i1, i2]);
b.tag(0, Parallel);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) {
let f = Func::new("avgpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let buf_b = f.buf("B", F32, Out, x![chan,]);
let b_init = f.comp("B_init", x![chan,], x!(0));
let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0)));
let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size))));
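// B sums each channel over the spatial dims; B_final divides once by the window area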
b_init.before(b, 1).before(b_final, 1);
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_final.store(buf_b);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn gemv(m: u32, n: u32) -> (impl Fn(M), M) {
let f = Func::new("gemv");
let a = f.buf("A", F32, In, x![n,]);
let w = f.buf("W", F32, In, x![m, n]);
let c = f.buf("C", F32, In, x![m,]);
let buf_b = f.buf("B", F32, Out, x![m,]);
let b_init = f.comp("B_init", x![m,], x!(c(i0)));
let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0)));
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_init.before(b, 1);
b.tag(0, Parallel);
let lib = f.codegen(&[a, w, c, buf_b]).unwrap();
let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1)
}
fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) {
let expansion = if bottleneck { 4 } else { 1 };
let downsample = stride != 1 || inplanes != planes * expansion;
if bottleneck { | conditional_block |
||
resnet.rs | a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
// a_pad.set_inline(true);
//
// let b_init = f.comp("B_init", x![oc, osize, osize], x!(bias(i0)));
// let b = f.comp("B", x![oc, osize, osize, ic, kern, kern], x!(0f32));
// b.set_expr(x!(a_pad(i3, i1 * stride + i4, i2 * stride + i5) * w(i0, i3, i4, i5) + b(i0, i1, i2, i3, i4, i5)));
// let (b_final, add) = if add != 0 { // add-relu
// let add = f.buf("ADD", F32, In, x![oc, osize, osize]);
// (x!(max::<f32>(0, add(i0, i1, i2) + buf_b(i0, i1, i2))), Some(add))
// } else {
// (if relu != 0 { x!(max::<f32>(0, buf_b(i0, i1, i2))) } else { x!(buf_b(i0, i1, i2)) }, None)
// };
// let b_final = f.comp("B_final", x![oc, osize, osize], b_final);
// b_init.before(b, 3).before(b_final, 3);
// b_init.store(buf_b);
// b.store_at(buf_b, x![i0, i1, i2]);
// b_final.store(buf_b);
//
// let lib = if let Some(x) = add { f.codegen(&[a, w, bias, x, buf_b]) } else { f.codegen(&[a, w, bias, buf_b]) }.unwrap();
//
// static mut ID: u32 = 0;
// let id = unsafe { (ID, ID += 1).0 };
// let (w, bias, b) = (w.array(read!("conv{}_w", id)), bias.array(read!("conv{}_b", id)), buf_b.array(ArrayInit::None));
// let b1 = *b;
// (move |i, add| {
// if let Some(x) = add { (lib.f)([i, *w, *bias, x, *b].as_ptr()); } else { (lib.f)([i, *w, *bias, *b].as_ptr()); }
// }, b1)
// }
fn maxpool(chan: u32, size: u32, kern: u32, stride: u32, pad: u32) -> (impl Fn(M), M) {
let f = Func::new("maxpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let a_pad = f.comp("A_pad", x![chan, size + 2 * pad, size + 2 * pad],
x!(if i1 >= pad && i1 - pad < size && i2 >= pad && i2 - pad < size { a(i0, i1 - pad, i2 - pad) } else { 0f32 }));
a_pad.set_inline(true);
let osize = (size - kern + 2 * pad) / stride + 1;
let buf_b = f.buf("B", F32, Out, x![chan, osize, osize]);
let b_init = f.comp("B_init", x![chan, osize, osize], x!(0)); // 初值取0是可行的,因为在relu后,输入都是>=0的
let b = f.comp("B", x![chan, osize, osize, kern, kern],
x!(max::<f32>(a_pad(i0, i1 * stride + i3, i2 * stride + i4), buf_b(i0, i1, i2))));
b_init.before(b, 3);
b_init.store(buf_b);
b.store_at(buf_b, x![i0, i1, i2]);
b.tag(0, Parallel);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn avgpool(chan: u32, size: u32) -> (impl Fn(M), M) {
let f = Func::new("avgpool");
let a = f.buf("A", F32, In, x![chan, size, size]);
let buf_b = f.buf("B", F32, Out, x![chan,]);
let b_init = f.comp("B_init", x![chan,], x!(0));
let b = f.comp("B", x![chan, size, size], x!(a(i0, i1, i2) + buf_b(i0)));
let b_final = f.comp("B_final", x![chan,], x!(buf_b(i0) / ((size * size))));
b_init.before(b, 1).before(b_final, 1);
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_final.store(buf_b);
let lib = f.codegen(&[a, buf_b]).unwrap();
let b = buf_b.array(ArrayInit::None);
let b1 = *b;
(move |i| { (lib.f)([i, *b].as_ptr()) }, b1)
}
fn gemv(m: u32, n: u32) -> (impl Fn(M), M) {
let f = Func::new("gemv");
let a = f.buf("A", F32, In, x![n,]);
let w = f.buf("W", F32, In, x![m, n]);
let c = f.buf("C", F32, In, x![m,]);
let buf_b = f.buf("B", F32, Out, x![m,]);
let b_init = f.comp("B_init", x![m,], x!(c(i0)));
let b = f.comp("B", x![m, n], x!(a(i1) * w(i0, i1) + buf_b(i0)));
b_init.store(buf_b);
b.store_at(buf_b, x![i0,]);
b_init.before(b, 1);
b.tag(0, Parallel);
let lib = f.codegen(&[a, w, c, buf_b]).unwrap();
let (w, c, b) = (w.array(read!("gemv_w",)), c.array(read!("gemv_b",)), buf_b.array(ArrayInit::None));
let b1 = *b;
(move |i| { (lib.f)([i, *w, *c, *b].as_ptr()) }, b1)
}
fn block(inplanes: u32, planes: u32, size: u32, stride: u32, bottleneck: bool) -> (Box<dyn Fn(M)>, M) {
let expansion = if bottleneck { 4 } else { 1 };
let downsample = stride != 1 || inpl | anes != planes * expansion;
if bottleneck {
let (f1, b1) = conv(inplanes, planes, size, 1, stride, 0, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 0, 1);
let (f3, b3) = conv(planes, planes * expansion, size / stride, 1, 1, 0, 1, 1);
let f4 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f4, _)) = &f4 { f4(i, None); }
f1(i, None);
f2(b1, None);
f3(b2, Some(if let Some((_, b4)) = f4 { b4 } else { i }));
}), b3)
} else {
let (f1, b1) = conv(inplanes, planes, size, 3, stride, 1, 0, 1);
let (f2, b2) = conv(planes, planes, size / stride, 3, 1, 1, 1, 1);
let f3 = if downsample { Some(conv(inplanes, planes * expansion, size, 1, stride, 0, 0, 0)) } else { None };
(Box::new(move |i| {
if let Some((f3, _)) = &f3 { f3(i, None); }
f1(i, None);
f2(b1, Some(if let Some((_, b3)) = f3 { b3 } else { i })); | identifier_body |
|
ClassifierAdapter.py | .DataObjects import *
def get_emotion_by_id(id):
if id == 1:
return 'Anger'
elif id == 2:
return 'Disgust'
elif id == 3:
return 'Sad'
elif id == 4:
return 'Happy'
elif id == 5:
return 'Surprise'
else:
return 'Fear'
author_columns = ['name', 'domain', 'author_guid', 'author_screen_name',
'author_full_name', 'author_osn_id', 'description', 'created_at',
'statuses_count', 'followers_count', 'favourites_count',
'friends_count', 'listed_count', 'language', 'profile_background_color',
'profile_background_tile', 'profile_banner_url', 'profile_image_url',
'profile_link_color', 'profile_sidebar_fill_color',
'profile_text_color', 'default_profile', 'contributors_enabled',
'default_profile_image', 'geo_enabled', 'protected', 'location',
'notifications', 'time_zone', 'url', 'utc_offset', 'verified',
'is_suspended_or_not_exists', 'default_post_format', 'likes_count',
'allow_questions', 'allow_anonymous_questions', 'image_size',
'media_path', 'author_type', 'bad_actors_collector_insertion_date',
'xml_importer_insertion_date', 'vico_dump_insertion_date',
'missing_data_complementor_insertion_date',
'bad_actors_markup_insertion_date',
'mark_missing_bad_actor_retweeters_insertion_date', 'author_sub_type',
'timeline_overlap_insertion_date',
'original_tweet_importer_insertion_date']
post_columns = ['post_id', 'author', 'guid', 'title', 'url', 'date', 'content',
'description', 'is_detailed', 'is_LB', 'is_valid', 'domain',
'author_guid', 'media_path', 'post_osn_guid', 'post_type',
'post_format', 'reblog_key', 'tags', 'is_created_via_bookmarklet',
'is_created_via_mobile', 'source_url', 'source_title', 'is_liked',
'post_state', 'post_osn_id', 'retweet_count', 'favorite_count',
'created_at', 'xml_importer_insertion_date',
'timeline_importer_insertion_date',
'original_tweet_importer_insertion_date']
claims_columns = ['claim_id', 'title', 'description', 'url', 'verdict_date', 'keywords',
'domain', 'verdict', 'category', 'sub_category']
connection_columns = ['claim_id', 'post_id']
# subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'D:\aviad fake v3\fake-news-framework_Py3',shell=True)
# ours, should write also stub
class ClassifierAdapter:
def __init__(self):
self.sid = SentimentIntensityAnalyzer()
self.i=0
def get_sentiment(self,text) -> int:
snt = self.sid.polarity_scores(text)
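# scale VADER's positive/negative scores onto an integer sentiment in [-3, 3]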
return round(snt['pos']*3-snt['neg']*3)
def | (self,text):
emo = te.get_emotion(text)
return max(emo, key=emo.get) # pick the emotion with the highest score
def _trends_to_csv(self, trends_dict, path="C:/fake-news-framework_Py3/data/input/tryout/"):
topics = []
tweets = []
authors = []
topic_tweet_connection = []
for trend in trends_dict.keys():
for topic in trends_dict[trend].claims:
topics.append({'claim_id':topic.id,'title': topic.name}) # check what is the input
for tweet in topic.tweets:
topic_tweet_connection.append({'claim_id': topic.id, 'post_id': tweet.id})
tweets.append({'post_id':tweet.id,'author':tweet.author_name,'content':tweet.content,'retweet_count':tweet.retweet_count, 'favorite_count':tweet.favorite_count})
authors.append({'name':tweet.author_name})
pd.DataFrame(topics, columns=claims_columns).to_csv(path + "claims.csv",index=False)
pd.DataFrame(tweets, columns=post_columns).to_csv(path + "posts.csv",index=False)
pd.DataFrame(authors, columns=author_columns).to_csv(path + "authors.csv",index=False)
pd.DataFrame(topic_tweet_connection, columns=connection_columns).to_csv(path + "claim_tweet_connection.csv",index=False)
self.i+=1
def _classify_topic(self):
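# run the external fake-news-framework pipeline, then load its per-author predictions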
subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'C:/fake-news-framework_Py3',shell=True)
results = pd.read_csv("C:/fake-news-framework_Py3/data/output/D/labeled_predictions.csv")[['author_guid','pred']]
return results
def analyze_trends(self, trends_dict, callback): # trends_dict is type of dict {<trend name> : <Trend>}
processed_data = {}
if len(trends_dict)==0:
return
self._trends_to_csv(trends_dict)
results = self._classify_topic()
print("got classifier results\nparsing the results and running sentiment and emotion")
for trend in trends_dict.keys():
print("start trend {}".format(trend))
if trend not in processed_data:
processed_data[trend] = list()
for topic in trends_dict[trend].claims:
tweets = list()
for tweet in topic.tweets:
rand = randrange(100)
if rand < 50:
prediction = "fake"
else:
prediction = "true"
# sentiment = randint(-3, 3)
sentiment = self.get_sentiment(tweet.content)
# rand = randrange(6)
emotion = self.get_emotion(tweet.content)
analyzed_tweet = AnalyzedTweet(tweet.id, tweet.author_name, tweet.content,tweet.location,tweet.date,
tweet.trend_id,tweet.favorite_count,tweet.retweet_count, emotion, sentiment,
prediction)
tweets.append(analyzed_tweet)
print(f"add tweet {tweet} to the topic {topic}")
print(f"save the topic {topic}, with the list of tweets: {tweets}")
processed_data[trend].append(Claim(topic.name, tweets,topic.id))
time.sleep(1)
results['pred'] = results['pred'].apply(lambda x:"True" if x else "Fake")
return callback(processed_data, trends_dict,results)
def analyze_snopes(self, data, callback): # data is type of dict {<claim name> : list <tweets>}
# print(data)
# processed_data = {}
# for key in data.keys():
# if key not in processed_data:
# processed_data[key]={}
# for tweet in data[key].keys():
# processed_data[key][tweet]={}
# rand = randrange(100)
# if rand < 50:
# processed_data[key][tweet]['prediction'] = "wow it's fake"
# else:
# processed_data[key][tweet]['prediction'] = "100% true"
# sentiment = randint(-3, 3)
# processed_data[key][tweet]['sentiment'] = sentiment
# rand = randrange(6)
# processed_data[key][tweet]['emotional'] = get_emotion_by_id(rand)
processed_data = {}
for claim in data.keys():
# if claim not in processed_data:
# processed_data[claim]= list()
tweets = list()
for tweet in data[claim]:
rand = randrange(100)
if rand < 50:
prediction = "fake"
else:
prediction = "true"
sentiment = randint(-3, 3)
rand = randrange(6)
emotion = get_emotion_by_id(rand)
analyzed_tweet = AnalyzedTweet(tweet['id'], tweet['author'], tweet['content'], emotion, sentiment,
prediction)
tweets.append(analyzed_tweet)
if claim in processed_data.keys():
processed_data[claim].append(Claim(claim, tweets))
else:
processed_data[claim] = Claim(claim, tweets)
time.sleep(1)
return callback(processed_data)
def get_claims_from_trend(self, trends_tweets):
claims = {'claim1': {}, 'claim2': {}}
for status in trends_tweets:
rand = randrange(10)
# print(status.id)
# print(status.text)
# print(status.author.name)
if rand < 5:
claims["claim1"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content}
else:
# print(status)
claims["claim2"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content}
return claims
def _get_claim_from_trend(self, trends_tweets):
print("topic model")
df = pd.DataFrame([tweet.__dict__ for tweet in trends_tweets])
df = df[['id', 'content','author_name']]
if len(df) < 15:
print("less then 15 tweets, creating 1 topic")
from collections import Counter
claim_text = ' '.join([txt[0] for txt in
Counter(" ".join(df['content'].str.replace("RT", '').values).split(' ')).most_common | get_emotion | identifier_name |
ClassifierAdapter.py | .DataObjects import *
def get_emotion_by_id(id):
if id == 1:
return 'Anger'
elif id == 2:
return 'Disgust'
elif id == 3:
return 'Sad'
elif id == 4:
return 'Happy'
elif id == 5:
return 'Surprise'
else:
return 'Fear'
author_columns = ['name', 'domain', 'author_guid', 'author_screen_name',
'author_full_name', 'author_osn_id', 'description', 'created_at',
'statuses_count', 'followers_count', 'favourites_count',
'friends_count', 'listed_count', 'language', 'profile_background_color',
'profile_background_tile', 'profile_banner_url', 'profile_image_url',
'profile_link_color', 'profile_sidebar_fill_color',
'profile_text_color', 'default_profile', 'contributors_enabled',
'default_profile_image', 'geo_enabled', 'protected', 'location',
'notifications', 'time_zone', 'url', 'utc_offset', 'verified',
'is_suspended_or_not_exists', 'default_post_format', 'likes_count',
'allow_questions', 'allow_anonymous_questions', 'image_size',
'media_path', 'author_type', 'bad_actors_collector_insertion_date',
'xml_importer_insertion_date', 'vico_dump_insertion_date',
'missing_data_complementor_insertion_date',
'bad_actors_markup_insertion_date',
'mark_missing_bad_actor_retweeters_insertion_date', 'author_sub_type',
'timeline_overlap_insertion_date',
'original_tweet_importer_insertion_date']
post_columns = ['post_id', 'author', 'guid', 'title', 'url', 'date', 'content',
'description', 'is_detailed', 'is_LB', 'is_valid', 'domain',
'author_guid', 'media_path', 'post_osn_guid', 'post_type',
'post_format', 'reblog_key', 'tags', 'is_created_via_bookmarklet',
'is_created_via_mobile', 'source_url', 'source_title', 'is_liked',
'post_state', 'post_osn_id', 'retweet_count', 'favorite_count',
'created_at', 'xml_importer_insertion_date',
'timeline_importer_insertion_date',
'original_tweet_importer_insertion_date']
claims_columns = ['claim_id', 'title', 'description', 'url', 'verdict_date', 'keywords',
'domain', 'verdict', 'category', 'sub_category']
connection_columns = ['claim_id', 'post_id']
# subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'D:\aviad fake v3\fake-news-framework_Py3',shell=True)
# ours, should write also stub
class ClassifierAdapter:
def __init__(self):
self.sid = SentimentIntensityAnalyzer()
self.i=0
def get_sentiment(self,text) -> int:
snt = self.sid.polarity_scores(text)
return round(snt['pos']*3-snt['neg']*3)
def get_emotion(self,text):
|
def _trends_to_csv(self, trends_dict, path="C:/fake-news-framework_Py3/data/input/tryout/"):
topics = []
tweets = []
authors = []
topic_tweet_connection = []
for trend in trends_dict.keys():
for topic in trends_dict[trend].claims:
topics.append({'claim_id':topic.id,'title': topic.name}) # check what is the input
for tweet in topic.tweets:
topic_tweet_connection.append({'claim_id': topic.id, 'post_id': tweet.id})
tweets.append({'post_id':tweet.id,'author':tweet.author_name,'content':tweet.content,'retweet_count':tweet.retweet_count, 'favorite_count':tweet.favorite_count})
authors.append({'name':tweet.author_name})
pd.DataFrame(topics, columns=claims_columns).to_csv(path + "claims.csv",index=False)
pd.DataFrame(tweets, columns=post_columns).to_csv(path + "posts.csv",index=False)
pd.DataFrame(authors, columns=author_columns).to_csv(path + "authors.csv",index=False)
pd.DataFrame(topic_tweet_connection, columns=connection_columns).to_csv(path + "claim_tweet_connection.csv",index=False)
self.i+=1
def _classify_topic(self):
subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'C:/fake-news-framework_Py3',shell=True)
results = pd.read_csv("C:/fake-news-framework_Py3/data/output/D/labeled_predictions.csv")[['author_guid','pred']]
return results
def analyze_trends(self, trends_dict, callback): # trends_dict is type of dict {<trend name> : <Trend>}
processed_data = {}
if len(trends_dict)==0:
return
self._trends_to_csv(trends_dict)
results = self._classify_topic()
print("got classifier results\nparsing the results and running sentiment and emotion")
for trend in trends_dict.keys():
print("start trend {}".format(trend))
if trend not in processed_data:
processed_data[trend] = list()
for topic in trends_dict[trend].claims:
tweets = list()
for tweet in topic.tweets:
rand = randrange(100)
if rand < 50:
prediction = "fake"
else:
prediction = "true"
# sentiment = randint(-3, 3)
sentiment = self.get_sentiment(tweet.content)
# rand = randrange(6)
emotion = self.get_emotion(tweet.content)
analyzed_tweet = AnalyzedTweet(tweet.id, tweet.author_name, tweet.content,tweet.location,tweet.date,
tweet.trend_id,tweet.favorite_count,tweet.retweet_count, emotion, sentiment,
prediction)
tweets.append(analyzed_tweet)
print(f"add tweet {tweet} to the topic {topic}")
print(f"save the topic {topic}, with the list of tweets: {tweets}")
processed_data[trend].append(Claim(topic.name, tweets,topic.id))
time.sleep(1)
results['pred'] = results['pred'].apply(lambda x:"True" if x else "Fake")
return callback(processed_data, trends_dict,results)
def analyze_snopes(self, data, callback): # data is type of dict {<claim name> : list <tweets>}
# print(data)
# processed_data = {}
# for key in data.keys():
# if key not in processed_data:
# processed_data[key]={}
# for tweet in data[key].keys():
# processed_data[key][tweet]={}
# rand = randrange(100)
# if rand < 50:
# processed_data[key][tweet]['prediction'] = "wow it's fake"
# else:
# processed_data[key][tweet]['prediction'] = "100% true"
# sentiment = randint(-3, 3)
# processed_data[key][tweet]['sentiment'] = sentiment
# rand = randrange(6)
# processed_data[key][tweet]['emotional'] = get_emotion_by_id(rand)
processed_data = {}
for claim in data.keys():
# if claim not in processed_data:
# processed_data[claim]= list()
tweets = list()
for tweet in data[claim]:
rand = randrange(100)
if rand < 50:
prediction = "fake"
else:
prediction = "true"
sentiment = randint(-3, 3)
rand = randrange(6)
emotion = get_emotion_by_id(rand)
analyzed_tweet = AnalyzedTweet(tweet['id'], tweet['author'], tweet['content'], emotion, sentiment,
prediction)
tweets.append(analyzed_tweet)
if claim in processed_data.keys():
processed_data[claim].append(Claim(claim, tweets))
else:
processed_data[claim] = Claim(claim, tweets)
time.sleep(1)
return callback(processed_data)
def get_claims_from_trend(self, trends_tweets):
claims = {'claim1': {}, 'claim2': {}}
for status in trends_tweets:
rand = randrange(10)
# print(status.id)
# print(status.text)
# print(status.author.name)
if rand < 5:
claims["claim1"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content}
else:
# print(status)
claims["claim2"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content}
return claims
def _get_claim_from_trend(self, trends_tweets):
print("topic model")
df = pd.DataFrame([tweet.__dict__ for tweet in trends_tweets])
df = df[['id', 'content','author_name']]
if len(df) < 15:
print("less then 15 tweets, creating 1 topic")
from collections import Counter
claim_text = ' '.join([txt[0] for txt in
Counter(" ".join(df['content'].str.replace("RT", '').values).split(' ')).most_common | emo = te.get_emotion(text)
return max(emo, key=emo.get) # pick the emotion with the highest score | identifier_body
ClassifierAdapter.py | .DataObjects import *
def get_emotion_by_id(id):
if id == 1:
return 'Anger'
elif id == 2:
return 'Disgust'
elif id == 3:
return 'Sad'
elif id == 4:
return 'Happy'
elif id == 5:
return 'Surprise'
else:
return 'Fear'
author_columns = ['name', 'domain', 'author_guid', 'author_screen_name',
'author_full_name', 'author_osn_id', 'description', 'created_at',
'statuses_count', 'followers_count', 'favourites_count',
'friends_count', 'listed_count', 'language', 'profile_background_color',
'profile_background_tile', 'profile_banner_url', 'profile_image_url',
'profile_link_color', 'profile_sidebar_fill_color',
'profile_text_color', 'default_profile', 'contributors_enabled',
'default_profile_image', 'geo_enabled', 'protected', 'location',
'notifications', 'time_zone', 'url', 'utc_offset', 'verified',
'is_suspended_or_not_exists', 'default_post_format', 'likes_count',
'allow_questions', 'allow_anonymous_questions', 'image_size',
'media_path', 'author_type', 'bad_actors_collector_insertion_date',
'xml_importer_insertion_date', 'vico_dump_insertion_date',
'missing_data_complementor_insertion_date',
'bad_actors_markup_insertion_date',
'mark_missing_bad_actor_retweeters_insertion_date', 'author_sub_type',
'timeline_overlap_insertion_date',
'original_tweet_importer_insertion_date']
post_columns = ['post_id', 'author', 'guid', 'title', 'url', 'date', 'content',
'description', 'is_detailed', 'is_LB', 'is_valid', 'domain',
'author_guid', 'media_path', 'post_osn_guid', 'post_type',
'post_format', 'reblog_key', 'tags', 'is_created_via_bookmarklet',
'is_created_via_mobile', 'source_url', 'source_title', 'is_liked',
'post_state', 'post_osn_id', 'retweet_count', 'favorite_count',
'created_at', 'xml_importer_insertion_date',
'timeline_importer_insertion_date',
'original_tweet_importer_insertion_date']
claims_columns = ['claim_id', 'title', 'description', 'url', 'verdict_date', 'keywords',
'domain', 'verdict', 'category', 'sub_category']
connection_columns = ['claim_id', 'post_id']
# subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'D:\aviad fake v3\fake-news-framework_Py3',shell=True)
# ours, should write also stub
class ClassifierAdapter:
def __init__(self):
self.sid = SentimentIntensityAnalyzer()
self.i=0
def get_sentiment(self,text) -> int:
snt = self.sid.polarity_scores(text)
return round(snt['pos']*3-snt['neg']*3)
def get_emotion(self,text):
emo = te.get_emotion(text)
return max(emo, key=emo.get) # pick the emotion with the highest score
def _trends_to_csv(self, trends_dict, path="C:/fake-news-framework_Py3/data/input/tryout/"):
topics = []
tweets = []
authors = []
topic_tweet_connection = []
for trend in trends_dict.keys():
for topic in trends_dict[trend].claims:
topics.append({'claim_id':topic.id,'title': topic.name}) # check what is the input
for tweet in topic.tweets:
topic_tweet_connection.append({'claim_id': topic.id, 'post_id': tweet.id})
tweets.append({'post_id':tweet.id,'author':tweet.author_name,'content':tweet.content,'retweet_count':tweet.retweet_count, 'favorite_count':tweet.favorite_count})
authors.append({'name':tweet.author_name})
pd.DataFrame(topics, columns=claims_columns).to_csv(path + "claims.csv",index=False)
pd.DataFrame(tweets, columns=post_columns).to_csv(path + "posts.csv",index=False)
pd.DataFrame(authors, columns=author_columns).to_csv(path + "authors.csv",index=False)
pd.DataFrame(topic_tweet_connection, columns=connection_columns).to_csv(path + "claim_tweet_connection.csv",index=False)
self.i+=1
def _classify_topic(self):
subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'C:/fake-news-framework_Py3',shell=True)
results = pd.read_csv("C:/fake-news-framework_Py3/data/output/D/labeled_predictions.csv")[['author_guid','pred']]
return results
def analyze_trends(self, trends_dict, callback): # trends_dict is type of dict {<trend name> : <Trend>}
processed_data = {}
if len(trends_dict)==0:
return
self._trends_to_csv(trends_dict)
results = self._classify_topic()
print("got classifier results\nparsing the results and running sentiment and emotion")
for trend in trends_dict.keys():
| print(f"add tweet {tweet} to the topic {topic}")
print(f"save the topic {topic}, with the list of tweets: {tweets}")
processed_data[trend].append(Claim(topic.name, tweets,topic.id))
time.sleep(1)
results['pred'] = results['pred'].apply(lambda x:"True" if x else "Fake")
return callback(processed_data, trends_dict,results)
def analyze_snopes(self, data, callback): # data is type of dict {<claim name> : list <tweets>}
# print(data)
# processed_data = {}
# for key in data.keys():
# if key not in processed_data:
# processed_data[key]={}
# for tweet in data[key].keys():
# processed_data[key][tweet]={}
# rand = randrange(100)
# if rand < 50:
# processed_data[key][tweet]['prediction'] = "wow it's fake"
# else:
# processed_data[key][tweet]['prediction'] = "100% true"
# sentiment = randint(-3, 3)
# processed_data[key][tweet]['sentiment'] = sentiment
# rand = randrange(6)
# processed_data[key][tweet]['emotional'] = get_emotion_by_id(rand)
processed_data = {}
for claim in data.keys():
# if claim not in processed_data:
# processed_data[claim]= list()
tweets = list()
for tweet in data[claim]:
rand = randrange(100)
if rand < 50:
prediction = "fake"
else:
prediction = "true"
sentiment = randint(-3, 3)
rand = randrange(6)
emotion = get_emotion_by_id(rand)
analyzed_tweet = AnalyzedTweet(tweet['id'], tweet['author'], tweet['content'], emotion, sentiment,
prediction)
tweets.append(analyzed_tweet)
if claim in processed_data.keys():
processed_data[claim].append(Claim(claim, tweets))
else:
processed_data[claim] = Claim(claim, tweets)
time.sleep(1)
return callback(processed_data)
def get_claims_from_trend(self, trends_tweets):
claims = {'claim1': {}, 'claim2': {}}
for status in trends_tweets:
rand = randrange(10)
# print(status.id)
# print(status.text)
# print(status.author.name)
if rand < 5:
claims["claim1"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content}
else:
# print(status)
claims["claim2"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content}
return claims
def _get_claim_from_trend(self, trends_tweets):
print("topic model")
df = pd.DataFrame([tweet.__dict__ for tweet in trends_tweets])
df = df[['id', 'content','author_name']]
if len(df) < 15:
print("less then 15 tweets, creating 1 topic")
from collections import Counter
claim_text = ' '.join([txt[0] for txt in
Counter(" ".join(df['content'].str.replace("RT", '').values).split(' ')).most_common(
| print("start trend {}".format(trend))
if trend not in processed_data:
processed_data[trend] = list()
for topic in trends_dict[trend].claims:
tweets = list()
for tweet in topic.tweets:
rand = randrange(100)
if rand < 50:
prediction = "fake"
else:
prediction = "true"
# sentiment = randint(-3, 3)
sentiment = self.get_sentiment(tweet.content)
# rand = randrange(6)
emotion = self.get_emotion(tweet.content)
analyzed_tweet = AnalyzedTweet(tweet.id, tweet.author_name, tweet.content,tweet.location,tweet.date,
tweet.trend_id,tweet.favorite_count,tweet.retweet_count, emotion, sentiment,
prediction)
tweets.append(analyzed_tweet) | conditional_block |
ClassifierAdapter.py | Manager.DataObjects import *
def get_emotion_by_id(id):
if id == 1:
return 'Anger'
elif id == 2:
return 'Disgust'
elif id == 3:
return 'Sad'
elif id == 4:
return 'Happy'
elif id == 5:
return 'Surprise'
else:
return 'Fear'
author_columns = ['name', 'domain', 'author_guid', 'author_screen_name',
'author_full_name', 'author_osn_id', 'description', 'created_at',
'statuses_count', 'followers_count', 'favourites_count',
'friends_count', 'listed_count', 'language', 'profile_background_color',
'profile_background_tile', 'profile_banner_url', 'profile_image_url',
'profile_link_color', 'profile_sidebar_fill_color',
'profile_text_color', 'default_profile', 'contributors_enabled',
'default_profile_image', 'geo_enabled', 'protected', 'location',
'notifications', 'time_zone', 'url', 'utc_offset', 'verified',
'is_suspended_or_not_exists', 'default_post_format', 'likes_count',
'allow_questions', 'allow_anonymous_questions', 'image_size',
'media_path', 'author_type', 'bad_actors_collector_insertion_date',
'xml_importer_insertion_date', 'vico_dump_insertion_date',
'missing_data_complementor_insertion_date',
'bad_actors_markup_insertion_date',
'mark_missing_bad_actor_retweeters_insertion_date', 'author_sub_type',
'timeline_overlap_insertion_date',
'original_tweet_importer_insertion_date']
post_columns = ['post_id', 'author', 'guid', 'title', 'url', 'date', 'content',
'description', 'is_detailed', 'is_LB', 'is_valid', 'domain',
'author_guid', 'media_path', 'post_osn_guid', 'post_type',
'post_format', 'reblog_key', 'tags', 'is_created_via_bookmarklet',
'is_created_via_mobile', 'source_url', 'source_title', 'is_liked',
'post_state', 'post_osn_id', 'retweet_count', 'favorite_count',
'created_at', 'xml_importer_insertion_date',
'timeline_importer_insertion_date',
'original_tweet_importer_insertion_date']
claims_columns = ['claim_id', 'title', 'description', 'url', 'verdict_date', 'keywords',
'domain', 'verdict', 'category', 'sub_category']
connection_columns = ['claim_id', 'post_id']
# subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'D:\aviad fake v3\fake-news-framework_Py3',shell=True)
# ours, should write also stub
class ClassifierAdapter:
def __init__(self):
self.sid = SentimentIntensityAnalyzer()
self.i=0
def get_sentiment(self,text) -> int:
snt = self.sid.polarity_scores(text)
return round(snt['pos']*3-snt['neg']*3)
def get_emotion(self,text):
emo = te.get_emotion(text)
return max(emo, key=emo.get) # pick the emotion with the highest score
def _trends_to_csv(self, trends_dict, path="C:/fake-news-framework_Py3/data/input/tryout/"):
topics = []
tweets = []
authors = []
topic_tweet_connection = []
for trend in trends_dict.keys():
for topic in trends_dict[trend].claims:
topics.append({'claim_id':topic.id,'title': topic.name}) # check what is the input
for tweet in topic.tweets:
topic_tweet_connection.append({'claim_id': topic.id, 'post_id': tweet.id})
tweets.append({'post_id':tweet.id,'author':tweet.author_name,'content':tweet.content,'retweet_count':tweet.retweet_count, 'favorite_count':tweet.favorite_count})
authors.append({'name':tweet.author_name})
pd.DataFrame(topics, columns=claims_columns).to_csv(path + "claims.csv",index=False)
pd.DataFrame(tweets, columns=post_columns).to_csv(path + "posts.csv",index=False)
pd.DataFrame(authors, columns=author_columns).to_csv(path + "authors.csv",index=False)
pd.DataFrame(topic_tweet_connection, columns=connection_columns).to_csv(path + "claim_tweet_connection.csv",index=False)
self.i+=1
def _classify_topic(self):
subprocess.call(['python','run_dataset_builder.py','configuration/config_demo.ini'],cwd= r'C:/fake-news-framework_Py3',shell=True)
results = pd.read_csv("C:/fake-news-framework_Py3/data/output/D/labeled_predictions.csv")[['author_guid','pred']]
return results
def analyze_trends(self, trends_dict, callback): # trends_dict is type of dict {<trend name> : <Trend>}
processed_data = {}
if len(trends_dict)==0:
return
self._trends_to_csv(trends_dict)
results = self._classify_topic()
print("got classifier results\nparsing the results and running sentiment and emotion")
for trend in trends_dict.keys():
print("start trend {}".format(trend))
if trend not in processed_data:
processed_data[trend] = list()
for topic in trends_dict[trend].claims:
tweets = list()
for tweet in topic.tweets:
rand = randrange(100)
if rand < 50:
prediction = "fake"
else:
prediction = "true"
# sentiment = randint(-3, 3)
sentiment = self.get_sentiment(tweet.content)
# rand = randrange(6)
emotion = self.get_emotion(tweet.content)
analyzed_tweet = AnalyzedTweet(tweet.id, tweet.author_name, tweet.content,tweet.location,tweet.date,
tweet.trend_id,tweet.favorite_count,tweet.retweet_count, emotion, sentiment,
prediction)
tweets.append(analyzed_tweet)
print(f"add tweet {tweet} to the topic {topic}")
print(f"save the topic {topic}, with the list of tweets: {tweets}")
processed_data[trend].append(Claim(topic.name, tweets,topic.id))
| results['pred'] = results['pred'].apply(lambda x:"True" if x else "Fake")
return callback(processed_data, trends_dict,results)
def analyze_snopes(self, data, callback): # data is type of dict {<claim name> : list <tweets>}
# print(data)
# processed_data = {}
# for key in data.keys():
# if key not in processed_data:
# processed_data[key]={}
# for tweet in data[key].keys():
# processed_data[key][tweet]={}
# rand = randrange(100)
# if rand < 50:
# processed_data[key][tweet]['prediction'] = "wow it's fake"
# else:
# processed_data[key][tweet]['prediction'] = "100% true"
# sentiment = randint(-3, 3)
# processed_data[key][tweet]['sentiment'] = sentiment
# rand = randrange(6)
# processed_data[key][tweet]['emotional'] = get_emotion_by_id(rand)
processed_data = {}
for claim in data.keys():
# if claim not in processed_data:
# processed_data[claim]= list()
tweets = list()
for tweet in data[claim]:
rand = randrange(100)
if rand < 50:
prediction = "fake"
else:
prediction = "true"
sentiment = randint(-3, 3)
rand = randrange(6)
emotion = get_emotion_by_id(rand)
analyzed_tweet = AnalyzedTweet(tweet['id'], tweet['author'], tweet['content'], emotion, sentiment,
prediction)
tweets.append(analyzed_tweet)
if claim in processed_data.keys():
processed_data[claim].append(Claim(claim, tweets))
else:
processed_data[claim] = Claim(claim, tweets)
time.sleep(1)
return callback(processed_data)
def get_claims_from_trend(self, trends_tweets):
claims = {'claim1': {}, 'claim2': {}}
for status in trends_tweets:
rand = randrange(10)
# print(status.id)
# print(status.text)
# print(status.author.name)
if rand < 5:
claims["claim1"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content}
else:
# print(status)
claims["claim2"][status.id]= {'id': status.id, 'author': status.author_name, 'content': status.content}
return claims
def _get_claim_from_trend(self, trends_tweets):
print("topic model")
df = pd.DataFrame([tweet.__dict__ for tweet in trends_tweets])
df = df[['id', 'content','author_name']]
if len(df) < 15:
print("less then 15 tweets, creating 1 topic")
from collections import Counter
claim_text = ' '.join([txt[0] for txt in
Counter(" ".join(df['content'].str.replace("RT", '').values).split(' ')).most_common | time.sleep(1) | random_line_split |
MapScreen.js | constructor(props) {
super(props);
this.state = {
startValue: 'Start',
initialCoords:[
{latitude:34.073026, longitude:-118.465619},
{latitude:34.067223, longitude:-118.410851}
],
coordinates: [
{
latitude: 34.06279,
longitude: -118.44390,
},
{
latitude: 34.06241,
longitude: -118.44375,
},
],
clocation: {
latitude: 34.06637,
longitude:-118.44524,
},
dur: null,
dis: null,
saveWalk:{
startingLocation: null,
destinationLocation: null,
forZoom: { distance: Number, duration: Number, coordinates: [] }
},
modalVisible: false,
currentPath: "Current Path",
premadePath: false,
description: "",
name: "",
};
this.mapView = null;
}
// Start and Stop route button functionality
onStartWalk = () =>{
this.setState({
saveWalk:{
startingLocation:this.state.coordinates[1],
destinationLocation: this.state.coordinates[0]},
})
if(this.state.startValue=='Start'){
this.setState({
startValue:'Stop'
});
this.mapView.fitToCoordinates(this.state.forZoom.coordinates,{
edgePadding: {
right: (width / 10),
bottom: (height / 20),
left: (width / 10),
top: (height / 20),
}
}
);
}
else{
this.setState({
startValue:'Start'
});
this.mapView.fitToCoordinates(this.state.initialCoords,{
edgePadding: {
right: width,
bottom: height,
left: width,
top: height
}
}
);
}
}
// Saves user's walks to database
saveWalk = (visible) => {
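// POST the current route (name, description, coordinates) to the backend, then close the save dialog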
var params = JSON.stringify({
'name': this.state.name,
'description': this.state.description,
'coordinates': this.state.coordinates,
'profile': [global.session_id],
});
axios
.post("http://127.0.0.1:8000/walks/", params,
{"headers": {
'content-Type': 'application/json'
}})
.then(this.setModalVisible(visible))
.catch(error => console.log(error)
);
console.log(params)
}
setModalVisible = (visible) => {
this.setState({modalVisible:visible});
}
// When map is pressed, route is created from current location
onMapPress = (e) => {
this.setState({
coordinates: [
e.nativeEvent.coordinate,
this.state.clocation
],
currentPath: "Current Path",
premadePath: false,
});
}
onRoutePress = (long, lat ) => {
this.setState({
coordinates: [
{latitude: lat, longitude: long},
this.state.clocation
],
});
}
// Getting walks data for explore portion
componentDidMount() {
this.currentLocation();
this.intervalID = setInterval(this.currentLocation.bind(this), 1000);
axios
.get("http://127.0.0.1:8000/walks")
.then(response => this.setState({walks: response.data.results}))
.catch(error => console.log(error)
);
}
componentWillUnmount() {
clearInterval(this.intervalID);
}
currentLocation = () => {
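// Poll the device GPS and keep clocation in state up to date (called every second from componentDidMount)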
navigator.geolocation.getCurrentPosition(
position => {
const latitude = position.coords.latitude;
const longitude = position.coords.longitude;
this.setState({
clocation: {latitude, longitude}
});
},
{ enableHighAccuracy: true, timeout:20000, maximumAge: 1000}
);
};
setPremadePath = (item) => {
this.setState({
coordinates: item.coordinates,
currentPath: item.name,
premadePath: true,
})
console.log(item.coordinates)
}
| () {
const { modalVisible } = this.state;
let button;
button=
<TouchableOpacity style={Buttons.brownbuttonSmall}
onPress={() => this.setModalVisible(!modalVisible)}>
<Text style={{color:'white', alignSelf: "center"}}>Save</Text>
</TouchableOpacity>
return (
<View style={styles.container}>
{/* Map and current route display */}
<View style={{width: '100%', height: '65%', padding: '2%', alignSelf: 'center'}}>
<MapView
initialRegion={{
latitude: LATITUDE,
longitude: LONGITUDE,
latitudeDelta: LATITUDE_DELTA,
longitudeDelta: LONGITUDE_DELTA,
}}
style={StyleSheet.absoluteFill}
ref={c => this.mapView = c}
onPress={this.onMapPress}
zoomEnabled={true}
>
{this.state.coordinates.map((coordinate, index) =>
<Marker key={`coordinate_${index}`} coordinate={coordinate} />
)}
{(this.state.coordinates.length >= 2) && (
<MapViewDirections
origin={this.state.coordinates[0]}
destination={this.state.coordinates[this.state.coordinates.length-1]}
waypoints={this.state.coordinates}
mode="WALKING"
apikey={GOOGLE_MAPS_APIKEY}
strokeWidth={3}
strokeColor="blue"
optimizeWaypoints={true}
onStart={(params) => {
}}
onReady={result => {
this.setState({
dis: result.distance,
dur: result.duration,
forZoom: result
})
}}
onError={(errorMessage) => {
}}
/>
)}
<Marker coordinate={{latitude: this.state.clocation.latitude, longitude: this.state.clocation.longitude}}>
<MaterialIcons name="my-location" size={24} color={Colors.brown} />
</Marker>
</MapView>
<Text style={{marginTop: '10%', alignSelf: 'center', fontStyle: "italic", color: '#675a5a', backgroundColor: 'white'}}>
Click where you want to go on the map </Text>
{/* Current Path window on map */}
<View style={{ flex: 'stretch', backgroundColor: '#fffae3', padding: 3, borderRadius: 10, borderWidth: 2, borderColor: '#675a5a'}}>
<Text style={styles.title}>{this.state.currentPath}</Text>
<View style={{flexDirection: "row"}}>
<View style={{width: '50%'}}>
<Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Distance: {((this.state.dis)/1.609).toFixed(2)} miles</Text>
<Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Time: {((this.state.dur)/1).toFixed(2)} min.</Text>
</View>
<View style={{flex: 1, width: '50%', alignItems: 'flex-end', marginRight: '2%'}}>
{button}
</View>
</View>
</View>
</View>
<TouchableOpacity style={{padding: '2%', margin: '1%', backgroundColor: Colors.brown, borderRadius: 13, width: 400}}
onPress={ this.onStartWalk }
>
<Text style={{color: "white", fontSize: 15, alignSelf: "center"}}>{this.state.startValue}</Text>
</TouchableOpacity>
{/* Window for saving a route */}
<Modal style ={{marginTop: "50%"}}
animationType="slide"
transparent={true}
visible={modalVisible}
onRequestClose={() => {
this.setModalVisible(!modalVisible);
}}
>
<View style={{marginTop: '50%'}}>
<View style={styles.saveView}>
<Text style={[styles.modalText, {marginTop: '5%'}]}>Title</Text>
<TextInput
style={styles.input}
placeholder="Enter a title"
onChangeText={(name) => this.setState({name})}
/>
<Text style={styles.modalText}>Description</Text>
<TextInput
style={styles.input}
placeholder="Enter a description"
returnKeyType="done"
onChangeText={(description) => this.setState({description})}
/>
<TouchableOpacity
style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]}
onPress={() => this.saveWalk(!modalVisible)}
>
<Text style={{alignSelf: 'center', color: 'white'}}>Save</Text>
</TouchableOpacity>
<TouchableOpacity
style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]}
onPress={() => this.setModalVisible(!modalVisible)}
>
<Text style={{alignSelf: 'center', color: 'white'}}>Cancel</Text>
</TouchableOpacity>
</View>
</View>
</Modal>
{/* Explore other routes portion */}
<View style={{width: '100%', height: '65%', alignSelf: | render | identifier_name |
MapScreen.js | -118.44390,
},
{
latitude: 34.06241,
longitude: -118.44375,
},
],
clocation: {
latitude: 34.06637,
longitude:-118.44524,
},
dur: null,
dis: null,
saveWalk:{
startingLocation: null,
destinationLocation: null,
forZoom: { distance: Number, duration: Number, coordinates: [] }
},
modalVisible: false,
currentPath: "Current Path",
premadePath: false,
description: "",
name: "",
};
this.mapView = null;
}
// Start and Stop route button functionality
onStartWalk = () =>{
this.setState({
saveWalk:{
startingLocation:this.state.coordinates[1],
destinationLocation: this.state.coordinates[0]},
})
if(this.state.startValue=='Start'){
this.setState({
startValue:'Stop'
});
this.mapView.fitToCoordinates(this.state.forZoom.coordinates,{
edgePadding: {
right: (width / 10),
bottom: (height / 20),
left: (width / 10),
top: (height / 20),
}
}
);
}
else{
this.setState({
startValue:'Start'
});
this.mapView.fitToCoordinates(this.state.initialCoords,{
edgePadding: {
right: width,
bottom: height,
left: width,
top: height
}
}
);
}
}
// Saves user's walks to database
saveWalk = (visible) => {
var params = JSON.stringify({
'name': this.state.name,
'description': this.state.description,
'coordinates': this.state.coordinates,
'profile': [global.session_id],
});
axios
.post("http://127.0.0.1:8000/walks/", params,
{"headers": {
'content-Type': 'application/json'
}})
.then(this.setModalVisible(visible))
.catch(error => console.log(error)
);
console.log(params)
}
setModalVisible = (visible) => {
this.setState({modalVisible:visible});
}
// When map is pressed, route is created from current location
onMapPress = (e) => {
this.setState({
coordinates: [
e.nativeEvent.coordinate,
this.state.clocation
],
currentPath: "Current Path",
premadePath: false,
});
}
onRoutePress = (long, lat ) => {
this.setState({
coordinates: [
{latitude: lat, longitude: long},
this.state.clocation
],
});
}
// Getting walks data for explore portion
componentDidMount() {
this.currentLocation();
this.intervalID = setInterval(this.currentLocation.bind(this), 1000);
axios
.get("http://127.0.0.1:8000/walks")
.then(response => this.setState({walks: response.data.results}))
.catch(error => console.log(error)
);
}
componentWillUnmount() {
clearInterval(this.intervalID);
}
currentLocation = () => {
navigator.geolocation.getCurrentPosition(
position => {
const latitude = position.coords.latitude;
const longitude = position.coords.longitude;
this.setState({
clocation: {latitude, longitude}
});
},
{ enableHighAccuracy: true, timeout:20000, maximumAge: 1000}
);
};
setPremadePath = (item) => {
this.setState({
coordinates: item.coordinates,
currentPath: item.name,
premadePath: true,
})
console.log(item.coordinates)
}
render() {
const { modalVisible } = this.state;
let button;
button=
<TouchableOpacity style={Buttons.brownbuttonSmall}
onPress={() => this.setModalVisible(!modalVisible)}>
<Text style={{color:'white', alignSelf: "center"}}>Save</Text>
</TouchableOpacity>
return (
<View style={styles.container}>
{/* Map and current route display */}
<View style={{width: '100%', height: '65%', padding: '2%', alignSelf: 'center'}}>
<MapView
initialRegion={{
latitude: LATITUDE,
longitude: LONGITUDE,
latitudeDelta: LATITUDE_DELTA,
longitudeDelta: LONGITUDE_DELTA,
}}
style={StyleSheet.absoluteFill}
ref={c => this.mapView = c}
onPress={this.onMapPress}
zoomEnabled={true}
>
{this.state.coordinates.map((coordinate, index) =>
<Marker key={`coordinate_${index}`} coordinate={coordinate} />
)}
{(this.state.coordinates.length >= 2) && (
<MapViewDirections
origin={this.state.coordinates[0]}
destination={this.state.coordinates[this.state.coordinates.length-1]}
waypoints={this.state.coordinates}
mode="WALKING"
apikey={GOOGLE_MAPS_APIKEY}
strokeWidth={3}
strokeColor="blue"
optimizeWaypoints={true}
onStart={(params) => {
}}
onReady={result => {
this.setState({
dis: result.distance,
dur: result.duration,
forZoom: result
})
}}
onError={(errorMessage) => {
}}
/>
)}
<Marker coordinate={{latitude: this.state.clocation.latitude, longitude: this.state.clocation.longitude}}>
<MaterialIcons name="my-location" size={24} color={Colors.brown} />
</Marker>
</MapView>
<Text style={{marginTop: '10%', alignSelf: 'center', fontStyle: "italic", color: '#675a5a', backgroundColor: 'white'}}>
Click where you want to go on the map </Text>
{/* Current Path window on map */}
<View style={{ flex: 'stretch', backgroundColor: '#fffae3', padding: 3, borderRadius: 10, borderWidth: 2, borderColor: '#675a5a'}}>
<Text style={styles.title}>{this.state.currentPath}</Text>
<View style={{flexDirection: "row"}}>
<View style={{width: '50%'}}>
<Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Distance: {((this.state.dis)/1.609).toFixed(2)} miles</Text>
<Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Time: {((this.state.dur)/1).toFixed(2)} min.</Text>
</View>
<View style={{flex: 1, width: '50%', alignItems: 'flex-end', marginRight: '2%'}}>
{button}
</View>
</View>
</View>
</View>
<TouchableOpacity style={{padding: '2%', margin: '1%', backgroundColor: Colors.brown, borderRadius: 13, width: 400}}
onPress={ this.onStartWalk }
>
<Text style={{color: "white", fontSize: 15, alignSelf: "center"}}>{this.state.startValue}</Text>
</TouchableOpacity>
{/* Window for saving a route */}
<Modal style ={{marginTop: "50%"}}
animationType="slide"
transparent={true}
visible={modalVisible}
onRequestClose={() => {
this.setModalVisible(!modalVisible);
}}
>
<View style={{marginTop: '50%'}}>
<View style={styles.saveView}>
<Text style={[styles.modalText, {marginTop: '5%'}]}>Title</Text>
<TextInput
style={styles.input}
placeholder="Enter a title"
onChangeText={(name) => this.setState({name})}
/>
<Text style={styles.modalText}>Description</Text>
<TextInput
style={styles.input}
placeholder="Enter a description"
returnKeyType="done"
onChangeText={(description) => this.setState({description})}
/>
<TouchableOpacity
style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]}
onPress={() => this.saveWalk(!modalVisible)}
>
<Text style={{alignSelf: 'center', color: 'white'}}>Save</Text>
</TouchableOpacity>
<TouchableOpacity
style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]}
onPress={() => this.setModalVisible(!modalVisible)}
>
<Text style={{alignSelf: 'center', color: 'white'}}>Cancel</Text>
</TouchableOpacity>
</View>
</View>
</Modal>
{/* Explore other routes portion */}
<View style={{width: '100%', height: '65%', alignSelf: 'center', flex: 1, backgroundColor: '#F4ECC6', padding: 3, borderWidth: 1, borderColor: '#675a5a'}}>
<SafeAreaView style={{marginBottom: '6%'}}>
<Text style={styles.title}> Explore </Text>
<FlatList | data={this.state.walks}
renderItem={({item}) => (
<TouchableOpacity style={styles.item}
onPress={() => this.setPremadePath(item)}> | random_line_split |
|
MapScreen.js | constructor(props) {
super(props);
this.state = {
startValue: 'Start',
initialCoords:[
{latitude:34.073026, longitude:-118.465619},
{latitude:34.067223, longitude:-118.410851}
],
coordinates: [
{
latitude: 34.06279,
longitude: -118.44390,
},
{
latitude: 34.06241,
longitude: -118.44375,
},
],
clocation: {
latitude: 34.06637,
longitude:-118.44524,
},
dur: null,
dis: null,
saveWalk:{
startingLocation: null,
destinationLocation: null,
forZoom: { distance: Number, duration: Number, coordinates: [] }
},
modalVisible: false,
currentPath: "Current Path",
premadePath: false,
description: "",
name: "",
};
this.mapView = null;
}
// Start and Stop route button functionality
onStartWalk = () =>{
this.setState({
saveWalk:{
startingLocation:this.state.coordinates[1],
destinationLocation: this.state.coordinates[0]},
})
if(this.state.startValue=='Start') |
else{
this.setState({
startValue:'Start'
});
this.mapView.fitToCoordinates(this.state.initialCoords,{
edgePadding: {
right: width,
bottom: height,
left: width,
top: height
}
}
);
}
}
// Saves user's walks to database
saveWalk = (visible) => {
var params = JSON.stringify({
'name': this.state.name,
'description': this.state.description,
'coordinates': this.state.coordinates,
'profile': [global.session_id],
});
axios
.post("http://127.0.0.1:8000/walks/", params,
{"headers": {
'content-Type': 'application/json'
}})
.then(this.setModalVisible(visible))
.catch(error => console.log(error)
);
console.log(params)
}
setModalVisible = (visible) => {
this.setState({modalVisible:visible});
}
// When map is pressed, route is created from current location
onMapPress = (e) => {
this.setState({
coordinates: [
e.nativeEvent.coordinate,
this.state.clocation
],
currentPath: "Current Path",
premadePath: false,
});
}
onRoutePress = (long, lat ) => {
this.setState({
coordinates: [
{latitude: lat, longitude: long},
this.state.clocation
],
});
}
// Getting walks data for explore portion
componentDidMount() {
this.currentLocation();
this.intervalID = setInterval(this.currentLocation.bind(this), 1000);
axios
.get("http://127.0.0.1:8000/walks")
.then(response => this.setState({walks: response.data.results}))
.catch(error => console.log(error)
);
}
componentWillUnmount() {
clearInterval(this.intervalID);
}
currentLocation = () => {
navigator.geolocation.getCurrentPosition(
position => {
const latitude = position.coords.latitude;
const longitude = position.coords.longitude;
this.setState({
clocation: {latitude, longitude}
});
},
error => console.log(error),
{ enableHighAccuracy: true, timeout:20000, maximumAge: 1000}
);
};
setPremadePath = (item) => {
this.setState({
coordinates: item.coordinates,
currentPath: item.name,
premadePath: true,
})
console.log(item.coordinates)
}
render() {
const { modalVisible } = this.state;
let button;
button=
<TouchableOpacity style={Buttons.brownbuttonSmall}
onPress={() => this.setModalVisible(!modalVisible)}>
<Text style={{color:'white', alignSelf: "center"}}>Save</Text>
</TouchableOpacity>
return (
<View style={styles.container}>
{/* Map and current route display */}
<View style={{width: '100%', height: '65%', padding: '2%', alignSelf: 'center'}}>
<MapView
initialRegion={{
latitude: LATITUDE,
longitude: LONGITUDE,
latitudeDelta: LATITUDE_DELTA,
longitudeDelta: LONGITUDE_DELTA,
}}
style={StyleSheet.absoluteFill}
ref={c => this.mapView = c}
onPress={this.onMapPress}
zoomEnabled={true}
>
{this.state.coordinates.map((coordinate, index) =>
<Marker key={`coordinate_${index}`} coordinate={coordinate} />
)}
{(this.state.coordinates.length >= 2) && (
<MapViewDirections
origin={this.state.coordinates[0]}
destination={this.state.coordinates[this.state.coordinates.length-1]}
waypoints={this.state.coordinates}
mode="WALKING"
apikey={GOOGLE_MAPS_APIKEY}
strokeWidth={3}
strokeColor="blue"
optimizeWaypoints={true}
onStart={(params) => {
}}
onReady={result => {
this.setState({
dis: result.distance,
dur: result.duration,
forZoom: result
})
}}
onError={(errorMessage) => {
}}
/>
)}
<Marker coordinate={{latitude: this.state.clocation.latitude, longitude: this.state.clocation.longitude}}>
<MaterialIcons name="my-location" size={24} color={Colors.brown} />
</Marker>
</MapView>
<Text style={{marginTop: '10%', alignSelf: 'center', fontStyle: "italic", color: '#675a5a', backgroundColor: 'white'}}>
Click to where you want to go on the map </Text>
{/* Current Path window on map */}
<View style={{ flex: 'stretch', backgroundColor: '#fffae3', padding: 3, borderRadius: 10, borderWidth: 2, borderColor: '#675a5a'}}>
<Text style={styles.title}>{this.state.currentPath}</Text>
<View style={{flexDirection: "row"}}>
<View style={{width: '50%'}}>
<Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Distance: {((this.state.dis)/1.609).toFixed(2)} miles</Text>
<Text style={{marginLeft: '2%', marginBottom: '1%', fontSize: 12}}>Time: {((this.state.dur)/1).toFixed(2)} min.</Text>
</View>
<View style={{flex: 1, width: '50%', alignItems: 'flex-end', marginRight: '2%'}}>
{button}
</View>
</View>
</View>
</View>
<TouchableOpacity style={{padding: '2%', margin: '1%', backgroundColor: Colors.brown, borderRadius: 13, width: 400}}
onPress={ this.onStartWalk }
>
<Text style={{color: "white", fontSize: 15, alignSelf: "center"}}>{this.state.startValue}</Text>
</TouchableOpacity>
{/* Window for saving a route */}
<Modal style ={{marginTop: "50%"}}
animationType="slide"
transparent={true}
visible={modalVisible}
onRequestClose={() => {
this.setModalVisible(!modalVisible);
}}
>
<View style={{marginTop: '50%'}}>
<View style={styles.saveView}>
<Text style={[styles.modalText, {marginTop: '5%'}]}>Title</Text>
<TextInput
style={styles.input}
placeholder="Enter a title"
onChangeText={(name) => this.setState({name})}
/>
<Text style={styles.modalText}>Description</Text>
<TextInput
style={styles.input}
placeholder="Enter a description"
returnKeyType="done"
onChangeText={(description) => this.setState({description})}
/>
<TouchableOpacity
style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]}
onPress={() => this.saveWalk(!modalVisible)}
>
<Text style={{alignSelf: 'center', color: 'white'}}>Save</Text>
</TouchableOpacity>
<TouchableOpacity
style={[Buttons.brownbuttonSmall, {alignSelf: 'center', marginTop: '5%'}]}
onPress={() => this.setModalVisible(!modalVisible)}
>
<Text style={{alignSelf: 'center', color: 'white'}}>Cancel</Text>
</TouchableOpacity>
</View>
</View>
</Modal>
{/* Explore other routes portion */}
<View style={{width: '100%', height: '65%',
# yolov5_trt12.py
), int(hand_['2']['y']+y)), colors[0], thick)
cv2.line(img_, (int(hand_['2']['x']+x), int(hand_['2']['y']+y)),(int(hand_['3']['x']+x), int(hand_['3']['y']+y)), colors[0], thick)
cv2.line(img_, (int(hand_['3']['x']+x), int(hand_['3']['y']+y)),(int(hand_['4']['x']+x), int(hand_['4']['y']+y)), colors[0], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['5']['x']+x), int(hand_['5']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['5']['x']+x), int(hand_['5']['y']+y)),(int(hand_['6']['x']+x), int(hand_['6']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['6']['x']+x), int(hand_['6']['y']+y)),(int(hand_['7']['x']+x), int(hand_['7']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['7']['x']+x), int(hand_['7']['y']+y)),(int(hand_['8']['x']+x), int(hand_['8']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['9']['x']+x), int(hand_['9']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['9']['x']+x), int(hand_['9']['y']+y)),(int(hand_['10']['x']+x), int(hand_['10']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['10']['x']+x), int(hand_['10']['y']+y)),(int(hand_['11']['x']+x), int(hand_['11']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['11']['x']+x), int(hand_['11']['y']+y)),(int(hand_['12']['x']+x), int(hand_['12']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['13']['x']+x), int(hand_['13']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['13']['x']+x), int(hand_['13']['y']+y)),(int(hand_['14']['x']+x), int(hand_['14']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['14']['x']+x), int(hand_['14']['y']+y)),(int(hand_['15']['x']+x), int(hand_['15']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['15']['x']+x), int(hand_['15']['y']+y)),(int(hand_['16']['x']+x), int(hand_['16']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['17']['x']+x), int(hand_['17']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['17']['x']+x), int(hand_['17']['y']+y)),(int(hand_['18']['x']+x), int(hand_['18']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['18']['x']+x), int(hand_['18']['y']+y)),(int(hand_['19']['x']+x), int(hand_['19']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['19']['x']+x), int(hand_['19']['y']+y)),(int(hand_['20']['x']+x), int(hand_['20']['y']+y)), colors[4], thick)
def drawhand(img,outputs,img_width,img_height):
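# Note: outputs is assumed to be a flat array of 21 hand keypoints stored as
# (x, y) pairs normalised to [0, 1]; they are scaled back to pixel
# coordinates with the crop's width and height below.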
print(outputs)
pts_hand = {}
for i in range(int(outputs.shape[0] / 2)):
x = (outputs[i * 2 + 0] * float(img_width))
y = (outputs[i * 2 + 1] * float(img_height))
pts_hand[str(i)] = {}
pts_hand[str(i)] = {
"x": x,
"y": y,
}
draw_bd_handpose(img, pts_hand, 0, 0) # draw the lines connecting the hand keypoints
# ------------- draw the keypoints
for i in range(int(outputs.shape[0] / 2)):
x = (outputs[i * 2 + 0] * float(img_width))
y = (outputs[i * 2 + 1] * float(img_height))
cv2.circle(img, (int(x), int(y)), 3, (255, 50, 60), -1)
cv2.circle(img, (int(x), int(y)), 1, (255, 150, 180), -1)
class YoLov5TRT(object):
"""
description: A YOLOv5 class that wraps TensorRT ops, preprocess and postprocess ops.
"""
def __init__(self, engine_file_path):
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt_yolo = TrtLite(engine_file_path=engine_file_path)
trt_yolo.print_info()
self.buffers = trt_yolo.allocate_io_buffers(1, True)
self.trt_yolo = trt_yolo
# engine for the 21 hand keypoints
self.trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21)
self.trt_lite21.print_info()
# engine for gesture recognition
self.trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE)
self.trt_lite_gesture.print_info()
def doInference(self,image_path):
threading.Thread.__init__(self)
# Do image preprocess
input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_path)
self.buffers[0] = torch.from_numpy(input_image.ravel()).cuda()
bindings = [t.data_ptr() for t in self.buffers]
self.trt_yolo.execute(bindings, BATCH_SIZE)
host_outputs = self.buffers[1].clone().cpu().detach().numpy()
torch.cuda.synchronize()
print(host_outputs.shape)
output = host_outputs.ravel()
# Do postprocess
result_boxes, result_scores, result_classid = self.post_process(
output, origin_h, origin_w
)
print(output.shape,len(result_boxes))
# Draw rectangles and labels on the original image
for i in range(len(result_boxes)):
box = result_boxes[i]
print("box>>>",box)
# crop the hand region
image_hand = image_raw[int(box[1]):int(box[3]),int(box[0]):int(box[2])]
# run inference for the 21 hand keypoints
hand_data = self.preprocess_hand(image_hand)
output21 = self.doInference_resnet(self.trt_lite21,hand_data.ravel())
# run gesture inference
output_gesture = self.doInference_resnet(self.trt_lite_gesture, hand_data.ravel())
print("gesture:",output_gesture)
index = np.argmax(output_gesture)
label = labels[index]
hand_width = int(box[2])-int(box[0])
hand_height = int(box[3])-int(box[1])
drawhand(image_hand,output21,hand_width,hand_height)
print("w,h:",hand_width,hand_height)
cv2.imwrite("hand_11.jpg", image_hand)
plot_one_box(
box,
image_raw,
label="{}:{:.2f}".format(
label, result_scores[i]
),
)
parent, filename = os.path.split(image_path)
save_name = os.path.join(parent, "output_" + filename)
# Save image
cv2.imwrite(save_name, image_raw)
print("save img success")
def doInference_resnet(self,trt_engine, data):
i2shape = 1
io_info = trt_engine.get_io_info(i2shape)
print(io_info)
d_buffers = trt_engine.allocate_io_buffers(i2shape, True)
print(io_info[1][2])
d_buffers[0] = data.cuda()
bindings = [t.data_ptr() for t in d_buffers]
# yolov5_trt12.py
), int(hand_['2']['y']+y)), colors[0], thick)
cv2.line(img_, (int(hand_['2']['x']+x), int(hand_['2']['y']+y)),(int(hand_['3']['x']+x), int(hand_['3']['y']+y)), colors[0], thick)
cv2.line(img_, (int(hand_['3']['x']+x), int(hand_['3']['y']+y)),(int(hand_['4']['x']+x), int(hand_['4']['y']+y)), colors[0], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['5']['x']+x), int(hand_['5']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['5']['x']+x), int(hand_['5']['y']+y)),(int(hand_['6']['x']+x), int(hand_['6']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['6']['x']+x), int(hand_['6']['y']+y)),(int(hand_['7']['x']+x), int(hand_['7']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['7']['x']+x), int(hand_['7']['y']+y)),(int(hand_['8']['x']+x), int(hand_['8']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['9']['x']+x), int(hand_['9']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['9']['x']+x), int(hand_['9']['y']+y)),(int(hand_['10']['x']+x), int(hand_['10']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['10']['x']+x), int(hand_['10']['y']+y)),(int(hand_['11']['x']+x), int(hand_['11']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['11']['x']+x), int(hand_['11']['y']+y)),(int(hand_['12']['x']+x), int(hand_['12']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['13']['x']+x), int(hand_['13']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['13']['x']+x), int(hand_['13']['y']+y)),(int(hand_['14']['x']+x), int(hand_['14']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['14']['x']+x), int(hand_['14']['y']+y)),(int(hand_['15']['x']+x), int(hand_['15']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['15']['x']+x), int(hand_['15']['y']+y)),(int(hand_['16']['x']+x), int(hand_['16']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['17']['x']+x), int(hand_['17']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['17']['x']+x), int(hand_['17']['y']+y)),(int(hand_['18']['x']+x), int(hand_['18']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['18']['x']+x), int(hand_['18']['y']+y)),(int(hand_['19']['x']+x), int(hand_['19']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['19']['x']+x), int(hand_['19']['y']+y)),(int(hand_['20']['x']+x), int(hand_['20']['y']+y)), colors[4], thick)
def drawhand(img,outputs,img_width,img_height):
print(outputs)
pts_hand = {}
for i in range(int(outputs.shape[0] / 2)):
x = (outputs[i * 2 + 0] * float(img_width))
y = (outputs[i * 2 + 1] * float(img_height))
pts_hand[str(i)] = {}
pts_hand[str(i)] = {
"x": x,
"y": y,
}
draw_bd_handpose(img, pts_hand, 0, 0) # draw the lines connecting the hand keypoints
# ------------- draw the keypoints
for i in range(int(outputs.shape[0] / 2)):
x = (outputs[i * 2 + 0] * float(img_width))
y = (outputs[i * 2 + 1] * float(img_height))
cv2.circle(img, (int(x), int(y)), 3, (255, 50, 60), -1)
cv2.circle(img, (int(x), int(y)), 1, (255, 150, 180), -1)
class YoLov5TRT(object):
"""
description: A YOLOv5 class that wraps TensorRT ops, preprocess and postprocess ops.
"""
def __init__(self, engine_file_path):
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt_yolo = TrtLite(engine_file_path=engine_file_path)
trt_yolo.print_info()
self.buffers = trt_yolo.allocate_io_buffers(1, True)
self.trt_yolo = trt_yolo
# engine for the 21 hand keypoints
self.trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21)
self.trt_lite21.print_info()
# engine for gesture recognition
self.trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE)
self.trt_lite_gesture.print_info()
def doInference(self,image_path):
threading.Thread.__init__(self)
# Do image preprocess
input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_path)
self.buffers[0] = torch.from_numpy(input_image.ravel()).cuda()
bindings = [t.data_ptr() for t in self.buffers]
self.trt_yolo.execute(bindings, BATCH_SIZE)
host_outputs = self.buffers[1].clone().cpu().detach().numpy()
torch.cuda.synchronize()
print(host_outputs.shape)
output = host_outputs.ravel()
# Do postprocess
result_boxes, result_scores, result_classid = self.post_process(
output, origin_h, origin_w
)
print(output.shape,len(result_boxes))
# Draw rectangles and labels on the original image
for i in range(len(result_boxes)):
box = result_boxes[i]
print("box>>>",box)
# crop the hand region
image_hand = image_raw[int(box[1]):int(box[3]),int(box[0]):int(box[2])]
# run inference for the 21 hand keypoints
hand_data = self.preprocess_hand(image_hand)
output21 = self.doInference_resnet(self.trt_lite21,hand_data.ravel())
# run gesture inference
output_gesture = self.doInference_resnet(self.trt_lite_gesture, hand_data.ravel())
print("gesture:",output_gesture)
index = np.argmax(output_gesture)
label = labels[index]
hand_width = int(box[2])-int(box[0])
hand_height = int(box[3])-int(box[1])
drawhand(image_hand,output21,hand_width,hand_height)
print("w,h:",hand_width,hand_height)
cv2.imwrite("hand_11.jpg", image_hand)
plot_one_box(
box,
image_raw,
label="{}:{:.2f}".format(
label, result_scores[i]
),
)
parent, filename = os.path.split(image_path)
save_name = os.path.join(parent, "output_" + filename)
# Save image
cv2.imwrite(save_name, image_raw)
print("save img success")
def doInference_resnet(self,trt_engine, data):
i2shape = 1
io_info = trt_engine.get_io_info(i2shape)
print(io_info)
d_buffers = trt_engine.allocate_io_buffers(i2shape, True)
print(io_info[1][2])
d_buffers[0] = data.cuda()
bindings = [t.data_ptr() for t in d_buffers]
# yolov5_trt12.py
int(hand_['7']['x']+x), int(hand_['7']['y']+y)),(int(hand_['8']['x']+x), int(hand_['8']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['9']['x']+x), int(hand_['9']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['9']['x']+x), int(hand_['9']['y']+y)),(int(hand_['10']['x']+x), int(hand_['10']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['10']['x']+x), int(hand_['10']['y']+y)),(int(hand_['11']['x']+x), int(hand_['11']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['11']['x']+x), int(hand_['11']['y']+y)),(int(hand_['12']['x']+x), int(hand_['12']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['13']['x']+x), int(hand_['13']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['13']['x']+x), int(hand_['13']['y']+y)),(int(hand_['14']['x']+x), int(hand_['14']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['14']['x']+x), int(hand_['14']['y']+y)),(int(hand_['15']['x']+x), int(hand_['15']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['15']['x']+x), int(hand_['15']['y']+y)),(int(hand_['16']['x']+x), int(hand_['16']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['17']['x']+x), int(hand_['17']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['17']['x']+x), int(hand_['17']['y']+y)),(int(hand_['18']['x']+x), int(hand_['18']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['18']['x']+x), int(hand_['18']['y']+y)),(int(hand_['19']['x']+x), int(hand_['19']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['19']['x']+x), int(hand_['19']['y']+y)),(int(hand_['20']['x']+x), int(hand_['20']['y']+y)), colors[4], thick)
def drawhand(img,outputs,img_width,img_height):
print(outputs)
pts_hand = {}
for i in range(int(outputs.shape[0] / 2)):
x = (outputs[i * 2 + 0] * float(img_width))
y = (outputs[i * 2 + 1] * float(img_height))
pts_hand[str(i)] = {}
pts_hand[str(i)] = {
"x": x,
"y": y,
}
draw_bd_handpose(img, pts_hand, 0, 0) # draw the lines connecting the hand keypoints
# ------------- draw the keypoints
for i in range(int(outputs.shape[0] / 2)):
x = (outputs[i * 2 + 0] * float(img_width))
y = (outputs[i * 2 + 1] * float(img_height))
cv2.circle(img, (int(x), int(y)), 3, (255, 50, 60), -1)
cv2.circle(img, (int(x), int(y)), 1, (255, 150, 180), -1)
class YoLov5TRT(object):
"""
description: A YOLOv5 class that wraps TensorRT ops, preprocess and postprocess ops.
"""
def __init__(self, engine_file_path):
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt_yolo = TrtLite(engine_file_path=engine_file_path)
trt_yolo.print_info()
self.buffers = trt_yolo.allocate_io_buffers(1, True)
self.trt_yolo = trt_yolo
# engine for the 21 hand keypoints
self.trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21)
self.trt_lite21.print_info()
# engine for gesture recognition
self.trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE)
self.trt_lite_gesture.print_info()
def doInference(self,image_path):
threading.Thread.__init__(self)
# Do image preprocess
input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_path)
self.buffers[0] = torch.from_numpy(input_image.ravel()).cuda()
bindings = [t.data_ptr() for t in self.buffers]
self.trt_yolo.execute(bindings, BATCH_SIZE)
host_outputs = self.buffers[1].clone().cpu().detach().numpy()
torch.cuda.synchronize()
print(host_outputs.shape)
output = host_outputs.ravel()
# Do postprocess
result_boxes, result_scores, result_classid = self.post_process(
output, origin_h, origin_w
)
print(output.shape,len(result_boxes))
# Draw rectangles and labels on the original image
for i in range(len(result_boxes)):
box = result_boxes[i]
print("box>>>",box)
# crop the hand region
image_hand = image_raw[int(box[1]):int(box[3]),int(box[0]):int(box[2])]
# run inference for the 21 hand keypoints
hand_data = self.preprocess_hand(image_hand)
output21 = self.doInference_resnet(self.trt_lite21,hand_data.ravel())
# run gesture inference
output_gesture = self.doInference_resnet(self.trt_lite_gesture, hand_data.ravel())
print("gesture:",output_gesture)
index = np.argmax(output_gesture)
label = labels[index]
hand_width = int(box[2])-int(box[0])
hand_height = int(box[3])-int(box[1])
drawhand(image_hand,output21,hand_width,hand_height)
print("w,h:",hand_width,hand_height)
cv2.imwrite("hand_11.jpg", image_hand)
plot_one_box(
box,
image_raw,
label="{}:{:.2f}".format(
label, result_scores[i]
),
)
parent, filename = os.path.split(image_path)
save_name = os.path.join(parent, "output_" + filename)
# Save image
cv2.imwrite(save_name, image_raw)
print("save img success")
def doInference_resnet(self,trt_engine, data):
i2shape = 1
io_info = trt_engine.get_io_info(i2shape)
print(io_info)
d_buffers = trt_engine.allocate_io_buffers(i2shape, True)
print(io_info[1][2])
d_buffers[0] = data.cuda()
bindings = [t.data_ptr() for t in d_buffers]
# run inference
trt_engine.execute(bindings, i2shape)
#
output_data_trt = d_buffers[1].clone().cpu().detach().numpy()
torch.cuda.synchronize()
host_out = output_data_trt.ravel()
return host_out
def preprocess_hand(self,img):
img_width = img.shape[1]
img_height = img.shape[0]
print(img.shape)
# preprocess the input image
img_ = cv2.resize(img, (224,224), interpolation=cv2.INTER_CUBIC)
img_ = img_.astype(np.float32)
img_ = (img_ - 128.) / 256.
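# Note: assuming the model was trained with this convention, subtracting 128
# and dividing by 256 maps 8-bit pixel values into roughly [-0.5, 0.5).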
img_ = img_.transpose(2, 0, 1)
img_ = torch.from_numpy(img_)
img_ = img_.unsqueeze_(0)
return img_
def preprocess_image(self, input_image_path):
"""
description: Read an image from image path, convert it to RGB,
resize and pad it to target size, normalize to [0,1],
transform to NCHW format.
param:
input_image_path: str, image path
return:
image: the processed image
image_raw: the original image
h: original height
w: original width
""" | random_line_split |
|
yolov5_trt12.py | ), int(hand_['2']['y']+y)), colors[0], thick)
cv2.line(img_, (int(hand_['2']['x']+x), int(hand_['2']['y']+y)),(int(hand_['3']['x']+x), int(hand_['3']['y']+y)), colors[0], thick)
cv2.line(img_, (int(hand_['3']['x']+x), int(hand_['3']['y']+y)),(int(hand_['4']['x']+x), int(hand_['4']['y']+y)), colors[0], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['5']['x']+x), int(hand_['5']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['5']['x']+x), int(hand_['5']['y']+y)),(int(hand_['6']['x']+x), int(hand_['6']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['6']['x']+x), int(hand_['6']['y']+y)),(int(hand_['7']['x']+x), int(hand_['7']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['7']['x']+x), int(hand_['7']['y']+y)),(int(hand_['8']['x']+x), int(hand_['8']['y']+y)), colors[1], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['9']['x']+x), int(hand_['9']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['9']['x']+x), int(hand_['9']['y']+y)),(int(hand_['10']['x']+x), int(hand_['10']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['10']['x']+x), int(hand_['10']['y']+y)),(int(hand_['11']['x']+x), int(hand_['11']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['11']['x']+x), int(hand_['11']['y']+y)),(int(hand_['12']['x']+x), int(hand_['12']['y']+y)), colors[2], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['13']['x']+x), int(hand_['13']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['13']['x']+x), int(hand_['13']['y']+y)),(int(hand_['14']['x']+x), int(hand_['14']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['14']['x']+x), int(hand_['14']['y']+y)),(int(hand_['15']['x']+x), int(hand_['15']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['15']['x']+x), int(hand_['15']['y']+y)),(int(hand_['16']['x']+x), int(hand_['16']['y']+y)), colors[3], thick)
cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['17']['x']+x), int(hand_['17']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['17']['x']+x), int(hand_['17']['y']+y)),(int(hand_['18']['x']+x), int(hand_['18']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['18']['x']+x), int(hand_['18']['y']+y)),(int(hand_['19']['x']+x), int(hand_['19']['y']+y)), colors[4], thick)
cv2.line(img_, (int(hand_['19']['x']+x), int(hand_['19']['y']+y)),(int(hand_['20']['x']+x), int(hand_['20']['y']+y)), colors[4], thick)
def drawhand(img,outputs,img_width,img_height):
print(outputs)
pts_hand = {}
for i in range(int(outputs.shape[0] / 2)):
x = (outputs[i * 2 + 0] * float(img_width))
y = (outputs[i * 2 + 1] * float(img_height))
pts_hand[str(i)] = {}
pts_hand[str(i)] = {
"x": x,
"y": y,
}
draw_bd_handpose(img, pts_hand, 0, 0) # draw the lines connecting the hand keypoints
# ------------- draw the keypoints
for i in range(int(outputs.shape[0] / 2)):
x = (outputs[i * 2 + 0] * float(img_width))
y = (outputs[i * 2 + 1] * float(img_height))
cv2.circle(img, (int(x), int(y)), 3, (255, 50, 60), -1)
cv2.circle(img, (int(x), int(y)), 1, (255, 150, 180), -1)
class YoLov5TRT(object):
"""
description: A YOLOv5 class that wraps TensorRT ops, preprocess and postprocess ops.
"""
def __init__(self, engine_file_path):
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt_yolo = TrtLite(engine_file_path=engine_file_path)
trt_yolo.print_info()
self.buffers = trt_yolo.allocate_io_buffers(1, True)
self.trt_yolo = trt_yolo
# engine for the 21 hand keypoints
self.trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21)
self.trt_lite21.print_info()
# engine for gesture recognition
self.trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE)
self.trt_lite_gesture.print_info()
def doInference(self,image_path):
threading.Thread.__init__(self)
# Do image preprocess
input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_path)
self.buffers[0] = torch.from_numpy(input_image.ravel()).cuda()
bindings = [t.data_ptr() for t in self.buffers]
self.trt_yolo.execute(bindings, BATCH_SIZE)
host_outputs = self.buffers[1].clone().cpu().detach().numpy()
torch.cuda.synchronize()
print(host_outputs.shape)
output = host_outputs.ravel()
# Do postprocess
result_boxes, result_scores, result_classid = self.post_process(
output, origin_h, origin_w
)
print(output.shape,len(result_boxes))
# Draw rectangles and labels on the original image
for i in range(len(result_boxes)):
box = result_boxes[i]
print("box>>>",box)
# crop the hand region
image_hand = image_raw[int(box[1]):int(box[3]),int(box[0]):int(box[2])]
# run inference for the 21 hand keypoints
hand_data = self.preprocess_hand(image_hand)
output21 = self.doInference_resnet(self.trt_lite21,hand_data.ravel())
# run gesture inference
output_gesture = self.doInference_resnet(self.trt_lite_gesture, hand_data.ravel())
print("gesture:",output_gesture)
index = np.argmax(output_gesture)
label = labels[index]
hand_width = int(box[2])-int(box[0])
hand_height = int(box[3])-int(box[1])
drawhand(image_hand,output21,hand_width,hand_height)
print("w,h:",hand_width,hand_height)
cv2.imwrite("hand_11.jpg", image_hand)
plot_one_box(
box,
image_raw,
label="{}:{:.2f}".format(
label, result_scores[i]
),
)
parent, filename = os.path.split(image_path)
save_name = os.path.join(parent, "output_" + filename)
# Save image
cv2.imwrite(save_name, image_raw)
print("save img success")
def doInference_resnet(self,trt_engine, data):
i2shape = 1
io_info = trt_engine.get_io_info(i2shape)
print(io_info)
d_buffers = trt_engine.allocate_io_buffers(i2shape, True)
print(io_info[1][2])
d_buffers[0] = data.cuda()
bindings = [t.data_ptr() for t in d_buffers]
// game.js
// jetpack
this.jetpack = false;
this.jetpackTimer = 5000;
this.fireTick = 0; // tick to control fire frequency
var that = this;
var chkCol = function(t, h, e){
// check collision with map
// or with enemy
if(t == "map"){
var yBelow = Math.ceil(that.y)-1;
var xBelow1 = Math.floor(that.x);
var xBelow2 = Math.ceil(that.x);
// check collision with tiles below
// and on either side of hero
var tileBelow1 = game.levelObj.getTile(xBelow1, yBelow);
var tileBelow2 = game.levelObj.getTile(xBelow2, yBelow);
if(tileBelow1 == 1 || tileBelow2 == 1){
return true;
}
return false;
}else{
// check collision with object h and object e
var abs = Math.abs;
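// Note: this treats both objects as 1-tile-wide boxes and returns true
// when their positions differ by less than half a tile on both axes.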
return (abs(h.x - e.x) * 2 < (1)) && (abs(h.y - e.y) * 2 < (1));
}
};
this.update = function(i){
// add interval to all ticks
this.xTick += i;
this.yTick += i;
this.fireTick += i;
// check if any relevant keys are being pressed
var left = game.keysPressed.indexOf(37);
var right = game.keysPressed.indexOf(39);
var jetpack = game.keysPressed.indexOf(40);
var fire = game.keysPressed.indexOf(32);
// if down arrow is pressed, set jetpack to true and
// update jetpackTimer with interval
// if allowed jetpack time is exceeded, set jetpack to false
this.jetpack = jetpack > -1 ? true : false;
this.jetpackTimer = this.jetpack ? this.jetpackTimer - i : this.jetpackTimer;
if(this.jetpackTimer <= 0){
this.jetpack = false;
this.jetpackTimer = 0;
}
// check for right and left arrow key and set dx accordingly
this.dx = left > -1 ? -0.25 : right > -1 ? 0.25 : 0;
// check for space key and run fire method if pressed
if(fire > -1)this.fire();
// if jetpack is true, set dy accordingly
if(this.jetpack){
this.dy = 0.25;
}else{
// if jetpack is false, check for
// collision with tile and set dy to
// 0 if true and apply fall to dy if
// false.
if(this.y % 1 == 0){
if(chkCol('map')){
this.dy = 0;
this.yTick = 0;
}else{
this.dy = -0.25;
}
}
}
// apply dx and dy to x and y every 50 ms
// then reset x and y Ticks
if(this.xTick >= 50){
this.x += this.dx;
this.xTick = 0;
}
if(this.yTick >= 50){
this.y += this.dy;
this.yTick = 0;
}
// change directional frame accordingly
var f = this.frame;
if(this.dx > 0){
f = 1;
}else if(this.dx < 0){
f = 0;
}
// add 2 to frame if jetpack is true
if(this.jetpack && (f == 1 || f == 0)){
f = f + 2;
}else if(!this.jetpack && (f > 1)){
f = f - 2;
}
this.frame = f;
// check if hero has gone off screen and end game if so
if(this.x*game.ts > game.w || this.x*game.ts < 0 || this.y*game.ts > game.h || this.y*game.ts < game.topStart){
game.loadLevel('over');
}
// check for collision with enemy
// this also checks if there are no enemies
// left and loads the next level if that's
// the case
var levelOver = true;
for(var i in game.objQ){
if(game.objQ[i].constructor.name == 'Enemy'){
levelOver = false;
e = game.objQ[i];
if(chkCol('enemy', this, e)){
game.loadLevel('over');
break;
}
}
}
if(levelOver)loadLevel(game.currLevel + 1); // if level is complete load next level
};
this.draw = function(){
// draws the jetpack timer and the hero sprite
game.cx.fillStyle=txtColor;
game.cx.font = "12px verdana";
game.cx.fillText("Jetpack time remaining: " + (this.jetpackTimer/1000).toFixed(2) + " seconds", game.w-140, 25);
sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame);
};
this.fire = function(){
// fires a bullet every 250 ms if the fire
// key is pressed
if(this.fireTick > 250){
// create a new bullet object at the hero's
// position and add it to the object queue
var dir = [1,3].indexOf(this.frame) == -1 ? -1 : 1; // direction
var bullet = new Bullet(game, this.x, this.y, dir, 'bullet');
game.objQ.push(bullet);
this.fireTick = 0;
}
};
};
var Bullet = function(game, x, y, dir, sprite){
this.s = sprite;
this.x = x;
this.y = y;
this.dir = dir; // direction
this.animTick = 0; // move bullet every 25 ms
var chkCol = function(b, e){
// check collision between b and e objects
// b = bullet, e = enemy
var abs = Math.abs;
return (abs(b.x - e.x) * 2 < (1)) && (abs(b.y - e.y) * 2 < (1));
};
this.destroy = function(){
// if bullet exists in object queue
// remove it
var index = game.objQ.indexOf(this);
if(index > -1){
game.objQ.splice(index, 1);
}
};
this.update = function(i){
// update bullet x position every animTick
this.animTick += i;
if(this.animTick >= 25){
this.x += dir > 0 ? 0.25 : -0.25;
this.animTick = 0;
}
// loop through enemy objects and check for
// collision. If collided, destroy bullet
// and enemy.
for(var i in game.objQ){
if(game.objQ[i].constructor.name == 'Enemy'){
e = game.objQ[i];
if(chkCol(this, e)){
this.destroy();
e.destroy();
break;
}
}
}
// destroy bullet if gone off screen
if(this.x > game.tileW || this.x < 0 || this.y > game.tileH || this.y < 0){
this.destroy();
}
};
this.draw = function(){
// draw bullet
sprites.draw(this.s,this.x*game.ts,this.y*game.ts);
};
return this;
};
function Enemy(game, group, sprite) { // x, y, sx, sy,
this.s = sprite;
this.frame = 0; // start frame
// movement tick
this.xtick = 0;
this.ytick = 0;
// variables to restrict movement to given platform
// xs = xStart, xe = xEnd. They correspond to tile
// start and tile end positions
this.xs = typeof(group[0]) !== 'number' ? group[0][0] : group[0];
this.xe = typeof(group[0]) !== 'number' ? group[0][1]-1 : group[0]-1;
this.ys = typeof(group[1]) !== 'number' ? group[1][0] : group[1];
this.ye = typeof(group[1]) !== 'number' ? group[1][1]-1 : group[1]-1;
// set start position and speed
this.x = Math.floor(Math.random() * (this.xe - this.xs)) + this.xs;
this.y = Math.floor(Math.random() * (this.ye - this.ys)) + (this.ys+2);
this.dx = ((Math.random() * 100) + 25) * (Math.ceil((Math.random() * 2)) === 1 ? 1 : -1);
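// random horizontal speed between 25 and 125, with a random sign for direction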
|
// game.js
callback;
this.image.src = rImage;
};
this.draw = function(sprite, x, y, frame){
var s = this.data[sprite]; //this sprite
frame = !frame ? 0 : frame; //default frame is 0
cx.drawImage(this.image, s.sx + frame * s.w, s.sy, s.w, s.h, x, y, s.w*s.dimM, s.h*s.dimM);
};
};
/***********************/
/* private classes
/***********************/
var startScreen = function(){
// start screen.
// on update check if enter key is pressed
// load level 1 if enter is pressed
this.update = function(){
if(that.keysPressed.indexOf(13) > -1){
that.loadLevel(1);
}
};
this.draw = function(){
if(that.cx !== undefined){
that.cx.fillStyle=txtColor;
that.cx.font = "48px verdana";
that.cx.textAlign = "center";
that.cx.fillText("Gravity Control", that.w/2, (that.h/2) - 75);
that.cx.font = "14px verdana";
that.cx.fillText("The world as we know it has ended, even Gravity is out of wack.", that.w/2, that.h/2);
that.cx.fillText("Zombies are everywhere. Your mission is to kill as many of them as you can.", that.w/2, that.h/2 + 30);
that.cx.fillText("You have a jetpack with enough fuel for 5 seconds per level. Use it wisely.", that.w/2, that.h/2 + 60);
that.cx.font = "10px verdana";
that.cx.fillText("{left and right arrow} move from side to side, {space} fire, {up arrow} use jetpack.", that.w/2, that.h/2 + 100);
that.cx.font = "14px verdana";
that.cx.fillText("Press ENTER to start.", that.w/2, that.h/2 + 140);
}
};
};
var overScreen = function(){
// game over screen.
// display scores.
// on update check if enter or esc key is pressed
// load level 1 if enter is pressed
// load start screen if esc is pressed
var totalTime = that.timePlayed;
that.timePlayed = 0;
this.update = function(){
if(that.keysPressed.indexOf(27) > -1){
that.loadLevel('start');
}else if(that.keysPressed.indexOf(13) > -1){
that.loadLevel(1);
}
};
this.draw = function(){
if(that.cx !== undefined){
var timePerLevel = parseInt(totalTime/1000) / (that.currLevel - 1);
var scorePerLevelComplete = 100;
var score = (scorePerLevelComplete - timePerLevel) * (that.currLevel - 1);
score = isNaN(score) ? 0 : score;
var scoreMsg = score > 0 ? ". Well done." : ". Oops, better luck next time."
that.cx.fillStyle=txtColor;
that.cx.font = "48px verdana";
that.cx.fillText("Game Over", that.w/2, that.h/2 - 100);
that.cx.font = "16px verdana";
that.cx.fillText("You completed " + (that.currLevel - 1) + " levels in " + (totalTime/1000).toFixed(2) + " seconds.", that.w/2, that.h/2 -50);
that.cx.fillText("Your score is " + score + scoreMsg, that.w/2, that.h/2 -20);
that.cx.font = "12px verdana";
that.cx.fillText("Press ESC to go back to start screen.", that.w/2, that.h/2 + 50);
that.cx.fillText("Press ENTER to go restart at level 1.", that.w/2, that.h/2 + 80);
}
};
};
var Hero = function(game, x, y, sprite){
/***********************/
/* hero class
/***********************/
//start tile position
this.x = x;
this.y = y;
this.frame = 1; // start frame
this.s = sprite; // hero sprite
//hero movement speed
this.dx = 0;
this.dy = 0;
// tick used to control movement speed
// hero will move dx or dy distance every
// 50 ms
this.xTick = 0;
this.yTick = 0;
// jetpack
this.jetpack = false;
this.jetpackTimer = 5000;
this.fireTick = 0; // tick to control fire frequency
var that = this;
var chkCol = function(t, h, e){
// check collision with map
// or with enemy
if(t == "map"){
var yBelow = Math.ceil(that.y)-1;
var xBelow1 = Math.floor(that.x);
var xBelow2 = Math.ceil(that.x);
// check collision with tiles below
// and on either side of hero
var tileBelow1 = game.levelObj.getTile(xBelow1, yBelow);
var tileBelow2 = game.levelObj.getTile(xBelow2, yBelow);
if(tileBelow1 == 1 || tileBelow2 == 1){
return true;
}
return false;
}else{
// check collision with object h and object e
var abs = Math.abs;
return (abs(h.x - e.x) * 2 < (1)) && (abs(h.y - e.y) * 2 < (1));
}
};
this.update = function(i){
// add interval to all ticks
this.xTick += i;
this.yTick += i;
this.fireTick += i;
// check if any relevant keys are being pressed
var left = game.keysPressed.indexOf(37);
var right = game.keysPressed.indexOf(39);
var jetpack = game.keysPressed.indexOf(40);
var fire = game.keysPressed.indexOf(32);
// if down arrow is pressed, set jetpack to true and
// update jetpackTimer with interval
// if allowed jetpack time is exceeded, set jetpack to false
this.jetpack = jetpack > -1 ? true : false;
this.jetpackTimer = this.jetpack ? this.jetpackTimer - i : this.jetpackTimer;
if(this.jetpackTimer <= 0){
this.jetpack = false;
this.jetpackTimer = 0;
}
// check for right and left arrow key and set dx accordingly
this.dx = left > -1 ? -0.25 : right > -1 ? 0.25 : 0;
// check for space key and run fire method if pressed
if(fire > -1)this.fire();
// if jetpack is true, set dy accordingly
if(this.jetpack){
this.dy = 0.25;
}else{
// if jetpack is false, check for
// collision with tile and set dy to
// 0 if true and apply fall to dy if
// false.
if(this.y % 1 == 0){
if(chkCol('map')){
this.dy = 0;
this.yTick = 0;
}else{
this.dy = -0.25;
}
}
}
// apply dx and dy to x and y every 50 ms
// then reset x and y Ticks
if(this.xTick >= 50){
this.x += this.dx;
this.xTick = 0;
}
if(this.yTick >= 50){
this.y += this.dy;
this.yTick = 0;
}
// change directional frame accordingly
var f = this.frame;
if(this.dx > 0){
f = 1;
}else if(this.dx < 0){
f = 0;
}
// add 2 to frame if jetpack is true
if(this.jetpack && (f == 1 || f == 0)){
f = f + 2;
}else if(!this.jetpack && (f > 1)){
f = f - 2;
}
this.frame = f;
// check if hero has gone off screen and end game if so
if(this.x*game.ts > game.w || this.x*game.ts < 0 || this.y*game.ts > game.h || this.y*game.ts < game.topStart){
game.loadLevel('over');
}
// check for collision with enemy
// this also checks if there are no enemies
// left and loads the next level if that's
// the case
var levelOver = true;
for(var i in game.objQ){
if(game.objQ[i].
// game.js
);
that.cx.font = "16px verdana";
that.cx.fillText("You completed " + (that.currLevel - 1) + " levels in " + (totalTime/1000).toFixed(2) + " seconds.", that.w/2, that.h/2 -50);
that.cx.fillText("Your score is " + score + scoreMsg, that.w/2, that.h/2 -20);
that.cx.font = "12px verdana";
that.cx.fillText("Press ESC to go back to start screen.", that.w/2, that.h/2 + 50);
that.cx.fillText("Press ENTER to go restart at level 1.", that.w/2, that.h/2 + 80);
}
};
};
var Hero = function(game, x, y, sprite){
/***********************/
/* hero class
/***********************/
//start tile position
this.x = x;
this.y = y;
this.frame = 1; // start frame
this.s = sprite; // hero sprite
//hero movement speed
this.dx = 0;
this.dy = 0;
// tick used to control movement speed
// hero will move dx or dy distance every
// 50 ms
this.xTick = 0;
this.yTick = 0;
// jetpack
this.jetpack = false;
this.jetpackTimer = 5000;
this.fireTick = 0; // tick to control fire frequency
var that = this;
var chkCol = function(t, h, e){
// check collision with map
// or with enemy
if(t == "map"){
var yBelow = Math.ceil(that.y)-1;
var xBelow1 = Math.floor(that.x);
var xBelow2 = Math.ceil(that.x);
// check collision with tiles below
// and on either side of hero
var tileBelow1 = game.levelObj.getTile(xBelow1, yBelow);
var tileBelow2 = game.levelObj.getTile(xBelow2, yBelow);
if(tileBelow1 == 1 || tileBelow2 == 1){
return true;
}
return false;
}else{
// check collision with object h and object e
var abs = Math.abs;
return (abs(h.x - e.x) * 2 < (1)) && (abs(h.y - e.y) * 2 < (1));
}
};
this.update = function(i){
// add interval to all ticks
this.xTick += i;
this.yTick += i;
this.fireTick += i;
// check if any relevant keys are being pressed
var left = game.keysPressed.indexOf(37);
var right = game.keysPressed.indexOf(39);
var jetpack = game.keysPressed.indexOf(40);
var fire = game.keysPressed.indexOf(32);
// if down arrow is pressed, set jetpack to true and
// update jetpackTimer with interval
// if allowed jetpack time is exceeded, set jetpack to false
this.jetpack = jetpack > -1 ? true : false;
this.jetpackTimer = this.jetpack ? this.jetpackTimer - i : this.jetpackTimer;
if(this.jetpackTimer <= 0){
this.jetpack = false;
this.jetpackTimer = 0;
}
// check for right and left arrow key and set dx accordingly
this.dx = left > -1 ? -0.25 : right > -1 ? 0.25 : 0;
// check for space key and run fire method if pressed
if(fire > -1)this.fire();
// if jetpack is true, set dy accordingly
if(this.jetpack){
this.dy = 0.25;
}else{
// if jetpack is false, check for
// collision with tile and set dy to
// 0 if true and apply fall to dy if
// false.
if(this.y % 1 == 0){
if(chkCol('map')){
this.dy = 0;
this.yTick = 0;
}else{
this.dy = -0.25;
}
}
}
// apply dx and dy to x and y every 50 ms
// then reset x and y Ticks
if(this.xTick >= 50){
this.x += this.dx;
this.xTick = 0;
}
if(this.yTick >= 50){
this.y += this.dy;
this.yTick = 0;
}
// change directional frame accordingly
var f = this.frame;
if(this.dx > 0){
f = 1;
}else if(this.dx < 0){
f = 0;
}
// add 2 to frame if jetpack is true
if(this.jetpack && (f == 1 || f == 0)){
f = f + 2;
}else if(!this.jetpack && (f > 1)){
f = f - 2;
}
this.frame = f;
// check if hero has gone off screen and end game if so
if(this.x*game.ts > game.w || this.x*game.ts < 0 || this.y*game.ts > game.h || this.y*game.ts < game.topStart){
game.loadLevel('over');
}
// check for collision with enemy
// this also checks if there are no enemies
// left and loads the next level if that's
// the case
var levelOver = true;
for(var i in game.objQ){
if(game.objQ[i].constructor.name == 'Enemy'){
levelOver = false;
e = game.objQ[i];
if(chkCol('enemy', this, e)){
game.loadLevel('over');
break;
}
}
}
if(levelOver)loadLevel(game.currLevel + 1); // if level is complete load next level
};
this.draw = function(){
// draws the jetpack timer and the hero sprite
game.cx.fillStyle=txtColor;
game.cx.font = "12px verdana";
game.cx.fillText("Jetpack time remaining: " + (this.jetpackTimer/1000).toFixed(2) + " seconds", game.w-140, 25);
sprites.draw(this.s,this.x*game.ts,this.y*game.ts,this.frame);
};
this.fire = function(){
// fires a bullet every 250 ms if the fire
// key is pressed
if(this.fireTick > 250){
// create a new bullet object at the hero's
// position and add it to the object queue
var dir = [1,3].indexOf(this.frame) == -1 ? -1 : 1; // direction
var bullet = new Bullet(game, this.x, this.y, dir, 'bullet');
game.objQ.push(bullet);
this.fireTick = 0;
}
};
};
var Bullet = function(game, x, y, dir, sprite){
this.s = sprite;
this.x = x;
this.y = y;
this.dir = dir; // direction
this.animTick = 0; // move bullet every 25 ms
var chkCol = function(b, e){
// check collision between b and e objects
// b = bullet, e = enemy
var abs = Math.abs;
return (abs(b.x - e.x) * 2 < (1)) && (abs(b.y - e.y) * 2 < (1));
};
this.destroy = function(){
// if bullet exists in object queue
// remove it
var index = game.objQ.indexOf(this);
if(index > -1){
game.objQ.splice(index, 1);
}
};
this.update = function(i){
// update bullet x position every animTick
this.animTick += i;
if(this.animTick >= 25){
this.x += dir > 0 ? 0.25 : -0.25;
this.animTick = 0;
}
// loop through enemy objects and check for
// collision. If collided, destroy bullet
// and enemy.
for(var i in game.objQ){
if(game.objQ[i].constructor.name == 'Enemy'){
e = game.objQ[i];
if(chkCol(this, e)){
this.destroy();
e.destroy();
break;
}
}
}
// destroy bullet if gone off screen
if(this.x > game.tileW || this.x < 0 || this.y > game.tileH || this.y < 0){
this.destroy();
}
};
this.draw = function(){
// draw bullet
sprites.draw(this.s,this.x*game.ts,this.y*game.ts);
};
return this;
};
function Enemy
|
// game.js
;
this.image.src = rImage;
};
this.draw = function(sprite, x, y, frame){
var s = this.data[sprite]; //this sprite
frame = !frame ? 0 : frame; //default frame is 0
cx.drawImage(this.image, s.sx + frame * s.w, s.sy, s.w, s.h, x, y, s.w*s.dimM, s.h*s.dimM);
};
};
/***********************/
/* private classes
/***********************/
var startScreen = function(){
// start screen.
// on update check if enter key is pressed
// load level 1 if enter is pressed
this.update = function(){
if(that.keysPressed.indexOf(13) > -1){
that.loadLevel(1);
}
};
this.draw = function(){
if(that.cx !== undefined){
that.cx.fillStyle=txtColor;
that.cx.font = "48px verdana";
that.cx.textAlign = "center";
that.cx.fillText("Gravity Control", that.w/2, (that.h/2) - 75);
that.cx.font = "14px verdana";
that.cx.fillText("The world as we know it has ended, even Gravity is out of wack.", that.w/2, that.h/2);
that.cx.fillText("Zombies are everywhere. Your mission is to kill as many of them as you can.", that.w/2, that.h/2 + 30);
that.cx.fillText("You have a jetpack with enough fuel for 5 seconds per level. Use it wisely.", that.w/2, that.h/2 + 60);
that.cx.font = "10px verdana";
that.cx.fillText("{left and right arrow} move from side to side, {space} fire, {up arrow} use jetpack.", that.w/2, that.h/2 + 100);
that.cx.font = "14px verdana";
that.cx.fillText("Press ENTER to start.", that.w/2, that.h/2 + 140);
}
};
};
var overScreen = function(){
// game over screen.
// display scores.
// on update check if enter or esc key is pressed
// load level 1 if enter is pressed
// load start screen if esc is pressed
var totalTime = that.timePlayed;
that.timePlayed = 0;
this.update = function(){
if(that.keysPressed.indexOf(27) > -1){
that.loadLevel('start');
}else if(that.keysPressed.indexOf(13) > -1){
that.loadLevel(1);
}
};
this.draw = function(){
if(that.cx !== undefined){
var timePerLevel = parseInt(totalTime/1000) / (that.currLevel - 1);
var scorePerLevelComplete = 100;
var score = (scorePerLevelComplete - timePerLevel) * (that.currLevel - 1);
score = isNaN(score) ? 0 : score;
var scoreMsg = score > 0 ? ". Well done." : ". Oops, better luck next time."
that.cx.fillStyle=txtColor;
that.cx.font = "48px verdana";
that.cx.fillText("Game Over", that.w/2, that.h/2 - 100);
that.cx.font = "16px verdana";
that.cx.fillText("You completed " + (that.currLevel - 1) + " levels in " + (totalTime/1000).toFixed(2) + " seconds.", that.w/2, that.h/2 -50);
that.cx.fillText("Your score is " + score + scoreMsg, that.w/2, that.h/2 -20);
that.cx.font = "12px verdana";
that.cx.fillText("Press ESC to go back to start screen.", that.w/2, that.h/2 + 50);
that.cx.fillText("Press ENTER to go restart at level 1.", that.w/2, that.h/2 + 80);
}
};
};
var Hero = function(game, x, y, sprite){
/***********************/
/* hero class
/***********************/
//start tile position
this.x = x;
this.y = y;
this.frame = 1; // start frame
this.s = sprite; // hero sprite
//hero movement speed
this.dx = 0;
this.dy = 0;
// tick used to control movement speed
// hero will move dx or dy distance every
// 50 ms
this.xTick = 0;
this.yTick = 0;
// jetpack
this.jetpack = false;
this.jetpackTimer = 5000;
this.fireTick = 0; // tick to control fire frequency
var that = this;
var chkCol = function(t, h, e){
// check collision with map
// or with enemy
if(t == "map") | else{
// check collision with object h and object e
var abs = Math.abs;
return (abs(h.x - e.x) * 2 < (1)) && (abs(h.y - e.y) * 2 < (1));
}
};
this.update = function(i){
// add interval to all ticks
this.xTick += i;
this.yTick += i;
this.fireTick += i;
// check if any relevant keys are being pressed
var left = game.keysPressed.indexOf(37);
var right = game.keysPressed.indexOf(39);
var jetpack = game.keysPressed.indexOf(40);
var fire = game.keysPressed.indexOf(32);
// if down arrow is pressed, set jetpack to true and
// update jetpackTimer with interval
// if allowed jetpack time is exceeded, set jetpack to false
this.jetpack = jetpack > -1 ? true : false;
this.jetpackTimer = this.jetpack ? this.jetpackTimer - i : this.jetpackTimer;
if(this.jetpackTimer <= 0){
this.jetpack = false;
this.jetpackTimer = 0;
}
// check for right and left arrow key and set dx accordingly
this.dx = left > -1 ? -0.25 : right > -1 ? 0.25 : 0;
// check for space key and run fire method if pressed
if(fire > -1)this.fire();
// if jetpack is true, set dy accordingly
if(this.jetpack){
this.dy = 0.25;
}else{
// if jetpack is false, check for
// collision with tile and set dy to
// 0 if true and apply fall to dy if
// false.
if(this.y % 1 == 0){
if(chkCol('map')){
this.dy = 0;
this.yTick = 0;
}else{
this.dy = -0.25;
}
}
}
// apply dx and dy to x and y every 50 ms
// then reset x and y Ticks
if(this.xTick >= 50){
this.x += this.dx;
this.xTick = 0;
}
if(this.yTick >= 50){
this.y += this.dy;
this.yTick = 0;
}
// change directional frame accordingly
var f = this.frame;
if(this.dx > 0){
f = 1;
}else if(this.dx < 0){
f = 0;
}
// add 2 to frame if jetpack is true
if(this.jetpack && (f == 1 || f == 0)){
f = f + 2;
}else if(!this.jetpack && (f > 1)){
f = f - 2;
}
this.frame = f;
// check if hero has gone off screen and end game if so
if(this.x*game.ts > game.w || this.x*game.ts < 0 || this.y*game.ts > game.h || this.y*game.ts < game.topStart){
game.loadLevel('over');
}
// check for collision with enemy
// this also checks if there are no enemies
// left and loads the next level if that's
// the case
var levelOver = true;
for(var i in game.objQ){
if(game.objQ[i
// terminal.rs
terminal.rs | <Share<TermOut>>>,
input: Fwd<Key>,
termout: Share<TermOut>,
glue: Glue,
disable_output: bool,
paused: bool,
inbuf: Vec<u8>,
check_enable: bool,
force_timer: MaxTimerKey,
check_timer: MaxTimerKey,
cleanup: Vec<u8>,
panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>,
}
impl Terminal {
/// Set up the terminal. Sends a message back to `resize`
/// immediately, which provides a reference to the shared
/// [`TermOut`] which is used to buffer and flush terminal output
/// data.
///
/// Whenever the window size changes, a new `resize` message is
/// sent. When the terminal output is paused, `None` is sent to
/// `resize` to let the app know that there is no output available
/// right now.
///
/// Input keys received are sent to `input` once decoded.
///
/// In case of an error that can't be handled, cleans up the
/// terminal state and terminates the actor with
/// `ActorDied::Failed`. The actor that created the terminal can
/// catch that and do whatever cleanup is necessary before
/// aborting the process.
///
/// # Panic handling
///
/// When Rust panics, the terminal must be restored to its normal
/// state otherwise things would be left in a bad state for the
/// user (in cooked mode with no echo, requiring the user to
/// blindly type `reset` on the command-line). So this code saves
/// a copy of the current panic handler (using
/// `std::panic::take_hook`), and then installs its own handler
/// that does terminal cleanup before calling on to the saved
/// panic handler. This means that if any custom panic handler is
/// needed by the application, then it must be set up before the
/// call to [`Terminal::init`].
///
/// [`TermOut`]: struct.TermOut.html
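///
/// A rough wiring sketch (not taken from this crate's docs; the `App`
/// actor and its `resize`/`input` methods are illustrative, and a
/// standard stakker `actor!`/`fwd_to!` setup is assumed):
///
/// ```ignore
/// let app = actor!(stakker, App::init(), ret_nop!());
/// let term = actor!(
///     stakker,
///     Terminal::init(
///         fwd_to!([app], resize() as (Option<Share<TermOut>>)),
///         fwd_to!([app], input() as (Key)),
///     ),
///     ret_nop!()
/// );
/// ```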
pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> {
// TODO: Query TERM/terminfo/environment for features to put in Features
let features = Features { colour_256: false };
let term = cx.this().clone();
let glue = match Glue::new(cx, term) {
Ok(v) => v,
Err(e) => {
cx.fail(e);
return None;
}
};
let termout = Share::new(cx, TermOut::new(features));
let mut this = Self {
resize,
input,
termout,
glue,
disable_output: false,
paused: false,
inbuf: Vec::new(),
check_enable: false,
force_timer: MaxTimerKey::default(),
check_timer: MaxTimerKey::default(),
cleanup: b"\x1Bc".to_vec(),
panic_hook: Arc::new(std::panic::take_hook()),
};
this.handle_resize(cx);
this.update_panic_hook();
Some(this)
}
/// Enable or disable generation of the [`Key::Check`] keypress,
/// which occurs in a gap in typing, 300ms after the last key
/// pressed. This may be used to do validation if that's too
/// expensive to do on every keypress.
///
/// [`Key::Check`]: enum.Key.html#variant.Check
pub fn check(&mut self, _cx: CX![], enable: bool) {
self.check_enable = enable;
}
/// Ring the bell (i.e. beep) immediately. Doesn't wait for the
/// buffered terminal data to be flushed. Will output even when
/// paused.
pub fn bell(&mut self, cx: CX![]) {
if !self.disable_output {
if let Err(e) = self.glue.write(&b"\x07"[..]) {
self.disable_output = true;
self.failure(cx, e);
}
}
}
/// Pause terminal input and output handling. Sends the cleanup
/// sequence to the terminal, and switches to cooked mode. Sends
/// a `resize` message with `None` to tell the app that output is
/// disabled.
///
/// This call should be used before forking off a process which
/// might prompt the user and receive user input, otherwise this
/// process would compete with the sub-process for user input.
/// Resume after the subprocess has finished with the `resume`
/// call.
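///
/// A hedged usage sketch (assumes the usual stakker `call!` macro and an
/// illustrative `run_subprocess` helper defined by the application):
///
/// ```ignore
/// call!([term], pause());
/// run_subprocess(); // e.g. spawn $EDITOR and wait for it to exit
/// call!([term], resume());
/// ```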
pub fn pause(&mut self, cx: CX![]) {
if !self.paused {
fwd!([self.resize], None);
self.glue.input(false);
self.termout.rw(cx).discard();
self.termout.rw(cx).bytes(&self.cleanup[..]);
self.termout.rw(cx).flush();
self.flush(cx);
self.paused = true;
self.update_panic_hook();
}
}
/// Resume terminal output and input handling. Switches to raw
/// mode and sends a resize message to trigger a full redraw.
pub fn resume(&mut self, cx: CX![]) {
if self.paused {
self.paused = false;
self.glue.input(true);
self.termout.rw(cx).discard();
self.handle_resize(cx);
self.update_panic_hook();
}
}
// Handle an unrecoverable failure. Try to clean up before
// terminating the actor.
fn failure(&mut self, cx: CX![], e: impl Error + 'static) {
self.pause(cx);
cx.fail(e);
}
/// Flush to the terminal all the data that's ready for sending
/// from the TermOut buffer. Use [`TermOut::flush`] first to mark
/// the point up to which data should be flushed.
///
/// [`TermOut::flush`]: struct.TermOut.html#method.flush
pub fn flush(&mut self, cx: CX![]) {
if self.termout.rw(cx).new_cleanup.is_some() {
// Don't replace unless we're sure there's a new value
if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) {
self.cleanup = cleanup;
self.update_panic_hook();
}
}
if !self.disable_output {
if self.paused {
// Just drop the output whilst paused. We'll trigger
// a full refresh on resuming
self.termout.rw(cx).drain_flush();
} else {
let ob = self.termout.rw(cx);
let result = self.glue.write(ob.data_to_flush());
ob.drain_flush();
if let Err(e) = result {
self.disable_output = true;
self.failure(cx, e);
}
}
}
}
/// Handle a resize event from the TTY. Gets new size, and
/// notifies upstream.
pub(crate) fn handle_resize(&mut self, cx: CX![]) {
match self.glue.get_size() {
Ok((sy, sx)) => {
self.termout.rw(cx).set_size(sy, sx);
fwd!([self.resize], Some(self.termout.clone()));
}
Err(e) => self.failure(cx, e),
}
}
/// Handle an I/O error on the TTY input
pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) {
self.failure(cx, err);
}
/// Handle new bytes from the TTY input
pub(crate) fn handle_data_in(&mut self, cx: CX![]) {
self.glue.read_data(&mut self.inbuf);
self.do_data_in(cx, false);
}
fn do_data_in(&mut self, cx: CX![], force: bool) {
let mut pos = 0;
let len = self.inbuf.len();
if len != 0 {
if !force {
// Note that this is too fast to catch M-Esc passed
// through screen, as that seems to apply a 300ms
// pause between the two Esc chars. For everything
// else including real terminals it should be okay.
timer_max!(
&mut self.force_timer,
cx.now() + Duration::from_millis(100),
[cx],
do_data_in(true)
);
}
while pos < len {
match Key::decode(&self.inbuf[pos..len], force) {
None => break,
Some((count, key)) => {
pos += count;
fwd!([self.input], key);
if self.check_enable {
let check_expiry = cx.now() + Duration::from_millis(300);
timer_max!(&mut self.check_timer, check_expiry, [cx], check_key());
}
}
}
}
}
self.inbuf.drain(..pos);
}
fn check_key(&mut self, _cx: CX![]) {
if self.check_enable {
fwd!([self.input], Key::Check);
}
}
// Install a panic hook that (if necessary) outputs the current
// cleanup string
// terminal.rs
Share<TermOut>>>,
input: Fwd<Key>,
termout: Share<TermOut>,
glue: Glue,
disable_output: bool,
paused: bool,
inbuf: Vec<u8>,
check_enable: bool,
force_timer: MaxTimerKey,
check_timer: MaxTimerKey,
cleanup: Vec<u8>,
panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>,
}
impl Terminal {
/// Set up the terminal. Sends a message back to `resize`
/// immediately, which provides a reference to the shared
/// [`TermOut`] which is used to buffer and flush terminal output
/// data.
///
/// Whenever the window size changes, a new `resize` message is
/// sent. When the terminal output is paused, `None` is sent to
/// `resize` to let the app know that there is no output available
/// right now.
///
/// Input keys received are sent to `input` once decoded.
///
/// In case of an error that can't be handled, cleans up the
/// terminal state and terminates the actor with
/// `ActorDied::Failed`. The actor that created the terminal can
/// catch that and do whatever cleanup is necessary before
/// aborting the process.
///
/// # Panic handling
///
/// When Rust panics, the terminal must be restored to its normal
/// state otherwise things would be left in a bad state for the
/// user (in cooked mode with no echo, requiring the user to
/// blindly type `reset` on the command-line). So this code saves
/// a copy of the current panic handler (using
/// `std::panic::take_hook`), and then installs its own handler
/// that does terminal cleanup before calling on to the saved
/// panic handler. This means that if any custom panic handler is
/// needed by the application, then it must be set up before the
/// call to [`Terminal::init`].
///
/// [`TermOut`]: struct.TermOut.html
pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> {
// TODO: Query TERM/terminfo/environment for features to put in Features
let features = Features { colour_256: false };
let term = cx.this().clone();
let glue = match Glue::new(cx, term) {
Ok(v) => v,
Err(e) => {
cx.fail(e);
return None;
}
};
let termout = Share::new(cx, TermOut::new(features));
let mut this = Self {
resize,
input,
termout,
glue,
disable_output: false,
paused: false,
inbuf: Vec::new(),
check_enable: false,
force_timer: MaxTimerKey::default(),
check_timer: MaxTimerKey::default(),
cleanup: b"\x1Bc".to_vec(),
panic_hook: Arc::new(std::panic::take_hook()),
};
this.handle_resize(cx);
this.update_panic_hook();
Some(this)
}
/// Enable or disable generation of the [`Key::Check`] keypress,
/// which occurs in a gap in typing, 300ms after the last key
/// pressed. This may be used to do validation if that's too
/// expensive to do on every keypress.
///
/// [`Key::Check`]: enum.Key.html#variant.Check
pub fn check(&mut self, _cx: CX![], enable: bool) {
self.check_enable = enable;
}
/// Ring the bell (i.e. beep) immediately. Doesn't wait for the
/// buffered terminal data to be flushed. Will output even when
/// paused.
pub fn bell(&mut self, cx: CX![]) {
if !self.disable_output {
if let Err(e) = self.glue.write(&b"\x07"[..]) {
self.disable_output = true;
self.failure(cx, e);
}
}
}
/// Pause terminal input and output handling. Sends the cleanup
/// sequence to the terminal, and switches to cooked mode. Sends
/// a `resize` message with `None` to tell the app that output is
/// disabled.
///
/// This call should be used before forking off a process which
/// might prompt the user and receive user input, otherwise this
/// process would compete with the sub-process for user input.
/// Resume with the `resume` call after the subprocess has
/// finished.
pub fn pause(&mut self, cx: CX![]) |
/// Resume terminal output and input handling. Switches to raw
/// mode and sends a resize message to trigger a full redraw.
pub fn resume(&mut self, cx: CX![]) {
if self.paused {
self.paused = false;
self.glue.input(true);
self.termout.rw(cx).discard();
self.handle_resize(cx);
self.update_panic_hook();
}
}
// Handle an unrecoverable failure. Try to clean up before
// terminating the actor.
fn failure(&mut self, cx: CX![], e: impl Error + 'static) {
self.pause(cx);
cx.fail(e);
}
/// Flush to the terminal all the data that's ready for sending
/// from the TermOut buffer. Use [`TermOut::flush`] first to mark
/// the point up to which data should be flushed.
///
/// [`TermOut::flush`]: struct.TermOut.html#method.flush
pub fn flush(&mut self, cx: CX![]) {
if self.termout.rw(cx).new_cleanup.is_some() {
// Don't replace unless we're sure there's a new value
if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) {
self.cleanup = cleanup;
self.update_panic_hook();
}
}
if !self.disable_output {
if self.paused {
// Just drop the output whilst paused. We'll trigger
// a full refresh on resuming
self.termout.rw(cx).drain_flush();
} else {
let ob = self.termout.rw(cx);
let result = self.glue.write(ob.data_to_flush());
ob.drain_flush();
if let Err(e) = result {
self.disable_output = true;
self.failure(cx, e);
}
}
}
}
/// Handle a resize event from the TTY. Gets new size, and
/// notifies upstream.
pub(crate) fn handle_resize(&mut self, cx: CX![]) {
match self.glue.get_size() {
Ok((sy, sx)) => {
self.termout.rw(cx).set_size(sy, sx);
fwd!([self.resize], Some(self.termout.clone()));
}
Err(e) => self.failure(cx, e),
}
}
/// Handle an I/O error on the TTY input
pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) {
self.failure(cx, err);
}
/// Handle new bytes from the TTY input
pub(crate) fn handle_data_in(&mut self, cx: CX![]) {
self.glue.read_data(&mut self.inbuf);
self.do_data_in(cx, false);
}
fn do_data_in(&mut self, cx: CX![], force: bool) {
let mut pos = 0;
let len = self.inbuf.len();
if len != 0 {
if !force {
// Note that this is too fast to catch M-Esc passed
// through screen, as that seems to apply a 300ms
// pause between the two Esc chars. For everything
// else including real terminals it should be okay.
timer_max!(
&mut self.force_timer,
cx.now() + Duration::from_millis(100),
[cx],
do_data_in(true)
);
}
while pos < len {
match Key::decode(&self.inbuf[pos..len], force) {
None => break,
Some((count, key)) => {
pos += count;
fwd!([self.input], key);
if self.check_enable {
let check_expiry = cx.now() + Duration::from_millis(300);
timer_max!(&mut self.check_timer, check_expiry, [cx], check_key());
}
}
}
}
}
self.inbuf.drain(..pos);
}
fn check_key(&mut self, _cx: CX![]) {
if self.check_enable {
fwd!([self.input], Key::Check);
}
}
// Install a panic hook that (if necessary) outputs the current
// cleanup | {
if !self.paused {
fwd!([self.resize], None);
self.glue.input(false);
self.termout.rw(cx).discard();
self.termout.rw(cx).bytes(&self.cleanup[..]);
self.termout.rw(cx).flush();
self.flush(cx);
self.paused = true;
self.update_panic_hook();
}
} | identifier_body |
terminal.rs | Share<TermOut>>>,
input: Fwd<Key>,
termout: Share<TermOut>,
glue: Glue,
disable_output: bool,
paused: bool,
inbuf: Vec<u8>,
check_enable: bool,
force_timer: MaxTimerKey,
check_timer: MaxTimerKey,
cleanup: Vec<u8>,
panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>,
}
impl Terminal {
/// Set up the terminal. Sends a message back to `resize`
/// immediately, which provides a reference to the shared
/// [`TermOut`] which is used to buffer and flush terminal output
/// data.
///
/// Whenever the window size changes, a new `resize` message is
/// sent. When the terminal output is paused, `None` is sent to
/// `resize` to let the app know that there is no output available
/// right now.
///
/// Input keys received are sent to `input` once decoded.
///
/// In case of an error that can't be handled, cleans up the
/// terminal state and terminates the actor with
/// `ActorDied::Failed`. The actor that created the terminal can
/// catch that and do whatever cleanup is necessary before
/// aborting the process.
///
/// # Panic handling
///
/// When Rust panics, the terminal must be restored to its normal
/// state otherwise things would be left in a bad state for the
/// user (in cooked mode with no echo, requiring the user to
/// blindly type `reset` on the command-line). So this code saves
/// a copy of the current panic handler (using
/// `std::panic::take_hook`), and then installs its own handler
/// that does terminal cleanup before calling on to the saved
/// panic handler. This means that if any custom panic handler is
/// needed by the application, then it must be set up before the
/// call to [`Terminal::init`].
///
/// [`TermOut`]: struct.TermOut.html
pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> {
// TODO: Query TERM/terminfo/environment for features to put in Features
let features = Features { colour_256: false };
let term = cx.this().clone();
let glue = match Glue::new(cx, term) {
Ok(v) => v,
Err(e) => {
cx.fail(e);
return None;
}
};
let termout = Share::new(cx, TermOut::new(features));
let mut this = Self {
resize,
input,
termout,
glue,
disable_output: false,
paused: false,
inbuf: Vec::new(),
check_enable: false,
force_timer: MaxTimerKey::default(),
check_timer: MaxTimerKey::default(),
cleanup: b"\x1Bc".to_vec(),
panic_hook: Arc::new(std::panic::take_hook()),
};
this.handle_resize(cx);
this.update_panic_hook();
Some(this)
}
/// Enable or disable generation of the [`Key::Check`] keypress,
/// which occurs in a gap in typing, 300ms after the last key
/// pressed. This may be used to do validation if that's too
/// expensive to do on every keypress.
///
/// [`Key::Check`]: enum.Key.html#variant.Check
pub fn check(&mut self, _cx: CX![], enable: bool) {
self.check_enable = enable;
}
/// Ring the bell (i.e. beep) immediately. Doesn't wait for the
/// buffered terminal data to be flushed. Will output even when
/// paused.
pub fn bell(&mut self, cx: CX![]) {
if !self.disable_output {
if let Err(e) = self.glue.write(&b"\x07"[..]) {
self.disable_output = true;
self.failure(cx, e);
}
}
}
/// Pause terminal input and output handling. Sends the cleanup
/// sequence to the terminal, and switches to cooked mode. Sends
/// a `resize` message with `None` to tell the app that output is
/// disabled.
///
/// This call should be used before forking off a process which
/// might prompt the user and receive user input, otherwise this
/// process would compete with the sub-process for user input.
/// Resume with the `resume` call after the subprocess has
/// finished.
pub fn | (&mut self, cx: CX![]) {
if !self.paused {
fwd!([self.resize], None);
self.glue.input(false);
self.termout.rw(cx).discard();
self.termout.rw(cx).bytes(&self.cleanup[..]);
self.termout.rw(cx).flush();
self.flush(cx);
self.paused = true;
self.update_panic_hook();
}
}
/// Resume terminal output and input handling. Switches to raw
/// mode and sends a resize message to trigger a full redraw.
pub fn resume(&mut self, cx: CX![]) {
if self.paused {
self.paused = false;
self.glue.input(true);
self.termout.rw(cx).discard();
self.handle_resize(cx);
self.update_panic_hook();
}
}
// Handle an unrecoverable failure. Try to clean up before
// terminating the actor.
fn failure(&mut self, cx: CX![], e: impl Error + 'static) {
self.pause(cx);
cx.fail(e);
}
/// Flush to the terminal all the data that's ready for sending
/// from the TermOut buffer. Use [`TermOut::flush`] first to mark
/// the point up to which data should be flushed.
///
/// [`TermOut::flush`]: struct.TermOut.html#method.flush
pub fn flush(&mut self, cx: CX![]) {
if self.termout.rw(cx).new_cleanup.is_some() {
// Don't replace unless we're sure there's a new value
if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) {
self.cleanup = cleanup;
self.update_panic_hook();
}
}
if !self.disable_output {
if self.paused {
// Just drop the output whilst paused. We'll trigger
// a full refresh on resuming
self.termout.rw(cx).drain_flush();
} else {
let ob = self.termout.rw(cx);
let result = self.glue.write(ob.data_to_flush());
ob.drain_flush();
if let Err(e) = result {
self.disable_output = true;
self.failure(cx, e);
}
}
}
}
/// Handle a resize event from the TTY. Gets new size, and
/// notifies upstream.
pub(crate) fn handle_resize(&mut self, cx: CX![]) {
match self.glue.get_size() {
Ok((sy, sx)) => {
self.termout.rw(cx).set_size(sy, sx);
fwd!([self.resize], Some(self.termout.clone()));
}
Err(e) => self.failure(cx, e),
}
}
/// Handle an I/O error on the TTY input
pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) {
self.failure(cx, err);
}
/// Handle new bytes from the TTY input
pub(crate) fn handle_data_in(&mut self, cx: CX![]) {
self.glue.read_data(&mut self.inbuf);
self.do_data_in(cx, false);
}
fn do_data_in(&mut self, cx: CX![], force: bool) {
let mut pos = 0;
let len = self.inbuf.len();
if len != 0 {
if !force {
// Note that this is too fast to catch M-Esc passed
// through screen, as that seems to apply a 300ms
// pause between the two Esc chars. For everything
// else including real terminals it should be okay.
timer_max!(
&mut self.force_timer,
cx.now() + Duration::from_millis(100),
[cx],
do_data_in(true)
);
}
while pos < len {
match Key::decode(&self.inbuf[pos..len], force) {
None => break,
Some((count, key)) => {
pos += count;
fwd!([self.input], key);
if self.check_enable {
let check_expiry = cx.now() + Duration::from_millis(300);
timer_max!(&mut self.check_timer, check_expiry, [cx], check_key());
}
}
}
}
}
self.inbuf.drain(..pos);
}
fn check_key(&mut self, _cx: CX![]) {
if self.check_enable {
fwd!([self.input], Key::Check);
}
}
// Install a panic hook that (if necessary) outputs the current
// cleanup string | pause | identifier_name |
terminal.rs | Share<TermOut>>>,
input: Fwd<Key>,
termout: Share<TermOut>,
glue: Glue,
disable_output: bool,
paused: bool,
inbuf: Vec<u8>,
check_enable: bool,
force_timer: MaxTimerKey,
check_timer: MaxTimerKey,
cleanup: Vec<u8>,
panic_hook: Arc<Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>>,
}
impl Terminal {
/// Set up the terminal. Sends a message back to `resize`
/// immediately, which provides a reference to the shared
/// [`TermOut`] which is used to buffer and flush terminal output
/// data.
///
/// Whenever the window size changes, a new `resize` message is
/// sent. When the terminal output is paused, `None` is sent to
/// `resize` to let the app know that there is no output available
/// right now.
///
/// Input keys received are sent to `input` once decoded.
///
/// In case of an error that can't be handled, cleans up the
/// terminal state and terminates the actor with
/// `ActorDied::Failed`. The actor that created the terminal can
/// catch that and do whatever cleanup is necessary before
/// aborting the process.
///
/// # Panic handling
///
/// When Rust panics, the terminal must be restored to its normal
/// state otherwise things would be left in a bad state for the
/// user (in cooked mode with no echo, requiring the user to
/// blindly type `reset` on the command-line). So this code saves
/// a copy of the current panic handler (using
/// `std::panic::take_hook`), and then installs its own handler
/// that does terminal cleanup before calling on to the saved
/// panic handler. This means that if any custom panic handler is
/// needed by the application, then it must be set up before the
/// call to [`Terminal::init`].
///
/// [`TermOut`]: struct.TermOut.html
pub fn init(cx: CX![], resize: Fwd<Option<Share<TermOut>>>, input: Fwd<Key>) -> Option<Self> {
// TODO: Query TERM/terminfo/environment for features to put in Features
let features = Features { colour_256: false };
let term = cx.this().clone();
let glue = match Glue::new(cx, term) {
Ok(v) => v,
Err(e) => {
cx.fail(e);
return None;
}
};
let termout = Share::new(cx, TermOut::new(features));
let mut this = Self {
resize,
input,
termout,
glue,
disable_output: false,
paused: false,
inbuf: Vec::new(),
check_enable: false,
force_timer: MaxTimerKey::default(),
check_timer: MaxTimerKey::default(),
cleanup: b"\x1Bc".to_vec(),
panic_hook: Arc::new(std::panic::take_hook()),
};
this.handle_resize(cx);
this.update_panic_hook();
Some(this)
}
/// Enable or disable generation of the [`Key::Check`] keypress,
/// which occurs in a gap in typing, 300ms after the last key
/// pressed. This may be used to do validation if that's too
/// expensive to do on every keypress.
///
/// [`Key::Check`]: enum.Key.html#variant.Check
pub fn check(&mut self, _cx: CX![], enable: bool) {
self.check_enable = enable;
}
/// Ring the bell (i.e. beep) immediately. Doesn't wait for the
/// buffered terminal data to be flushed. Will output even when
/// paused.
pub fn bell(&mut self, cx: CX![]) {
if !self.disable_output {
if let Err(e) = self.glue.write(&b"\x07"[..]) {
self.disable_output = true;
self.failure(cx, e);
}
}
}
/// Pause terminal input and output handling. Sends the cleanup
/// sequence to the terminal, and switches to cooked mode. Sends
/// a `resize` message with `None` to tell the app that output is
/// disabled.
///
/// This call should be used before forking off a process which
/// might prompt the user and receive user input, otherwise this
/// process would compete with the sub-process for user input.
/// Resume with the `resume` call after the subprocess has
/// finished.
pub fn pause(&mut self, cx: CX![]) {
if !self.paused |
}
/// Resume terminal output and input handling. Switches to raw
/// mode and sends a resize message to trigger a full redraw.
pub fn resume(&mut self, cx: CX![]) {
if self.paused {
self.paused = false;
self.glue.input(true);
self.termout.rw(cx).discard();
self.handle_resize(cx);
self.update_panic_hook();
}
}
// Handle an unrecoverable failure. Try to clean up before
// terminating the actor.
fn failure(&mut self, cx: CX![], e: impl Error + 'static) {
self.pause(cx);
cx.fail(e);
}
/// Flush to the terminal all the data that's ready for sending
/// from the TermOut buffer. Use [`TermOut::flush`] first to mark
/// the point up to which data should be flushed.
///
/// [`TermOut::flush`]: struct.TermOut.html#method.flush
pub fn flush(&mut self, cx: CX![]) {
if self.termout.rw(cx).new_cleanup.is_some() {
// Don't replace unless we're sure there's a new value
if let Some(cleanup) = mem::replace(&mut self.termout.rw(cx).new_cleanup, None) {
self.cleanup = cleanup;
self.update_panic_hook();
}
}
if !self.disable_output {
if self.paused {
// Just drop the output whilst paused. We'll trigger
// a full refresh on resuming
self.termout.rw(cx).drain_flush();
} else {
let ob = self.termout.rw(cx);
let result = self.glue.write(ob.data_to_flush());
ob.drain_flush();
if let Err(e) = result {
self.disable_output = true;
self.failure(cx, e);
}
}
}
}
/// Handle a resize event from the TTY. Gets new size, and
/// notifies upstream.
pub(crate) fn handle_resize(&mut self, cx: CX![]) {
match self.glue.get_size() {
Ok((sy, sx)) => {
self.termout.rw(cx).set_size(sy, sx);
fwd!([self.resize], Some(self.termout.clone()));
}
Err(e) => self.failure(cx, e),
}
}
/// Handle an I/O error on the TTY input
pub(crate) fn handle_error_in(&mut self, cx: CX![], err: std::io::Error) {
self.failure(cx, err);
}
/// Handle new bytes from the TTY input
pub(crate) fn handle_data_in(&mut self, cx: CX![]) {
self.glue.read_data(&mut self.inbuf);
self.do_data_in(cx, false);
}
fn do_data_in(&mut self, cx: CX![], force: bool) {
let mut pos = 0;
let len = self.inbuf.len();
if len != 0 {
if !force {
// Note that this is too fast to catch M-Esc passed
// through screen, as that seems to apply a 300ms
// pause between the two Esc chars. For everything
// else including real terminals it should be okay.
timer_max!(
&mut self.force_timer,
cx.now() + Duration::from_millis(100),
[cx],
do_data_in(true)
);
}
while pos < len {
match Key::decode(&self.inbuf[pos..len], force) {
None => break,
Some((count, key)) => {
pos += count;
fwd!([self.input], key);
if self.check_enable {
let check_expiry = cx.now() + Duration::from_millis(300);
timer_max!(&mut self.check_timer, check_expiry, [cx], check_key());
}
}
}
}
}
self.inbuf.drain(..pos);
}
fn check_key(&mut self, _cx: CX![]) {
if self.check_enable {
fwd!([self.input], Key::Check);
}
}
// Install a panic hook that (if necessary) outputs the current
// cleanup | {
fwd!([self.resize], None);
self.glue.input(false);
self.termout.rw(cx).discard();
self.termout.rw(cx).bytes(&self.cleanup[..]);
self.termout.rw(cx).flush();
self.flush(cx);
self.paused = true;
self.update_panic_hook();
} | conditional_block |
importNet.js | 8086/query?db=mydb/";
const dataS = "q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ;
$.ajax({
url: "http://localhost:8086/query?db=mydb",
headers:{
'Authorization': 'Basic ' + btoa('admin:admin'),
},
type: 'POST',
data: {
q:"SELECT+value,region+FROM+cpu+WHERE+value=0.64",
},
success: function(data) { //we got the response
console.log(data);
},
error: function(test, status, exception) {
console.log("Error: " + exception);
}
});
/*
let query = 'cpu,host=serverA,region=new value=69';
$.ajax({
url:'http://localhost:8086/write?db=mydb',
type:'POST',
contentType:'application/octet-stream',
data: query,
processData: false,
success: function (data) {
console.info(data);
},
error: function(test, status, exception) {
console.log("Error: " + exception);
}
}); */
// dashboard structure template
let structure = {
__inputs: [],
__requires: [
{
type: "grafana",
id: "grafana",
name: "Grafana",
version: "5.4.0"
}
],
annotations: {
list: [
{
builtIn: 1,
datasource: "-- Grafana --",
enable: true,
hide: true,
iconColor: "rgba(0, 211, 255, 1)",
name: "Annotations & Alerts",
type: "dashboard"
}
]
},
editable: false,
gnetId: null,
graphTooltip: 0,
id: null,
links: [],
panels: [
{
type: "text",
title: "Warning from DreamCorp",
gridPos: {
x: 4,
y: 0,
w: 16,
h: 8
},
id: 0,
mode: "markdown",
content: "# This is a dashboard that include all the information about the net you imported. DO NOT enter edit mode because saving it would break our fecth data mechanism"
}
],
schemaVersion: 16,
style: "dark",
tags: ["bayesian-network"],
templating: {
list: [
{
allValue: null,
current: {
text: "nodo 1",
value: "nodo 1"
},
hide: 0,
includeAll: false,
label: "Nodo",
multi: false,
name: "Nodo",
options: [
{
selected: true,
text: "nodo 1",
value: "nodo 1"
}
],
query: "nodo 1",
skipUrlSync: false,
type: "custom"
}
]
},
time: {
from: "now-6h",
to: "now"
},
timepicker: {
refresh_intervals: [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
time_options: [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
timezone: "",
title: "Rete Bayesiana",
uid: "H39FJ39VMA12MD",
version: 3,
network: null
};
export class ImportNetCtrl {
/** @ngInject */
constructor(backendSrv, validationSrv, navModelSrv, $location, $routeParams) {
this.backendSrv = backendSrv;
this.validationSrv = validationSrv;
this.$location = $location;
this.$routeParams = $routeParams;
this.step = 1;
this.nameExists = false;
this.uidExists = false;
this.autoGenerateUid = true;
this.autoGenerateUidValue = 'auto-generated';
this.folderId = $routeParams.folderId ? Number($routeParams.folderId) || 0 : null;
this.initialFolderTitle = 'Select a folder';
// check gnetId in url
if ($routeParams.gnetId) {
this.gnetUrl = $routeParams.gnetId;
this.checkGnetDashboard();
}
}
static initProbs(net){
let prob_nodes = appCtrl.getProbs(); // replace appCtrl with netParser ==> it also has the integrity checks
for(let i=0;i<prob_nodes.length;i++)
net.nodi[i].probs = prob_nodes[i]; // add the probabilities
}
// CUSTOM
onUpload(net) {
this.network = net; // for the HTML
// we always receive a net; the dashboard template must be attached to it
ImportNetCtrl.initProbs(net);
structure.title = net.rete;
structure.network = net; // attach the piece we receive to the template
console.info("onUpload Rete: ");
console.info(structure.network);
//creating a db
let host ="http://localhost:8086";
let database ="bayesian";
const influx = new Influx(host,database);
influx.createDB().then(()=>{
console.info("database created");
let nodes = [];
let states = [];
let probs = [];
for(let i=0;i<net.nodi.length;i++){
nodes.push(net.nodi[i].id);
states.push(net.nodi[i].stati);
probs.push(net.nodi[i].probs);
}
/*
return influx.insert(nodes,states,probs)
.then(()=>console.info("inserted"));
*/
influx.insert(nodes,states,probs)
.then(()=>console.info("inserted"))
.then(()=>{
influx.retrieve(nodes).then((data)=>{
console.info("retrieved");
console.info(data);
});
});
}).catch((err)=>console.info(err));
this.dash = structure; // feed it the complete dashboard + net structure
this.dash.id = null;
this.step = 2;
this.inputs = [];
if (this.dash.__inputs) {
for (const input of this.dash.__inputs) {
const inputModel = {
name: input.name,
label: input.label,
info: input.description,
value: input.value,
type: input.type,
pluginId: input.pluginId,
options: [],
};
if (input.type === 'datasource') {
this.setDatasourceOptions(input, inputModel);
} else if (!inputModel.info) {
inputModel.info = 'Specify a string constant';
}
this.inputs.push(inputModel);
}
}
this.inputsValid = this.inputs.length === 0;
this.titleChanged();
this.uidChanged(true);
}
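// Sketch only: the Influx helper used in onUpload is assumed to expose a small
// promise-based surface along these lines. The real implementation lives in a
// separate module; the method names below match the calls above, but their
// bodies and exact signatures are assumptions made for illustration.
//
// class Influx {
//   constructor(host, database) { this.host = host; this.database = database; }
//   createDB() { /* POST /query with "CREATE DATABASE <database>" */ }
//   insert(nodes, states, probs) { /* POST /write with line-protocol points */ }
//   retrieve(nodes) { /* POST /query with a SELECT per node, resolves with rows */ }
// }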
setDatasourceOptions(input, inputModel) {
const sources = _.filter(config.datasources, val => {
return val.type === input.pluginId;
});
if (sources.length === 0) {
| se if (!inputModel.info) {
inputModel.info = 'Select a ' + input.pluginName + ' data source';
}
inputModel.options = sources.map(val => {
return { text: val.name, value: val.name };
});
}
inputValueChanged() {
this.inputsValid = true;
for (const input of this.inputs) {
if (!input.value) {
this.inputsValid = false;
}
}
}
titleChanged() {
this.titleTouched = true;
this.nameExists = false;
this.validationSrv
.validateNewDashboardName(this.folderId, this.dash.title)
.then(() => {
this.nameExists = false;
this.hasNameValidationError = false;
})
.catch(err => {
if (err.type === 'EXISTING') {
this.nameExists = true;
}
this.hasNameValidationError = true;
this.nameValidationError = err.message;
});
}
uidChanged(initial) {
this.uidExists = false;
this.hasUidValidationError = false;
if (initial === true && this.dash.uid) {
this.autoGenerateUidValue = 'value set';
}
this.backendSrv
.getDashboardByUid(this.dash.uid)
.then(res => {
this.uidExists = true;
this.hasUidValidationError = true;
this.uidValidationError = `Dashboard named '${res.dashboard.title}' in folder '${
res.meta.folderTitle
}' has the same uid`;
})
.catch(err | inputModel.info = 'No data sources of type ' + input.pluginName + ' found';
} el | conditional_block |
importNet.js | :8086/query?db=mydb/";
const dataS = "q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ;
$.ajax({
url: "http://localhost:8086/query?db=mydb",
headers:{
'Authorization': 'Basic ' + btoa('admin:admin'),
},
type: 'POST',
data: {
q:"SELECT+value,region+FROM+cpu+WHERE+value=0.64",
},
success: function(data) { //we got the response
console.log(data);
},
error: function(test, status, exception) {
console.log("Error: " + exception);
}
});
/*
let query = 'cpu,host=serverA,region=new value=69';
$.ajax({
url:'http://localhost:8086/write?db=mydb',
type:'POST',
contentType:'application/octet-stream',
data: query,
processData: false,
success: function (data) {
console.info(data);
},
error: function(test, status, exception) {
console.log("Error: " + exception);
}
}); */
// dashboard structure template
let structure = {
__inputs: [],
__requires: [
{
type: "grafana",
id: "grafana",
name: "Grafana",
version: "5.4.0"
}
],
annotations: {
list: [
{
builtIn: 1,
datasource: "-- Grafana --",
enable: true,
hide: true,
iconColor: "rgba(0, 211, 255, 1)",
name: "Annotations & Alerts",
type: "dashboard"
}
]
},
editable: false,
gnetId: null,
graphTooltip: 0,
id: null,
links: [],
panels: [
{
type: "text",
title: "Warning from DreamCorp",
gridPos: {
x: 4,
y: 0,
w: 16,
h: 8
},
id: 0,
mode: "markdown",
content: "# This is a dashboard that include all the information about the net you imported. DO NOT enter edit mode because saving it would break our fecth data mechanism"
}
],
schemaVersion: 16,
style: "dark",
tags: ["bayesian-network"],
templating: {
list: [
{
allValue: null,
current: {
text: "nodo 1",
value: "nodo 1"
},
hide: 0,
includeAll: false,
label: "Nodo",
multi: false,
name: "Nodo",
options: [
{
selected: true,
text: "nodo 1",
value: "nodo 1"
}
],
query: "nodo 1",
skipUrlSync: false,
type: "custom"
}
]
},
time: {
from: "now-6h",
to: "now"
},
timepicker: {
refresh_intervals: [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
time_options: [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
timezone: "",
title: "Rete Bayesiana",
uid: "H39FJ39VMA12MD",
version: 3,
network: null
};
export class ImportNetCtrl {
/** @ngInject */
constructor(backendSrv, validationSrv, navModelSrv, $location, $routeParams) {
this.backendSrv = backendSrv;
this.validationSrv = validationSrv;
this.$location = $location;
this.$routeParams = $routeParams;
this.step = 1;
this.nameExists = false;
this.uidExists = false;
this.autoGenerateUid = true;
this.autoGenerateUidValue = 'auto-generated';
this.folderId = $routeParams.folderId ? Number($routeParams.folderId) || 0 : null;
this.initialFolderTitle = 'Select a folder';
// check gnetId in url
if ($routeParams.gnetId) {
this.gnetUrl = $routeParams.gnetId;
this.checkGnetDashboard();
}
}
static initProbs(net){
let prob_nodes = appCtrl.getProbs(); // replace appCtrl with netParser ==> it also has the integrity checks
for(let i=0;i<prob_nodes.length;i++)
net.nodi[i].probs = prob_nodes[i]; // add the probabilities
}
// CUSTOM
onUpload(net) {
this.network = net; // for the HTML
// we always receive a net; the dashboard template must be attached to it
ImportNetCtrl.initProbs(net);
structure.title = net.rete;
structure.network = net; // attach the piece we receive to the template
console.info("onUpload Rete: ");
console.info(structure.network);
//creating a db
let host ="http://localhost:8086";
let database ="bayesian";
const influx = new Influx(host,database);
influx.createDB().then(()=>{
console.info("database created");
let nodes = [];
let states = [];
let probs = [];
for(let i=0;i<net.nodi.length;i++){
nodes.push(net.nodi[i].id);
states.push(net.nodi[i].stati);
probs.push(net.nodi[i].probs);
}
/*
return influx.insert(nodes,states,probs)
.then(()=>console.info("inserted"));
*/
influx.insert(nodes,states,probs)
.then(()=>console.info("inserted"))
.then(()=>{
influx.retrieve(nodes).then((data)=>{
console.info("retrieved");
console.info(data);
});
});
}).catch((err)=>console.info(err));
this.dash = structure; // feed it the complete dashboard + net structure
this.dash.id = null;
this.step = 2;
this.inputs = [];
if (this.dash.__inputs) {
for (const input of this.dash.__inputs) {
const inputModel = {
name: input.name,
label: input.label,
info: input.description,
value: input.value,
type: input.type,
pluginId: input.pluginId,
options: [],
};
if (input.type === 'datasource') {
this.setDatasourceOptions(input, inputModel);
} else if (!inputModel.info) {
inputModel.info = 'Specify a string constant';
}
this.inputs.push(inputModel);
}
}
this.inputsValid = this.inputs.length === 0;
this.titleChanged();
this.uidChanged(true);
}
setDatasourceOptions(input, inputModel) {
const sources = _.filter(config.datasources, val => {
return val.type === input.pluginId;
});
if (sources.length === 0) {
inputModel.info = 'No data sources of type ' + input.pluginName + ' found';
} else if (!inputModel.info) {
inputModel.info = 'Select a ' + input.pluginName + ' data source';
}
inputModel.options = sources.map(val => {
return { text: val.name, value: val.name };
});
}
inputValueChanged() {
this.inputsValid = true;
for (const input of this.inputs) {
if (!input.value) {
this.inputsValid = false;
}
}
}
titleChanged() {
this.titleTouched = true;
this.nameExists = false;
this.validationSrv
.validateNewDashboardName(this.folderId, this.dash.title)
.then(() => {
this.nameExists = false;
this.hasNameValidationError = false;
})
.catch(err => {
if (err.type === 'EXISTING') {
this.nameExists = true;
} |
this.hasNameValidationError = true;
this.nameValidationError = err.message;
});
}
uidChanged(initial) {
this.uidExists = false;
this.hasUidValidationError = false;
if (initial === true && this.dash.uid) {
this.autoGenerateUidValue = 'value set';
}
this.backendSrv
.getDashboardByUid(this.dash.uid)
.then(res => {
this.uidExists = true;
this.hasUidValidationError = true;
this.uidValidationError = `Dashboard named '${res.dashboard.title}' in folder '${
res.meta.folderTitle
}' has the same uid`;
})
.catch(err => | random_line_split |
|
importNet.js | :8086/query?db=mydb/";
const dataS = "q=SELECT+value,region+FROM+cpu+WHERE+value=0.64" ;
$.ajax({
url: "http://localhost:8086/query?db=mydb",
headers:{
'Authorization': 'Basic ' + btoa('admin:admin'),
},
type: 'POST',
data: {
q:"SELECT+value,region+FROM+cpu+WHERE+value=0.64",
},
success: function(data) { //we got the response
console.log(data);
},
error: function(test, status, exception) {
console.log("Error: " + exception);
}
});
/*
let query = 'cpu,host=serverA,region=new value=69';
$.ajax({
url:'http://localhost:8086/write?db=mydb',
type:'POST',
contentType:'application/octet-stream',
data: query,
processData: false,
success: function (data) {
console.info(data);
},
error: function(test, status, exception) {
console.log("Error: " + exception);
}
}); */
// dashboard structure template
let structure = {
__inputs: [],
__requires: [
{
type: "grafana",
id: "grafana",
name: "Grafana",
version: "5.4.0"
}
],
annotations: {
list: [
{
builtIn: 1,
datasource: "-- Grafana --",
enable: true,
hide: true,
iconColor: "rgba(0, 211, 255, 1)",
name: "Annotations & Alerts",
type: "dashboard"
}
]
},
editable: false,
gnetId: null,
graphTooltip: 0,
id: null,
links: [],
panels: [
{
type: "text",
title: "Warning from DreamCorp",
gridPos: {
x: 4,
y: 0,
w: 16,
h: 8
},
id: 0,
mode: "markdown",
content: "# This is a dashboard that include all the information about the net you imported. DO NOT enter edit mode because saving it would break our fecth data mechanism"
}
],
schemaVersion: 16,
style: "dark",
tags: ["bayesian-network"],
templating: {
list: [
{
allValue: null,
current: {
text: "nodo 1",
value: "nodo 1"
},
hide: 0,
includeAll: false,
label: "Nodo",
multi: false,
name: "Nodo",
options: [
{
selected: true,
text: "nodo 1",
value: "nodo 1"
}
],
query: "nodo 1",
skipUrlSync: false,
type: "custom"
}
]
},
time: {
from: "now-6h",
to: "now"
},
timepicker: {
refresh_intervals: [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
time_options: [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
timezone: "",
title: "Rete Bayesiana",
uid: "H39FJ39VMA12MD",
version: 3,
network: null
};
export class ImportNetCtrl {
/** @ngInject */
constructor(backendSrv, validationSrv, navModelSrv, $location, $routeParams) {
this.backendSrv = backendSrv;
this.validationSrv = validationSrv;
this.$location = $location;
this.$routeParams = $routeParams;
this.step = 1;
this.nameExists = false;
this.uidExists = false;
this.autoGenerateUid = true;
this.autoGenerateUidValue = 'auto-generated';
this.folderId = $routeParams.folderId ? Number($routeParams.folderId) || 0 : null;
this.initialFolderTitle = 'Select a folder';
// check gnetId in url
if ($routeParams.gnetId) {
this.gnetUrl = $routeParams.gnetId;
this.checkGnetDashboard();
}
}
static initProbs(net){
let prob_nodes = appCtrl.getProbs(); // replace appCtrl with netParser ==> it also has the integrity checks
for(let i=0;i<prob_nodes.length;i++)
net.nodi[i].probs = prob_nodes[i]; // add the probabilities
}
// CUSTOM
onUpload(net) {
| states.push(net.nodi[i].stati);
probs.push(net.nodi[i].probs);
}
/*
return influx.insert(nodes,states,probs)
.then(()=>console.info("inserted"));
*/
influx.insert(nodes,states,probs)
.then(()=>console.info("inserted"))
.then(()=>{
influx.retrieve(nodes).then((data)=>{
console.info("retrieved");
console.info(data);
});
});
}).catch((err)=>console.info(err));
this.dash = structure; // feed it the complete dashboard + net structure
this.dash.id = null;
this.step = 2;
this.inputs = [];
if (this.dash.__inputs) {
for (const input of this.dash.__inputs) {
const inputModel = {
name: input.name,
label: input.label,
info: input.description,
value: input.value,
type: input.type,
pluginId: input.pluginId,
options: [],
};
if (input.type === 'datasource') {
this.setDatasourceOptions(input, inputModel);
} else if (!inputModel.info) {
inputModel.info = 'Specify a string constant';
}
this.inputs.push(inputModel);
}
}
this.inputsValid = this.inputs.length === 0;
this.titleChanged();
this.uidChanged(true);
}
setDatasourceOptions(input, inputModel) {
const sources = _.filter(config.datasources, val => {
return val.type === input.pluginId;
});
if (sources.length === 0) {
inputModel.info = 'No data sources of type ' + input.pluginName + ' found';
} else if (!inputModel.info) {
inputModel.info = 'Select a ' + input.pluginName + ' data source';
}
inputModel.options = sources.map(val => {
return { text: val.name, value: val.name };
});
}
inputValueChanged() {
this.inputsValid = true;
for (const input of this.inputs) {
if (!input.value) {
this.inputsValid = false;
}
}
}
titleChanged() {
this.titleTouched = true;
this.nameExists = false;
this.validationSrv
.validateNewDashboardName(this.folderId, this.dash.title)
.then(() => {
this.nameExists = false;
this.hasNameValidationError = false;
})
.catch(err => {
if (err.type === 'EXISTING') {
this.nameExists = true;
}
this.hasNameValidationError = true;
this.nameValidationError = err.message;
});
}
uidChanged(initial) {
this.uidExists = false;
this.hasUidValidationError = false;
if (initial === true && this.dash.uid) {
this.autoGenerateUidValue = 'value set';
}
this.backendSrv
.getDashboardByUid(this.dash.uid)
.then(res => {
this.uidExists = true;
this.hasUidValidationError = true;
this.uidValidationError = `Dashboard named '${res.dashboard.title}' in folder '${
res.meta.folderTitle
}' has the same uid`;
})
.catch(err => | this.network = net; // for the HTML
// we always receive a net; the dashboard template must be attached to it
ImportNetCtrl.initProbs(net);
structure.title = net.rete;
structure.network = net; // attach the piece we receive to the template
console.info("onUpload Rete: ");
console.info(structure.network);
//creating a db
let host ="http://localhost:8086";
let database ="bayesian";
const influx = new Influx(host,database);
influx.createDB().then(()=>{
console.info("database created");
let nodes = [];
let states = [];
let probs = [];
for(let i=0;i<net.nodi.length;i++){
nodes.push(net.nodi[i].id); | identifier_body |
importNet.js | admin'),
},
type: 'POST',
data: {
q:"SELECT+value,region+FROM+cpu+WHERE+value=0.64",
},
success: function(data) { //we got the response
console.log(data);
},
error: function(test, status, exception) {
console.log("Error: " + exception);
}
});
/*
let query = 'cpu,host=serverA,region=new value=69';
$.ajax({
url:'http://localhost:8086/write?db=mydb',
type:'POST',
contentType:'application/octet-stream',
data: query,
processData: false,
success: function (data) {
console.info(data);
},
error: function(test, status, exception) {
console.log("Error: " + exception);
}
}); */
// dashboard structure template
let structure = {
__inputs: [],
__requires: [
{
type: "grafana",
id: "grafana",
name: "Grafana",
version: "5.4.0"
}
],
annotations: {
list: [
{
builtIn: 1,
datasource: "-- Grafana --",
enable: true,
hide: true,
iconColor: "rgba(0, 211, 255, 1)",
name: "Annotations & Alerts",
type: "dashboard"
}
]
},
editable: false,
gnetId: null,
graphTooltip: 0,
id: null,
links: [],
panels: [
{
type: "text",
title: "Warning from DreamCorp",
gridPos: {
x: 4,
y: 0,
w: 16,
h: 8
},
id: 0,
mode: "markdown",
content: "# This is a dashboard that include all the information about the net you imported. DO NOT enter edit mode because saving it would break our fecth data mechanism"
}
],
schemaVersion: 16,
style: "dark",
tags: ["bayesian-network"],
templating: {
list: [
{
allValue: null,
current: {
text: "nodo 1",
value: "nodo 1"
},
hide: 0,
includeAll: false,
label: "Nodo",
multi: false,
name: "Nodo",
options: [
{
selected: true,
text: "nodo 1",
value: "nodo 1"
}
],
query: "nodo 1",
skipUrlSync: false,
type: "custom"
}
]
},
time: {
from: "now-6h",
to: "now"
},
timepicker: {
refresh_intervals: [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
time_options: [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
timezone: "",
title: "Rete Bayesiana",
uid: "H39FJ39VMA12MD",
version: 3,
network: null
};
export class ImportNetCtrl {
/** @ngInject */
constructor(backendSrv, validationSrv, navModelSrv, $location, $routeParams) {
this.backendSrv = backendSrv;
this.validationSrv = validationSrv;
this.$location = $location;
this.$routeParams = $routeParams;
this.step = 1;
this.nameExists = false;
this.uidExists = false;
this.autoGenerateUid = true;
this.autoGenerateUidValue = 'auto-generated';
this.folderId = $routeParams.folderId ? Number($routeParams.folderId) || 0 : null;
this.initialFolderTitle = 'Select a folder';
// check gnetId in url
if ($routeParams.gnetId) {
this.gnetUrl = $routeParams.gnetId;
this.checkGnetDashboard();
}
}
static initProbs(net){
let prob_nodes = appCtrl.getProbs(); // replace appCtrl with netParser ==> it also has the integrity checks
for(let i=0;i<prob_nodes.length;i++)
net.nodi[i].probs = prob_nodes[i]; // add the probabilities
}
// CUSTOM
onUpload(net) {
this.network = net; // for the HTML
// we always receive a net; the dashboard template must be attached to it
ImportNetCtrl.initProbs(net);
structure.title = net.rete;
structure.network = net; // attach the piece we receive to the template
console.info("onUpload Rete: ");
console.info(structure.network);
//creating a db
let host ="http://localhost:8086";
let database ="bayesian";
const influx = new Influx(host,database);
influx.createDB().then(()=>{
console.info("database created");
let nodes = [];
let states = [];
let probs = [];
for(let i=0;i<net.nodi.length;i++){
nodes.push(net.nodi[i].id);
states.push(net.nodi[i].stati);
probs.push(net.nodi[i].probs);
}
/*
return influx.insert(nodes,states,probs)
.then(()=>console.info("inserted"));
*/
influx.insert(nodes,states,probs)
.then(()=>console.info("inserted"))
.then(()=>{
influx.retrieve(nodes).then((data)=>{
console.info("retrieved");
console.info(data);
});
});
}).catch((err)=>console.info(err));
this.dash = structure; // feed it the complete dashboard + net structure
this.dash.id = null;
this.step = 2;
this.inputs = [];
if (this.dash.__inputs) {
for (const input of this.dash.__inputs) {
const inputModel = {
name: input.name,
label: input.label,
info: input.description,
value: input.value,
type: input.type,
pluginId: input.pluginId,
options: [],
};
if (input.type === 'datasource') {
this.setDatasourceOptions(input, inputModel);
} else if (!inputModel.info) {
inputModel.info = 'Specify a string constant';
}
this.inputs.push(inputModel);
}
}
this.inputsValid = this.inputs.length === 0;
this.titleChanged();
this.uidChanged(true);
}
setDatasourceOptions(input, inputModel) {
const sources = _.filter(config.datasources, val => {
return val.type === input.pluginId;
});
if (sources.length === 0) {
inputModel.info = 'No data sources of type ' + input.pluginName + ' found';
} else if (!inputModel.info) {
inputModel.info = 'Select a ' + input.pluginName + ' data source';
}
inputModel.options = sources.map(val => {
return { text: val.name, value: val.name };
});
}
inputValueChanged() {
this.inputsValid = true;
for (const input of this.inputs) {
if (!input.value) {
this.inputsValid = false;
}
}
}
titleChanged() {
this.titleTouched = true;
this.nameExists = false;
this.validationSrv
.validateNewDashboardName(this.folderId, this.dash.title)
.then(() => {
this.nameExists = false;
this.hasNameValidationError = false;
})
.catch(err => {
if (err.type === 'EXISTING') {
this.nameExists = true;
}
this.hasNameValidationError = true;
this.nameValidationError = err.message;
});
}
uidChanged(initial) {
this.uidExists = false;
this.hasUidValidationError = false;
if (initial === true && this.dash.uid) {
this.autoGenerateUidValue = 'value set';
}
this.backendSrv
.getDashboardByUid(this.dash.uid)
.then(res => {
this.uidExists = true;
this.hasUidValidationError = true;
this.uidValidationError = `Dashboard named '${res.dashboard.title}' in folder '${
res.meta.folderTitle
}' has the same uid`;
})
.catch(err => {
err.isHandled = true;
});
}
onFolderChange(folder) {
this.folderId = folder.id;
this.titleChanged();
}
onEnterFolderCreation() {
this.inputsValid = false;
}
onExitFolderCreation() {
this.inputValueChanged();
}
isV | alid() | identifier_name |
|
dataset_RAF.py | 9": 3,
"70+":4
},
"race": {
"Caucasian": 0,
"African-American": 1,
"Asian": 2
}
}
# converted labels
rafDBmeta = defaultdict(dict)
# multitask labels
rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purposes
rafDBdata = None # dict({image_path: ... }) # for ensembling purposes
# ORDER: Gender, Age, Ethnicity, Emotion
def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False):
global rafDBdata
if rafDBdata is None:
rafDBdata = dict()
i, errors = 0, defaultdict(set)
for image_path, image_meta in input_meta.items():
identity = image_meta["identity"]
roi = None # aligned image, roi is the image size
rafDBdata[image_path] = {
"roi" : roi,
"identity" : identity,
"gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE,
"age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE,
"ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE,
"emotion": get_emotion_label(image_meta["emotion"]),
"sample_num" : i
}
i += 1
print("Metadata:", len(rafDBdata))
if errors:
print("Gender errors", errors["gender"])
print("Age errors", errors["age"])
print("Ethnicity errors", errors["ethnicity"])
# Labelling
def | (gender):
if gender == 'male':
return LABELS["gender"]["male"]
elif gender == 'female':
return LABELS["gender"]["female"]
return MASK_VALUE
def get_age_group_label(age_group_text):
return rafdb_labels["age_group"][age_group_text]
def get_ethnicity_label(ethnicity_text):
return rafdb_labels["race"][ethnicity_text]
def get_emotion_label(emotion):
return LABELS["emotion"][emotion]
# Load from csv
def _load_meta_from_csv(csv_meta, output_dict):
data = readcsv(csv_meta)
for row in data:
output_dict[row[0]]["gender"] = row[1]
output_dict[row[0]]["age_group"] = row[2]
output_dict[row[0]]["race"] = row[3]
output_dict[row[0]]["emotion"] = row[4]
output_dict[row[0]]["identity"] = row[0].split("_")[1]
def get_partition(identity_label):
global rafDBpartition
try:
faces, partition = rafDBpartition[identity_label]
rafDBpartition[identity_label] = (faces + 1, partition)
except KeyError:
# split 20/80 stratified by identity
l = (len(rafDBpartition) - 1) % 10
if l == 0 or l == 1:
partition = PARTITION_VAL
else:
partition = PARTITION_TRAIN
rafDBpartition[identity_label] = (1, partition)
return partition
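# Net effect of the split above (sketch): identities already seen keep their
# partition and only their face count grows; of every 10 newly seen identities,
# 2 are assigned to PARTITION_VAL and 8 to PARTITION_TRAIN, giving roughly an
# 80/20 train/validation split stratified by identity (all images of a person
# end up in the same partition).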
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
data = list()
discarded_items = defaultdict(list)
for image_path, image_meta in tqdm(rafDBdata.items()):
path = os.path.join(imagesdir, image_path)
if ALIGNED:
path = os.path.splitext(path)
path = path[0] + "_aligned" + path[1]
identity = image_meta["identity"]
image = cv2.imread(path)
if image is None:
print("WARNING! Unable to read {}".format(image_path))
print(" - At {}".format(path))
discarded_items["unavailable_image"].append(identity)
continue
if np.max(image) == np.min(image):
print("Blank image {}".format(image_path))
discarded_items["blank_image"].append(identity)
continue
sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity)
gender = rafDBdata[image_path]["gender"]
age = rafDBdata[image_path]["age_group"]
ethnicity = rafDBdata[image_path]["ethnicity"]
emotion = rafDBdata[image_path]["emotion"]
labels = (gender, age, ethnicity, emotion)
roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"]
sample = {
'img': path,
'label': labels,
'roi': roi,
'part': sample_partition
}
data.append(sample)
if debug_max_num_samples is not None and len(data) >= debug_max_num_samples:
print("Stopped loading. Debug max samples: ", debug_max_num_samples)
break
print("Data loaded. {} samples".format(len(data)))
print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"]))
print("Discarded for blank image: ", len(discarded_items["blank_image"]))
return data
ALIGNED = True
class RAFDBMulti:
def __init__(self,
partition='train',
imagesdir='data/RAF-DB/basic/Image/{aligned}',
csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
target_shape=(112, 112, 3),
augment=True,
custom_augmentation=None,
preprocessing='full_normalization',
debug_max_num_samples=None,
include_gender=False,
include_age_group=False,
include_race=False,
**kwargs):
partition_label = partition_select(partition)
self.target_shape = target_shape
self.custom_augmentation = custom_augmentation
self.augment = augment
self.gen = None
self.preprocessing = preprocessing
print('Loading %s data...' % partition)
num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else ''
cache_task = "{}{}{}_emotion".format(
"_withgender" if include_gender else "",
"_withagegroup" if include_age_group else "",
"_withrace" if include_race else ""
)
cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples)
cache_file_name = os.path.join("cache", cache_file_name)
cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
print("cache file name %s" % cache_file_name)
try:
with open(cache_file_name, 'rb') as f:
self.data = pickle.load(f)[:debug_max_num_samples]
print("Data loaded. %d samples, from cache" % (len(self.data)))
except FileNotFoundError:
print("Loading %s data from scratch" % partition)
load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test"
imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original"))
csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition))
_load_meta_from_csv(csvmeta, rafDBmeta)
_load_traits(rafDBmeta, include_gender, include_age_group, include_race)
print("Loading {} dataset".format(partition))
loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples)
print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label)
if partition.startswith('test'):
self.data = loaded_data
else:
self.data = [x for x in loaded_data if x['part'] == partition_label]
with open(cache_file_name, 'wb') as f:
print("Pickle dumping")
pickle.dump(self.data, f)
def get_data(self):
return self.data
def get_num_samples(self):
return len(self.data)
def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
if self.gen is None:
self.gen = DataGenerator(data=self.data,
target_shape=self.target_shape,
with_augmentation=self.augment,
custom_augmentation=self.custom_augmentation,
batch_size=batch_size,
num_classes=self.get_num_classes(),
preprocessing=self.preprocessing,
fullinfo=fullinfo,
doublelabel=doublelabel)
return self.gen
def get_num_classes(self):
return CLASSES
def test_multi(dataset="test", debug_samples=None):
if dataset.startswith("train") or dataset.startswith("val"):
print(dataset, debug_samples if debug_samples is not None else '')
dt = RAFDBMulti(dataset,
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dt.get_generator()
else:
dv = RAFDBMulti('test',
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dv.get_generator()
i = 0
for batch in tqdm(gen | get_gender_label | identifier_name |
dataset_RAF.py | 9": 3,
"70+":4
},
"race": {
"Caucasian": 0,
"African-American": 1,
"Asian": 2
}
}
# converted labels
rafDBmeta = defaultdict(dict)
# multitask labels
rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purposes
rafDBdata = None # dict({image_path: ... }) # for ensembling purposes
# ORDER: Gender, Age, Ethnicity, Emotion
def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False):
global rafDBdata
if rafDBdata is None:
rafDBdata = dict()
i, errors = 0, defaultdict(set)
for image_path, image_meta in input_meta.items():
identity = image_meta["identity"]
roi = None # aligned image, roi is the image size
rafDBdata[image_path] = {
"roi" : roi,
"identity" : identity,
"gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE,
"age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE,
"ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE,
"emotion": get_emotion_label(image_meta["emotion"]),
"sample_num" : i
}
i += 1
print("Metadata:", len(rafDBdata))
if errors:
print("Gender errors", errors["gender"])
print("Age errors", errors["age"])
print("Ethnicity errors", errors["ethnicity"])
# Labelling
def get_gender_label(gender):
if gender == 'male':
return LABELS["gender"]["male"]
elif gender == 'female':
return LABELS["gender"]["female"]
return MASK_VALUE
def get_age_group_label(age_group_text):
return rafdb_labels["age_group"][age_group_text]
def get_ethnicity_label(ethnicity_text):
return rafdb_labels["race"][ethnicity_text]
def get_emotion_label(emotion):
return LABELS["emotion"][emotion]
# Load from csv
def _load_meta_from_csv(csv_meta, output_dict):
data = readcsv(csv_meta)
for row in data:
output_dict[row[0]]["gender"] = row[1]
output_dict[row[0]]["age_group"] = row[2]
output_dict[row[0]]["race"] = row[3]
output_dict[row[0]]["emotion"] = row[4]
output_dict[row[0]]["identity"] = row[0].split("_")[1]
def get_partition(identity_label):
global rafDBpartition
try:
faces, partition = rafDBpartition[identity_label]
rafDBpartition[identity_label] = (faces + 1, partition)
except KeyError:
# split 20/80 stratified by identity
l = (len(rafDBpartition) - 1) % 10
if l == 0 or l == 1:
partition = PARTITION_VAL
else:
partition = PARTITION_TRAIN
rafDBpartition[identity_label] = (1, partition)
return partition
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
data = list()
discarded_items = defaultdict(list)
for image_path, image_meta in tqdm(rafDBdata.items()):
path = os.path.join(imagesdir, image_path)
if ALIGNED:
path = os.path.splitext(path)
path = path[0] + "_aligned" + path[1]
identity = image_meta["identity"]
image = cv2.imread(path)
if image is None:
print("WARNING! Unable to read {}".format(image_path))
print(" - At {}".format(path))
discarded_items["unavailable_image"].append(identity)
continue
if np.max(image) == np.min(image):
print("Blank image {}".format(image_path))
discarded_items["blank_image"].append(identity)
continue
sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity)
gender = rafDBdata[image_path]["gender"]
age = rafDBdata[image_path]["age_group"]
ethnicity = rafDBdata[image_path]["ethnicity"]
emotion = rafDBdata[image_path]["emotion"]
labels = (gender, age, ethnicity, emotion)
roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"]
sample = {
'img': path,
'label': labels,
'roi': roi,
'part': sample_partition
}
data.append(sample)
if debug_max_num_samples is not None and len(data) >= debug_max_num_samples:
print("Stopped loading. Debug max samples: ", debug_max_num_samples)
break
print("Data loaded. {} samples".format(len(data)))
print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"]))
print("Discarded for blank image: ", len(discarded_items["blank_image"]))
return data
ALIGNED = True
class RAFDBMulti:
| self.preprocessing = preprocessing
print('Loading %s data...' % partition)
num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else ''
cache_task = "{}{}{}_emotion".format(
"_withgender" if include_gender else "",
"_withagegroup" if include_age_group else "",
"_withrace" if include_race else ""
)
cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples)
cache_file_name = os.path.join("cache", cache_file_name)
cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
print("cache file name %s" % cache_file_name)
try:
with open(cache_file_name, 'rb') as f:
self.data = pickle.load(f)[:debug_max_num_samples]
print("Data loaded. %d samples, from cache" % (len(self.data)))
except FileNotFoundError:
print("Loading %s data from scratch" % partition)
load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test"
imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original"))
csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition))
_load_meta_from_csv(csvmeta, rafDBmeta)
_load_traits(rafDBmeta, include_gender, include_age_group, include_race)
print("Loading {} dataset".format(partition))
loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples)
print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label)
if partition.startswith('test'):
self.data = loaded_data
else:
self.data = [x for x in loaded_data if x['part'] == partition_label]
with open(cache_file_name, 'wb') as f:
print("Pickle dumping")
pickle.dump(self.data, f)
def get_data(self):
return self.data
def get_num_samples(self):
return len(self.data)
def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
if self.gen is None:
self.gen = DataGenerator(data=self.data,
target_shape=self.target_shape,
with_augmentation=self.augment,
custom_augmentation=self.custom_augmentation,
batch_size=batch_size,
num_classes=self.get_num_classes(),
preprocessing=self.preprocessing,
fullinfo=fullinfo,
doublelabel=doublelabel)
return self.gen
def get_num_classes(self):
return CLASSES
def test_multi(dataset="test", debug_samples=None):
if dataset.startswith("train") or dataset.startswith("val"):
print(dataset, debug_samples if debug_samples is not None else '')
dt = RAFDBMulti(dataset,
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dt.get_generator()
else:
dv = RAFDBMulti('test',
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dv.get_generator()
i = 0
for batch in tqdm(gen):
| def __init__(self,
partition='train',
imagesdir='data/RAF-DB/basic/Image/{aligned}',
csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
target_shape=(112, 112, 3),
augment=True,
custom_augmentation=None,
preprocessing='full_normalization',
debug_max_num_samples=None,
include_gender=False,
include_age_group=False,
include_race=False,
**kwargs):
partition_label = partition_select(partition)
self.target_shape = target_shape
self.custom_augmentation = custom_augmentation
self.augment = augment
self.gen = None | identifier_body |
dataset_RAF.py | 9": 3,
"70+":4
},
"race": {
"Caucasian": 0,
"African-American": 1,
"Asian": 2
}
}
# converted labels
rafDBmeta = defaultdict(dict)
# multitask labels
rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose
rafDBdata = None # dict({image_path: ... }) # for ensembling purpose
# ORDER: Gender, Age, Ethnicity, Emotion
def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False):
global rafDBdata
if rafDBdata is None:
rafDBdata = dict()
i, errors = 0, defaultdict(set)
for image_path, image_meta in input_meta.items():
identity = image_meta["identity"]
roi = None # aligned image, roi is the image size
rafDBdata[image_path] = {
"roi" : roi,
"identity" : identity,
"gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE,
"age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE,
"ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE,
"emotion": get_emotion_label(image_meta["emotion"]),
"sample_num" : i
}
i += 1
print("Metadata:", len(rafDBdata))
if errors:
print("Gender errors", errors["gender"])
print("Age errors", errors["age"])
print("Ethnicity errors", errors["ethnicity"])
# Labelling
def get_gender_label(gender):
if gender == 'male':
return LABELS["gender"]["male"]
elif gender == 'female':
return LABELS["gender"]["female"]
return MASK_VALUE
def get_age_group_label(age_group_text):
return rafdb_labels["age_group"][age_group_text]
def get_ethnicity_label(ethnicity_text):
return rafdb_labels["race"][ethnicity_text]
def get_emotion_label(emotion):
return LABELS["emotion"][emotion]
# Load from csv | output_dict[row[0]]["gender"] = row[1]
output_dict[row[0]]["age_group"] = row[2]
output_dict[row[0]]["race"] = row[3]
output_dict[row[0]]["emotion"] = row[4]
output_dict[row[0]]["identity"] = row[0].split("_")[1]
def get_partition(identity_label):
global rafDBpartition
try:
faces, partition = rafDBpartition[identity_label]
rafDBpartition[identity_label] = (faces + 1, partition)
except KeyError:
# split 20/80 stratified by identity
l = (len(rafDBpartition) - 1) % 10
if l == 0 or l == 1:
partition = PARTITION_VAL
else:
partition = PARTITION_TRAIN
rafDBpartition[identity_label] = (1, partition)
return partition
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
data = list()
discarded_items = defaultdict(list)
for image_path, image_meta in tqdm(rafDBdata.items()):
path = os.path.join(imagesdir, image_path)
if ALIGNED:
path = os.path.splitext(path)
path = path[0] + "_aligned" + path[1]
identity = image_meta["identity"]
image = cv2.imread(path)
if image is None:
print("WARNING! Unable to read {}".format(image_path))
print(" - At {}".format(path))
discarded_items["unavailable_image"].append(identity)
continue
if np.max(image) == np.min(image):
print("Blank image {}".format(image_path))
discarded_items["blank_image"].append(identity)
continue
sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity)
gender = rafDBdata[image_path]["gender"]
age = rafDBdata[image_path]["age_group"]
ethnicity = rafDBdata[image_path]["ethnicity"]
emotion = rafDBdata[image_path]["emotion"]
labels = (gender, age, ethnicity, emotion)
roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"]
sample = {
'img': path,
'label': labels,
'roi': roi,
'part': sample_partition
}
data.append(sample)
if debug_max_num_samples is not None and len(data) >= debug_max_num_samples:
print("Stopped loading. Debug max samples: ", debug_max_num_samples)
break
print("Data loaded. {} samples".format(len(data)))
print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"]))
print("Discarded for blank image: ", len(discarded_items["blank_image"]))
return data
ALIGNED = True
class RAFDBMulti:
def __init__(self,
partition='train',
imagesdir='data/RAF-DB/basic/Image/{aligned}',
csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
target_shape=(112, 112, 3),
augment=True,
custom_augmentation=None,
preprocessing='full_normalization',
debug_max_num_samples=None,
include_gender=False,
include_age_group=False,
include_race=False,
**kwargs):
partition_label = partition_select(partition)
self.target_shape = target_shape
self.custom_augmentation = custom_augmentation
self.augment = augment
self.gen = None
self.preprocessing = preprocessing
print('Loading %s data...' % partition)
num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else ''
cache_task = "{}{}{}_emotion".format(
"_withgender" if include_gender else "",
"_withagegroup" if include_age_group else "",
"_withrace" if include_race else ""
)
cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples)
cache_file_name = os.path.join("cache", cache_file_name)
cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
print("cache file name %s" % cache_file_name)
try:
with open(cache_file_name, 'rb') as f:
self.data = pickle.load(f)[:debug_max_num_samples]
print("Data loaded. %d samples, from cache" % (len(self.data)))
except FileNotFoundError:
print("Loading %s data from scratch" % partition)
load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test"
imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original"))
csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition))
_load_meta_from_csv(csvmeta, rafDBmeta)
_load_traits(rafDBmeta, include_gender, include_age_group, include_race)
print("Loading {} dataset".format(partition))
loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples)
print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label)
if partition.startswith('test'):
self.data = loaded_data
else:
self.data = [x for x in loaded_data if x['part'] == partition_label]
with open(cache_file_name, 'wb') as f:
print("Pickle dumping")
pickle.dump(self.data, f)
def get_data(self):
return self.data
def get_num_samples(self):
return len(self.data)
def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
if self.gen is None:
self.gen = DataGenerator(data=self.data,
target_shape=self.target_shape,
with_augmentation=self.augment,
custom_augmentation=self.custom_augmentation,
batch_size=batch_size,
num_classes=self.get_num_classes(),
preprocessing=self.preprocessing,
fullinfo=fullinfo,
doublelabel=doublelabel)
return self.gen
def get_num_classes(self):
return CLASSES
def test_multi(dataset="test", debug_samples=None):
if dataset.startswith("train") or dataset.startswith("val"):
print(dataset, debug_samples if debug_samples is not None else '')
dt = RAFDBMulti(dataset,
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dt.get_generator()
else:
dv = RAFDBMulti('test',
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dv.get_generator()
i = 0
for batch in tqdm(gen):
| def _load_meta_from_csv(csv_meta, output_dict):
data = readcsv(csv_meta)
for row in data: | random_line_split |
dataset_RAF.py | 9": 3,
"70+":4
},
"race": {
"Caucasian": 0,
"African-American": 1,
"Asian": 2
}
}
# converted labels
rafDBmeta = defaultdict(dict)
# multitask labels
rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose
rafDBdata = None # dict({image_path: ... }) # for ensembling purpose
# ORDER: Gender, Age, Ethnicity, Emotion
def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False):
global rafDBdata
if rafDBdata is None:
rafDBdata = dict()
i, errors = 0, defaultdict(set)
for image_path, image_meta in input_meta.items():
identity = image_meta["identity"]
roi = None # aligned image, roi is the image size
rafDBdata[image_path] = {
"roi" : roi,
"identity" : identity,
"gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE,
"age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE,
"ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE,
"emotion": get_emotion_label(image_meta["emotion"]),
"sample_num" : i
}
i += 1
print("Metadata:", len(rafDBdata))
if errors:
|
# Labelling
def get_gender_label(gender):
if gender == 'male':
return LABELS["gender"]["male"]
elif gender == 'female':
return LABELS["gender"]["female"]
return MASK_VALUE
def get_age_group_label(age_group_text):
return rafdb_labels["age_group"][age_group_text]
def get_ethnicity_label(ethnicity_text):
return rafdb_labels["race"][ethnicity_text]
def get_emotion_label(emotion):
return LABELS["emotion"][emotion]
# Load from csv
def _load_meta_from_csv(csv_meta, output_dict):
data = readcsv(csv_meta)
for row in data:
output_dict[row[0]]["gender"] = row[1]
output_dict[row[0]]["age_group"] = row[2]
output_dict[row[0]]["race"] = row[3]
output_dict[row[0]]["emotion"] = row[4]
output_dict[row[0]]["identity"] = row[0].split("_")[1]
def get_partition(identity_label):
global rafDBpartition
try:
faces, partition = rafDBpartition[identity_label]
rafDBpartition[identity_label] = (faces + 1, partition)
except KeyError:
# split 20/80 stratified by identity
l = (len(rafDBpartition) - 1) % 10
if l == 0 or l == 1:
partition = PARTITION_VAL
else:
partition = PARTITION_TRAIN
rafDBpartition[identity_label] = (1, partition)
return partition
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
data = list()
discarded_items = defaultdict(list)
for image_path, image_meta in tqdm(rafDBdata.items()):
path = os.path.join(imagesdir, image_path)
if ALIGNED:
path = os.path.splitext(path)
path = path[0] + "_aligned" + path[1]
identity = image_meta["identity"]
image = cv2.imread(path)
if image is None:
print("WARNING! Unable to read {}".format(image_path))
print(" - At {}".format(path))
discarded_items["unavailable_image"].append(identity)
continue
if np.max(image) == np.min(image):
print("Blank image {}".format(image_path))
discarded_items["blank_image"].append(identity)
continue
sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity)
gender = rafDBdata[image_path]["gender"]
age = rafDBdata[image_path]["age_group"]
ethnicity = rafDBdata[image_path]["ethnicity"]
emotion = rafDBdata[image_path]["emotion"]
labels = (gender, age, ethnicity, emotion)
roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"]
sample = {
'img': path,
'label': labels,
'roi': roi,
'part': sample_partition
}
data.append(sample)
if debug_max_num_samples is not None and len(data) >= debug_max_num_samples:
print("Stopped loading. Debug max samples: ", debug_max_num_samples)
break
print("Data loaded. {} samples".format(len(data)))
print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"]))
print("Discarded for blank image: ", len(discarded_items["blank_image"]))
return data
ALIGNED = True
class RAFDBMulti:
def __init__(self,
partition='train',
imagesdir='data/RAF-DB/basic/Image/{aligned}',
csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
target_shape=(112, 112, 3),
augment=True,
custom_augmentation=None,
preprocessing='full_normalization',
debug_max_num_samples=None,
include_gender=False,
include_age_group=False,
include_race=False,
**kwargs):
partition_label = partition_select(partition)
self.target_shape = target_shape
self.custom_augmentation = custom_augmentation
self.augment = augment
self.gen = None
self.preprocessing = preprocessing
print('Loading %s data...' % partition)
num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else ''
cache_task = "{}{}{}_emotion".format(
"_withgender" if include_gender else "",
"_withagegroup" if include_age_group else "",
"_withrace" if include_race else ""
)
cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples)
cache_file_name = os.path.join("cache", cache_file_name)
cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
print("cache file name %s" % cache_file_name)
try:
with open(cache_file_name, 'rb') as f:
self.data = pickle.load(f)[:debug_max_num_samples]
print("Data loaded. %d samples, from cache" % (len(self.data)))
except FileNotFoundError:
print("Loading %s data from scratch" % partition)
load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test"
imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original"))
csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition))
_load_meta_from_csv(csvmeta, rafDBmeta)
_load_traits(rafDBmeta, include_gender, include_age_group, include_race)
print("Loading {} dataset".format(partition))
loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples)
print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label)
if partition.startswith('test'):
self.data = loaded_data
else:
self.data = [x for x in loaded_data if x['part'] == partition_label]
with open(cache_file_name, 'wb') as f:
print("Pickle dumping")
pickle.dump(self.data, f)
def get_data(self):
return self.data
def get_num_samples(self):
return len(self.data)
def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
if self.gen is None:
self.gen = DataGenerator(data=self.data,
target_shape=self.target_shape,
with_augmentation=self.augment,
custom_augmentation=self.custom_augmentation,
batch_size=batch_size,
num_classes=self.get_num_classes(),
preprocessing=self.preprocessing,
fullinfo=fullinfo,
doublelabel=doublelabel)
return self.gen
def get_num_classes(self):
return CLASSES
def test_multi(dataset="test", debug_samples=None):
if dataset.startswith("train") or dataset.startswith("val"):
print(dataset, debug_samples if debug_samples is not None else '')
dt = RAFDBMulti(dataset,
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dt.get_generator()
else:
dv = RAFDBMulti('test',
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dv.get_generator()
i = 0
for batch in tqdm(gen):
| print("Gender errors", errors["gender"])
print("Age errors", errors["age"])
print("Ethnicity errors", errors["ethnicity"]) | conditional_block |
mod.rs | _at_mut(degree * OUT_LEN);
// Recurse! This uses multiple threads if the "rayon" feature is enabled.
let (left_n, right_n) = J::join(
|| compress_parents_wide::<J>(left, key, flags, platform, left_out),
|| compress_parents_wide::<J>(right, key, flags, platform, right_out),
left.len(),
right.len(),
);
// The special case again. If simd_degree=1, then we'll have left_n=1 and
// right_n=1. Rather than compressing them into a single output, return
// them directly, to make sure we always have at least two outputs.
debug_assert_eq!(left_n, degree);
debug_assert!(right_n >= 1 && right_n <= left_n);
if left_n == 1 {
out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
return 2;
}
// Otherwise, do one layer of parent node compression.
let num_children = left_n + right_n;
compress_parents_parallel(
&cv_array[..num_children * OUT_LEN],
key,
flags,
platform,
out,
)
}
// Variant of compress_subtree_to_parent_node which takes parents as input.
fn compress_parents_to_parent_node<J: Join>(
input: &[u8],
key: &CVWords,
flags: u8,
platform: Platform,
) -> [u8; BLOCK_LEN] {
debug_assert!(input.len() > BLOCK_LEN);
let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
let mut num_cvs = compress_parents_wide::<J>(input, &key, flags, platform, &mut cv_array);
debug_assert!(num_cvs >= 2);
// If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
// compress_parents_wide() returns more than 2 chaining values. Condense
// them into 2 by forming parent nodes repeatedly.
let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
while num_cvs > 2 {
let cv_slice = &cv_array[..num_cvs * OUT_LEN];
num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
}
*array_ref!(cv_array, 0, 2 * OUT_LEN)
}
/// GPU-accelerated Hasher.
///
/// This is a wrapper around a [`Hasher`] which also allows exporting the key
/// and flags to be used by a GPU shader, and importing the shader's result.
///
/// This wrapper should be used with care, since incorrect use can lead to a
/// wrong hash output. It also allows extracting the key from the state, which
/// would otherwise not be allowed in safe code.
///
/// This wrapper can be freely converted to its inner [`Hasher`], through the
/// `Deref`, `DerefMut`, and `Into` traits. Prefer to use the inner [`Hasher`]
/// wherever the extra functionality from this wrapper is not needed.
///
/// [`Hasher`]: ../struct.Hasher.html
#[derive(Clone, Debug, Default)]
pub struct GpuHasher {
inner: Hasher,
}
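// Editor's illustrative sketch (not part of the original source): the wrapper
// keeps the ordinary CPU hashing API usable through Deref/DerefMut, and
// Into<Hasher> recovers the plain hasher when the GPU-specific functionality
// is no longer needed.
#[allow(dead_code)]
fn cpu_only_use_example(bytes: &[u8]) -> Hasher {
    let mut hasher = GpuHasher::new();
    hasher.update(bytes); // forwarded to the inner Hasher via DerefMut
    hasher.into() // From<GpuHasher> for Hasher
}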
impl GpuHasher {
/// Wrapper for [`Hasher::new`](../struct.Hasher.html#method.new).
#[inline]
pub fn new() -> Self {
Self {
inner: Hasher::new(),
}
}
/// Wrapper for [`Hasher::new_keyed`](../struct.Hasher.html#method.new_keyed).
#[inline]
pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
Self {
inner: Hasher::new_keyed(key),
}
}
/// Wrapper for [`Hasher::new_derive_key`](../struct.Hasher.html#method.new_derive_key).
#[inline]
pub fn new_derive_key(context: &str) -> Self {
Self {
inner: Hasher::new_derive_key(context),
}
}
/// Obtain the [`GpuControl`](struct.GpuControl.html) to hash full chunks starting with `chunk_counter`
/// or parent nodes.
pub fn gpu_control(&self, chunk_counter: u64) -> GpuControl {
GpuControl::new(&self.key, chunk_counter, self.chunk_state.flags)
}
/// GPU-accelerated version of [`update_with_join`].
///
/// Unlike [`update_with_join`], this method receives the parents computed
/// by one or more applications of the BLAKE3 shader.
///
/// This method has several restrictions. The size of the shader input must
/// be a power of two, it must be naturally aligned within the hash input,
/// and the hasher state must not have any leftover bytes in its internal
/// buffers. The simplest way to follow these invariants is to use this
/// method, with the same chunk count and buffer size, for all of the input
/// except for a variable-sized tail, which can use [`update_with_join`] or
/// [`update`].
///
/// Note: the chunk counter is implicit in this method, but it must be the | ///
/// [`update`]: #method.update
/// [`update_with_join`]: #method.update_with_join
/// [`GpuControl`]: struct.GpuControl.html
pub fn update_from_gpu<J: Join>(&mut self, chunk_count: u64, parents: &mut [u8]) -> &mut Self {
assert_eq!(self.chunk_state.len(), 0, "leftover buffered bytes");
let chunk_counter = self.chunk_state.chunk_counter;
// These three checks make sure the increment of t0 in the shader did not overflow.
assert!(chunk_count.is_power_of_two(), "bad chunk count");
assert!(chunk_count <= (1 << 32), "chunk count overflow");
assert_eq!(chunk_counter % chunk_count, 0, "misaligned hash");
assert_eq!(parents.len() % OUT_LEN, 0, "invalid hash size");
let parent_count = (parents.len() / OUT_LEN) as u64;
assert_eq!(chunk_count % parent_count, 0, "invalid child count");
// The lazy merge of the CV stack needs at least 2 inputs.
// And compress_parents_to_parent_node needs at least 2 blocks.
assert!(parent_count > 2, "invalid parent count");
// The shader inputs and outputs are 32-bit words, which are in native byte order.
// The chunk shader byte swaps its input, but neither shader byte swaps its output.
// Since the rest of the code assumes little endian, byte swap the buffer here.
Self::swap_endian::<J>(parents);
let cv_pair = compress_parents_to_parent_node::<J>(
parents,
&self.key,
self.chunk_state.flags,
self.chunk_state.platform,
);
let left_cv = array_ref!(cv_pair, 0, 32);
let right_cv = array_ref!(cv_pair, 32, 32);
// Push the two CVs we received into the CV stack in order. Because
// the stack merges lazily, this guarantees we aren't merging the
// root.
self.push_cv(left_cv, chunk_counter);
self.push_cv(right_cv, chunk_counter + (chunk_count / 2));
self.chunk_state.chunk_counter += chunk_count;
self
}
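    // Editor's sketch (not from the original source): the GpuControl handed to
    // the shader must carry the same chunk counter that this hasher's state
    // implies (see the note above). A private helper like this makes that
    // coupling explicit for callers that drive a real shader dispatch.
    #[allow(dead_code)]
    fn gpu_control_for_next_update(&self) -> GpuControl {
        // Counter of the next full chunk this hasher expects to consume.
        self.gpu_control(self.chunk_state.chunk_counter)
    }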
// CPU simulation of the BLAKE3 chunk shader.
//
// This can be used to test the real shader.
//
// Note: unlike the real shader, this simulation always uses little-endian
// inputs and outputs.
#[doc(hidden)]
pub fn simulate_chunk_shader<J: Join>(
&self,
count: usize,
input: &[u8],
output: &mut [u8],
control: &GpuControl,
) {
assert_eq!(input.len(), count * CHUNK_LEN, "invalid input size");
assert_eq!(output.len(), count * OUT_LEN, "invalid output size");
if count > self.chunk_state.platform.simd_degree() {
let mid = count / 2;
let (left_in, right_in) = input.split_at(mid * CHUNK_LEN);
let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN);
let control_r = control.plus_chunks(mid as u64);
J::join(
|| self.simulate_chunk_shader::<J>(mid, left_in, left_out, control),
|| self.simulate_chunk_shader::<J>(count - mid, right_in, right_out, &control_r),
left_in.len(),
right_in.len(),
);
} else if count > 0 {
let mut chunks = ArrayVec::<[&[u8; CHUNK_LEN]; MAX_SIMD_DEGREE]>::new();
for chunk in input.chunks_exact(CHUNK_LEN) {
chunks | /// same as the chunk counter in the [`GpuControl`] passed to the shader,
/// otherwise it will lead to a wrong hash output.
///
/// Note: on a big-endian host, this method will swap the endianness of the
/// shader output in-place. | random_line_split |
mod.rs | .new_derive_key).
#[inline]
pub fn new_derive_key(context: &str) -> Self {
Self {
inner: Hasher::new_derive_key(context),
}
}
/// Obtain the [`GpuControl`](struct.GpuControl.html) to hash full chunks starting with `chunk_counter`
/// or parent nodes.
pub fn gpu_control(&self, chunk_counter: u64) -> GpuControl {
GpuControl::new(&self.key, chunk_counter, self.chunk_state.flags)
}
/// GPU-accelerated version of [`update_with_join`].
///
/// Unlike [`update_with_join`], this method receives the parents computed
/// by one or more applications of the BLAKE3 shader.
///
/// This method has several restrictions. The size of the shader input must
/// be a power of two, it must be naturally aligned within the hash input,
/// and the hasher state must not have any leftover bytes in its internal
/// buffers. The simplest way to follow these invariants is to use this
/// method, with the same chunk count and buffer size, for all of the input
/// except for a variable-sized tail, which can use [`update_with_join`] or
/// [`update`].
///
/// Note: the chunk counter is implicit in this method, but it must be the
/// same as the chunk counter in the [`GpuControl`] passed to the shader,
/// otherwise it will lead to a wrong hash output.
///
/// Note: on a big-endian host, this method will swap the endianness of the
/// shader output in-place.
///
/// [`update`]: #method.update
/// [`update_with_join`]: #method.update_with_join
/// [`GpuControl`]: struct.GpuControl.html
pub fn update_from_gpu<J: Join>(&mut self, chunk_count: u64, parents: &mut [u8]) -> &mut Self {
assert_eq!(self.chunk_state.len(), 0, "leftover buffered bytes");
let chunk_counter = self.chunk_state.chunk_counter;
// These three checks make sure the increment of t0 in the shader did not overflow.
assert!(chunk_count.is_power_of_two(), "bad chunk count");
assert!(chunk_count <= (1 << 32), "chunk count overflow");
assert_eq!(chunk_counter % chunk_count, 0, "misaligned hash");
assert_eq!(parents.len() % OUT_LEN, 0, "invalid hash size");
let parent_count = (parents.len() / OUT_LEN) as u64;
assert_eq!(chunk_count % parent_count, 0, "invalid child count");
// The lazy merge of the CV stack needs at least 2 inputs.
// And compress_parents_to_parent_node needs at least 2 blocks.
assert!(parent_count > 2, "invalid parent count");
// The shader inputs and outputs are 32-bit words, which are in native byte order.
// The chunk shader byte swaps its input, but neither shader byte swaps its output.
// Since the rest of the code assumes little endian, byte swap the buffer here.
Self::swap_endian::<J>(parents);
let cv_pair = compress_parents_to_parent_node::<J>(
parents,
&self.key,
self.chunk_state.flags,
self.chunk_state.platform,
);
let left_cv = array_ref!(cv_pair, 0, 32);
let right_cv = array_ref!(cv_pair, 32, 32);
// Push the two CVs we received into the CV stack in order. Because
// the stack merges lazily, this guarantees we aren't merging the
// root.
self.push_cv(left_cv, chunk_counter);
self.push_cv(right_cv, chunk_counter + (chunk_count / 2));
self.chunk_state.chunk_counter += chunk_count;
self
}
// CPU simulation of the BLAKE3 chunk shader.
//
// This can be used to test the real shader.
//
// Note: unlike the real shader, this simulation always uses little-endian
// inputs and outputs.
#[doc(hidden)]
pub fn simulate_chunk_shader<J: Join>(
&self,
count: usize,
input: &[u8],
output: &mut [u8],
control: &GpuControl,
) {
assert_eq!(input.len(), count * CHUNK_LEN, "invalid input size");
assert_eq!(output.len(), count * OUT_LEN, "invalid output size");
if count > self.chunk_state.platform.simd_degree() {
let mid = count / 2;
let (left_in, right_in) = input.split_at(mid * CHUNK_LEN);
let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN);
let control_r = control.plus_chunks(mid as u64);
J::join(
|| self.simulate_chunk_shader::<J>(mid, left_in, left_out, control),
|| self.simulate_chunk_shader::<J>(count - mid, right_in, right_out, &control_r),
left_in.len(),
right_in.len(),
);
} else if count > 0 {
let mut chunks = ArrayVec::<[&[u8; CHUNK_LEN]; MAX_SIMD_DEGREE]>::new();
for chunk in input.chunks_exact(CHUNK_LEN) {
chunks.push(array_ref!(chunk, 0, CHUNK_LEN));
}
self.chunk_state.platform.hash_many(
&chunks,
control.key(),
control.chunk_counter(),
IncrementCounter::Yes,
control.flags(),
CHUNK_START,
CHUNK_END,
output,
);
}
}
// CPU simulation of the BLAKE3 parent shader.
//
// This can be used to test the real shader.
//
// Note: unlike the real shader, this simulation always uses little-endian
// inputs and outputs.
#[doc(hidden)]
pub fn simulate_parent_shader<J: Join>(
&self,
count: usize,
input: &[u8],
output: &mut [u8],
control: &GpuControl,
) {
assert_eq!(input.len(), count * BLOCK_LEN, "invalid input size");
assert_eq!(output.len(), count * OUT_LEN, "invalid output size");
if count > self.chunk_state.platform.simd_degree() {
let mid = count / 2;
let (left_in, right_in) = input.split_at(mid * BLOCK_LEN);
let (left_out, right_out) = output.split_at_mut(mid * OUT_LEN);
let control_r = control.plus_chunks(mid as u64);
J::join(
|| self.simulate_parent_shader::<J>(mid, left_in, left_out, control),
|| self.simulate_parent_shader::<J>(count - mid, right_in, right_out, &control_r),
left_in.len(),
right_in.len(),
);
} else if count > 0 {
let mut parents = ArrayVec::<[&[u8; BLOCK_LEN]; MAX_SIMD_DEGREE]>::new();
for parent in input.chunks_exact(BLOCK_LEN) {
parents.push(array_ref!(parent, 0, BLOCK_LEN));
}
self.chunk_state.platform.hash_many(
&parents,
control.key(),
0,
IncrementCounter::No,
control.flags() | PARENT,
0,
0,
output,
);
}
}
#[doc(hidden)]
#[cfg(target_endian = "big")]
pub fn swap_endian<J: Join>(buffer: &mut [u8]) {
debug_assert!(buffer.len().is_power_of_two(), "invalid buffer size");
debug_assert_eq!(buffer.len() % OUT_LEN, 0, "invalid buffer size");
if buffer.len() > OUT_LEN {
let (left, right) = buffer.split_at_mut(buffer.len() / 2);
let left_len = left.len();
let right_len = right.len();
J::join(
|| Self::swap_endian::<J>(left),
|| Self::swap_endian::<J>(right),
left_len,
right_len,
);
} else {
for buf in buffer.chunks_exact_mut(4) {
buf.swap(0, 3);
buf.swap(1, 2);
}
}
}
#[doc(hidden)]
#[inline(always)]
#[cfg(target_endian = "little")]
pub fn swap_endian<J: Join>(_buffer: &mut [u8]) {}
}
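// Editor's illustrative sketch (not part of the original source): the intended
// call pattern for the GPU path documented above, with the CPU shader
// simulations standing in for a real GPU dispatch. Assumptions: a little-endian
// host (the simulations always emit little-endian words), a caller-supplied
// `J: Join` implementation, and an arbitrary power-of-two chunk count per
// dispatch chosen only for the example.
#[allow(dead_code)]
fn simulated_gpu_flow_example<J: Join>(input: &[u8]) -> GpuHasher {
    const GPU_CHUNKS: usize = 8; // power of two; 8 chunks -> 4 parents (> 2)
    const GPU_BYTES: usize = GPU_CHUNKS * CHUNK_LEN;
    let mut hasher = GpuHasher::new();
    let mut offset = 0;
    while input.len() - offset >= GPU_BYTES {
        // Must match the hasher's implicit chunk counter (see update_from_gpu).
        let control = hasher.gpu_control((offset / CHUNK_LEN) as u64);
        // Chunk stage: full chunks -> chaining values.
        let mut cvs = [0u8; GPU_CHUNKS * OUT_LEN];
        hasher.simulate_chunk_shader::<J>(
            GPU_CHUNKS,
            &input[offset..offset + GPU_BYTES],
            &mut cvs,
            &control,
        );
        // One parent stage: pairs of chaining values -> parent chaining values.
        let mut parents = [0u8; (GPU_CHUNKS / 2) * OUT_LEN];
        hasher.simulate_parent_shader::<J>(GPU_CHUNKS / 2, &cvs, &mut parents, &control);
        // Fold the shader output back into the hasher state.
        hasher.update_from_gpu::<J>(GPU_CHUNKS as u64, &mut parents);
        offset += GPU_BYTES;
    }
    // The variable-sized tail goes through the ordinary CPU path.
    hasher.update(&input[offset..]);
    hasher
}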
impl Deref for GpuHasher {
type Target = Hasher;
#[inline]
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for GpuHasher {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<GpuHasher> for Hasher {
#[inline]
fn from(hasher: GpuHasher) -> Hasher {
hasher.inner
}
}
/// SPIR-V shader modules.
pub mod shaders {
/// Shader module for one level of the BLAKE3 tree.
pub mod blake3 {
/// Returns the SPIR-V code for the chunk shader module.
#[cfg(target_endian = "big")]
pub fn chunk_shader() -> &'static [u8] | {
include_bytes!("shaders/blake3-chunk-be.spv")
} | identifier_body |
|
mod.rs | (&self) -> u8 {
self.d as u8
}
/// Returns the bytes to be copied to the control uniform in the GPU.
///
/// The contents of the returned slice are opaque and should be interpreted
/// only by the shader.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
// According to the specification, the host and the device must have
// the same endianness, so no endian conversion is necessary even on
// big-endian hosts.
debug_assert_eq!(
mem::size_of_val(self),
shaders::blake3::CONTROL_UNIFORM_SIZE,
"must not have padding"
);
unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of_val(self)) }
}
}
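// Editor's illustrative sketch (not part of the original source): staging the
// opaque control words for upload into the shader's control uniform. The real
// GPU buffer/queue API is outside this file, so a caller-provided byte slice
// (at least shaders::blake3::CONTROL_UNIFORM_SIZE bytes) stands in for the
// mapped uniform memory.
#[allow(dead_code)]
fn stage_control_uniform_example(hasher: &GpuHasher, chunk_counter: u64, dst: &mut [u8]) {
    let control = hasher.gpu_control(chunk_counter);
    let bytes = control.as_bytes();
    // Copy verbatim; the layout is only meaningful to the shader.
    dst[..bytes.len()].copy_from_slice(bytes);
}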
// Variant of compress_subtree_wide which takes parents as input.
fn compress_parents_wide<J: Join>(
input: &[u8],
key: &CVWords,
flags: u8,
platform: Platform,
out: &mut [u8],
) -> usize {
debug_assert!(input.len().is_power_of_two());
// Note that the single block case does *not* bump the SIMD degree up to 2
// when it is 1. This allows Rayon the option of multi-threading even the
// 2-block case, which can help performance on smaller platforms.
if input.len() <= platform.simd_degree() * BLOCK_LEN {
return compress_parents_parallel(input, key, flags, platform, out);
}
// With more than simd_degree blocks, we need to recurse. Start by dividing
// the input into left and right subtrees. (Note that this is only optimal
// as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
// of 3 or something, we'll need a more complicated strategy.)
debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
let (left, right) = input.split_at(input.len() / 2);
// Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
// account for the special case of returning 2 outputs when the SIMD degree
// is 1.
let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
let degree = if left.len() == BLOCK_LEN {
// The "simd_degree=1 and we're at the leaf nodes" case.
debug_assert_eq!(platform.simd_degree(), 1);
1
} else {
cmp::max(platform.simd_degree(), 2)
};
let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);
// Recurse! This uses multiple threads if the "rayon" feature is enabled.
let (left_n, right_n) = J::join(
|| compress_parents_wide::<J>(left, key, flags, platform, left_out),
|| compress_parents_wide::<J>(right, key, flags, platform, right_out),
left.len(),
right.len(),
);
// The special case again. If simd_degree=1, then we'll have left_n=1 and
// right_n=1. Rather than compressing them into a single output, return
// them directly, to make sure we always have at least two outputs.
debug_assert_eq!(left_n, degree);
debug_assert!(right_n >= 1 && right_n <= left_n);
if left_n == 1 {
out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
return 2;
}
// Otherwise, do one layer of parent node compression.
let num_children = left_n + right_n;
compress_parents_parallel(
&cv_array[..num_children * OUT_LEN],
key,
flags,
platform,
out,
)
}
// Variant of compress_subtree_to_parent_node which takes parents as input.
fn compress_parents_to_parent_node<J: Join>(
input: &[u8],
key: &CVWords,
flags: u8,
platform: Platform,
) -> [u8; BLOCK_LEN] {
debug_assert!(input.len() > BLOCK_LEN);
let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
let mut num_cvs = compress_parents_wide::<J>(input, &key, flags, platform, &mut cv_array);
debug_assert!(num_cvs >= 2);
// If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
// compress_parents_wide() returns more than 2 chaining values. Condense
// them into 2 by forming parent nodes repeatedly.
let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
while num_cvs > 2 {
let cv_slice = &cv_array[..num_cvs * OUT_LEN];
num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
}
*array_ref!(cv_array, 0, 2 * OUT_LEN)
}
/// GPU-accelerated Hasher.
///
/// This is a wrapper around a [`Hasher`] which also allows exporting the key
/// and flags to be used by a GPU shader, and importing the shader's result.
///
/// This wrapper should be used with care, since incorrect use can lead to a
/// wrong hash output. It also allows extracting the key from the state, which
/// would otherwise not be allowed in safe code.
///
/// This wrapper can be freely converted to its inner [`Hasher`], through the
/// `Deref`, `DerefMut`, and `Into` traits. Prefer to use the inner [`Hasher`]
/// wherever the extra functionality from this wrapper is not needed.
///
/// [`Hasher`]: ../struct.Hasher.html
#[derive(Clone, Debug, Default)]
pub struct GpuHasher {
inner: Hasher,
}
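// Editor's illustrative sketch (not part of the original source): a keyed
// hasher exports its key and flags to the GPU the same way as the default
// hasher, through GpuControl; only the construction differs.
#[allow(dead_code)]
fn keyed_control_example(key: &[u8; KEY_LEN]) -> GpuControl {
    GpuHasher::new_keyed(key).gpu_control(0)
}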
impl GpuHasher {
/// Wrapper for [`Hasher::new`](../struct.Hasher.html#method.new).
#[inline]
pub fn new() -> Self {
Self {
inner: Hasher::new(),
}
}
/// Wrapper for [`Hasher::new_keyed`](../struct.Hasher.html#method.new_keyed).
#[inline]
pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
Self {
inner: Hasher::new_keyed(key),
}
}
/// Wrapper for [`Hasher::new_derive_key`](../struct.Hasher.html#method.new_derive_key).
#[inline]
pub fn new_derive_key(context: &str) -> Self {
Self {
inner: Hasher::new_derive_key(context),
}
}
/// Obtain the [`GpuControl`](struct.GpuControl.html) to hash full chunks starting with `chunk_counter`
/// or parent nodes.
pub fn gpu_control(&self, chunk_counter: u64) -> GpuControl {
GpuControl::new(&self.key, chunk_counter, self.chunk_state.flags)
}
/// GPU-accelerated version of [`update_with_join`].
///
/// Unlike [`update_with_join`], this method receives the parents computed
/// by one or more applications of the BLAKE3 shader.
///
/// This method has several restrictions. The size of the shader input must
/// be a power of two, it must be naturally aligned within the hash input,
/// and the hasher state must not have any leftover bytes in its internal
/// buffers. The simplest way to follow these invariants is to use this
/// method, with the same chunk count and buffer size, for all of the input
/// except for a variable-sized tail, which can use [`update_with_join`] or
/// [`update`].
///
/// Note: the chunk counter is implicit in this method, but it must be the
/// same as the chunk counter in the [`GpuControl`] passed to the shader,
/// otherwise it will lead to a wrong hash output.
///
/// Note: on a big-endian host, this method will swap the endianness of the
/// shader output in-place.
///
/// [`update`]: #method.update
/// [`update_with_join`]: #method.update_with_join
/// [`GpuControl`]: struct.GpuControl.html
pub fn update_from_gpu<J: Join>(&mut self, chunk_count: u64, parents: &mut [u8]) -> &mut Self {
assert_eq!(self.chunk_state.len(), 0, "leftover buffered bytes");
let chunk_counter = self.chunk_state.chunk_counter;
// These three checks make sure the increment of t0 in the shader did not overflow.
assert!(chunk_count.is_power_of_two(), "bad chunk count");
assert!(chunk_count <= (1 << 32), "chunk count overflow");
assert_eq!(chunk_counter % chunk_count, 0, "misaligned hash");
assert_eq!(parents.len() % OUT_LEN, 0, "invalid hash size");
let parent_count = (parents.len() / OUT_LEN) as u6 | flags | identifier_name |
|
transfer_leads.py | sends to Certify SFDC instance
result_dict = send_to_certify(standardized_list)
print(result_dict)
#posts notification to Slack upon failure to insert to Certify SFDC
if(result_dict[0].get('success') == False):
message = f"LEAD TRANSFER TO CERTIFY FAILURE \n"
message += f"failed lead insert for the following IDs: \n"
for num in range(len(lead_list)):
idval = lead_list[num].get("Id")
message += idval
            if num != (len(lead_list) - 1):
message += f", "
else:
message += "\n"
message += f"Returned error log from Salesforce: \n"
message += result_dict[0].get('errors')[0].get('message')
_publish_alert(message)
else:
#deletes JSON file within S3
s3 = boto3.client('s3')
s3.delete_object(Bucket=bucket,Key=key)
return {
'statusCode': 200,
'body': json.dumps('Transfer complete')
}
def _get_lead_list(idList):
query_string = "SELECT ID,FirstName,LastName,Company,Phone,MobilePhone,Email,Fax,LinkedIn_Profile__c,Title,Status,Street,State,City,PostalCode,Country,NumberOfEmployees,Industry,LeadSource,Website,Recent_Conversion__c,Recent_Conversion_Date__c,(SELECT Subject,Type FROM Tasks WHERE Type = 'Form Submission'),(SELECT Campaign_Name__c,Status FROM CampaignMembers) FROM Lead WHERE "
id_query_string = ""
for num in range(len(idList)):
id_query_string += "(ID = '" + idList[num] + "')"
if num != (len(idList) - 1):
id_query_string += " OR "
query_string += id_query_string
sf = Salesforce(username=os.environ['cr_sf_username'], password=os.environ['cr_sf_password'], security_token=os.environ['cr_sf_token'],domain=os.environ['cr_sf_host'])
sf_data = sf.query_all(query_string)
return sf_data['records']
def create_new_dict(lead_dict):
new_dict = {}
new_dict['FirstName'] = lead_dict.get('FirstName')
new_dict['LastName'] = lead_dict.get('LastName')
new_dict['Company'] = lead_dict.get('Company')
new_dict['Title'] = lead_dict.get('Title')
new_dict['Phone'] = lead_dict.get('Phone')
new_dict['Email'] = lead_dict.get('Email')
new_dict['Fax'] = lead_dict.get('Fax')
new_dict['Linkedin_Profile__c'] = lead_dict.get('LinkedIn_Profile__c')
new_dict['Street'] = lead_dict.get('Street')
new_dict['State'] = lead_dict.get('State')
new_dict['City'] = lead_dict.get('City')
new_dict['PostalCode'] = lead_dict.get('PostalCode')
new_dict['Country'] = lead_dict.get('Country')
new_dict['Website'] = lead_dict.get('Website')
new_dict['NumberOfEmployees'] = lead_dict.get('NumberOfEmployees')
new_dict['Industry'] = lead_dict.get('Industry')
new_dict['LeadSource'] = lead_dict.get('LeadSource')
new_dict['Chrome_River_Transfer_Notes__c'] = lead_dict.get('Chrome_River_Transfer_Notes__c')
new_dict['Employee_Range__c'] = lead_dict.get('Employee_Range__c')
new_dict['Chrome_River_MQL__c'] = lead_dict.get('Chrome_River_MQL__c')
print(new_dict)
return new_dict
def send_to_certify(lead_list):
sf = Salesforce(username=os.environ['cert_sf_username'], password=os.environ['cert_sf_password'], security_token=os.environ['cert_sf_token'],domain=os.environ['cert_sf_host'])
return sf.bulk.Lead.insert(lead_list,batch_size=200)
def add_notes_and_standardize(lead_list):
new_dict_array = []
for lead in lead_list:
lead.__setitem__('Chrome_River_Transfer_Notes__c', generate_cr_notes_field(lead))
lead.__setitem__('Employee_Range__c',standardize_employee_range(lead))
lead.__setitem__('Chrome_River_MQL__c',mql_verify(lead))
lead['LeadSource'] = 'Chrome River Transfer'
lead['Industry'] = standardize_industry(lead)
if(lead.get('Country') != None):
lead['Country'] = standardize_country(lead)
if(lead.get('State') != None):
lead['State'] = standardize_state(lead)
new_dict_array.append(create_new_dict(lead))
return new_dict_array
def mql_verify(lead_dict):
mql_status = False
if (lead_dict.get('Recent_Conversion__c') != None):
mql_status = True
print(mql_status)
return mql_status
def generate_cr_notes_field(lead_dict):
note_text = ''
if(lead_dict.get('LeadSource') != None):
note_text += 'LeadSource: ' + lead_dict.get('LeadSource') + ' | '
if(lead_dict.get('Recent_Conversion__c') != None):
note_text += 'Recent Conversion: ' + lead_dict.get('Recent_Conversion__c') + ' |'
if(lead_dict.get('Tasks') != None):
note_text += generate_task_summary(lead_dict.get('Tasks').get('records'))
if(lead_dict.get('CampaignMembers') != None):
note_text += generate_campaign_summary(lead_dict.get('CampaignMembers').get('records'))
return note_text
def generate_task_summary(task_list):
task_text = 'Tasks: '
for task in task_list:
task_text += ' ( ' + task.get('Subject') + ' ) '
task_text += '| '
return task_text
def generate_campaign_summary(campaign_mem_list):
campaign_mem_text = 'Campaigns: '
for campaign in campaign_mem_list:
campaign_mem_text += ' ( ' + campaign.get('Campaign_Name__c') + ' Status: ' + campaign.get('Status') + ' ) '
campaign_mem_text += '| '
return campaign_mem_text
def standardize_employee_range(lead_dict):
e_count = lead_dict.get('NumberOfEmployees')
e_range = ''
if(e_count < 26):
e_range = '1-25'
    elif(e_count >= 26):
e_range = '26-200'
return e_range
def | (lead_dict):
cr_industry = lead_dict.get('Industry')
cert_industry = lead_dict.get('Industry')
if(cr_industry == 'Accounting'):
cert_industry = 'Business Services'
elif(cr_industry == 'Advertising'):
cert_industry = 'Business Services'
elif(cr_industry == 'Apparel'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Architecture'):
cert_industry = 'Business Services'
elif(cr_industry == 'Banking'):
cert_industry = 'Finance'
elif(cr_industry == 'Biotechnology'):
cert_industry = 'Healthcare'
elif(cr_industry == 'Chemicals'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Communications'):
cert_industry = 'Telecommunications'
elif(cr_industry == 'Consulting'):
cert_industry = 'Business Services'
elif(cr_industry == 'Electronics'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Energy'):
cert_industry = 'Energy, Utilities & Waste Treatment'
elif(cr_industry == 'Engineering'):
cert_industry = 'Business Services'
elif(cr_industry == 'Entertainment'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Environmental'):
cert_industry = 'Energy, Utilities & Waste Treatment'
elif(cr_industry == 'Food & Beverage'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Machinery'):
cert_industry = 'Industrial'
elif(cr_industry == 'Media'):
cert_industry = 'Media & Internet'
elif(cr_industry == 'Not For Profit'):
cert_industry = 'Organizations'
elif(cr_industry == 'Other'):
cert_industry = 'Industrial'
elif(cr_industry == 'Professional Service'):
cert_industry = 'Business Services'
elif(cr_industry == 'Public Relations'):
cert_industry = 'Business Services'
elif(cr_industry == 'Recreation'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Shipping'):
cert_industry = 'Transportation'
elif(cr_industry == 'Sports'):
cert_industry = 'Media & Internet'
elif(cr_industry == 'Technology'):
cert_industry = 'Software'
elif(cr_industry == 'Telecom'):
cert_industry = 'Telecommunications'
elif(cr_industry == 'Travel'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Utilities'):
cert_industry = 'Energy, Utilities & Waste Treatment'
else:
cert_industry = 'Industrial'
return cert_industry
def standardize_country(lead_dict):
cr_country = lead_dict.get('Country')
cert_country = lead | standardize_industry | identifier_name |
transfer_leads.py | ['cr_sf_username'], password=os.environ['cr_sf_password'], security_token=os.environ['cr_sf_token'],domain=os.environ['cr_sf_host'])
sf_data = sf.query_all(query_string)
return sf_data['records']
def create_new_dict(lead_dict):
new_dict = {}
new_dict['FirstName'] = lead_dict.get('FirstName')
new_dict['LastName'] = lead_dict.get('LastName')
new_dict['Company'] = lead_dict.get('Company')
new_dict['Title'] = lead_dict.get('Title')
new_dict['Phone'] = lead_dict.get('Phone')
new_dict['Email'] = lead_dict.get('Email')
new_dict['Fax'] = lead_dict.get('Fax')
new_dict['Linkedin_Profile__c'] = lead_dict.get('LinkedIn_Profile__c')
new_dict['Street'] = lead_dict.get('Street')
new_dict['State'] = lead_dict.get('State')
new_dict['City'] = lead_dict.get('City')
new_dict['PostalCode'] = lead_dict.get('PostalCode')
new_dict['Country'] = lead_dict.get('Country')
new_dict['Website'] = lead_dict.get('Website')
new_dict['NumberOfEmployees'] = lead_dict.get('NumberOfEmployees')
new_dict['Industry'] = lead_dict.get('Industry')
new_dict['LeadSource'] = lead_dict.get('LeadSource')
new_dict['Chrome_River_Transfer_Notes__c'] = lead_dict.get('Chrome_River_Transfer_Notes__c')
new_dict['Employee_Range__c'] = lead_dict.get('Employee_Range__c')
new_dict['Chrome_River_MQL__c'] = lead_dict.get('Chrome_River_MQL__c')
print(new_dict)
return new_dict
def send_to_certify(lead_list):
sf = Salesforce(username=os.environ['cert_sf_username'], password=os.environ['cert_sf_password'], security_token=os.environ['cert_sf_token'],domain=os.environ['cert_sf_host'])
return sf.bulk.Lead.insert(lead_list,batch_size=200)
def add_notes_and_standardize(lead_list):
new_dict_array = []
for lead in lead_list:
lead.__setitem__('Chrome_River_Transfer_Notes__c', generate_cr_notes_field(lead))
lead.__setitem__('Employee_Range__c',standardize_employee_range(lead))
lead.__setitem__('Chrome_River_MQL__c',mql_verify(lead))
lead['LeadSource'] = 'Chrome River Transfer'
lead['Industry'] = standardize_industry(lead)
if(lead.get('Country') != None):
lead['Country'] = standardize_country(lead)
if(lead.get('State') != None):
lead['State'] = standardize_state(lead)
new_dict_array.append(create_new_dict(lead))
return new_dict_array
def mql_verify(lead_dict):
mql_status = False
if (lead_dict.get('Recent_Conversion__c') != None):
mql_status = True
print(mql_status)
return mql_status
def generate_cr_notes_field(lead_dict):
note_text = ''
if(lead_dict.get('LeadSource') != None):
note_text += 'LeadSource: ' + lead_dict.get('LeadSource') + ' | '
if(lead_dict.get('Recent_Conversion__c') != None):
note_text += 'Recent Conversion: ' + lead_dict.get('Recent_Conversion__c') + ' |'
if(lead_dict.get('Tasks') != None):
note_text += generate_task_summary(lead_dict.get('Tasks').get('records'))
if(lead_dict.get('CampaignMembers') != None):
note_text += generate_campaign_summary(lead_dict.get('CampaignMembers').get('records'))
return note_text
def generate_task_summary(task_list):
task_text = 'Tasks: '
for task in task_list:
task_text += ' ( ' + task.get('Subject') + ' ) '
task_text += '| '
return task_text
def generate_campaign_summary(campaign_mem_list):
campaign_mem_text = 'Campaigns: '
for campaign in campaign_mem_list:
campaign_mem_text += ' ( ' + campaign.get('Campaign_Name__c') + ' Status: ' + campaign.get('Status') + ' ) '
campaign_mem_text += '| '
return campaign_mem_text
def standardize_employee_range(lead_dict):
e_count = lead_dict.get('NumberOfEmployees')
e_range = ''
if(e_count < 26):
e_range = '1-25'
    elif(e_count >= 26):
e_range = '26-200'
return e_range
def standardize_industry(lead_dict):
cr_industry = lead_dict.get('Industry')
cert_industry = lead_dict.get('Industry')
if(cr_industry == 'Accounting'):
cert_industry = 'Business Services'
elif(cr_industry == 'Advertising'):
cert_industry = 'Business Services'
elif(cr_industry == 'Apparel'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Architecture'):
cert_industry = 'Business Services'
elif(cr_industry == 'Banking'):
cert_industry = 'Finance'
elif(cr_industry == 'Biotechnology'):
cert_industry = 'Healthcare'
elif(cr_industry == 'Chemicals'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Communications'):
cert_industry = 'Telecommunications'
elif(cr_industry == 'Consulting'):
cert_industry = 'Business Services'
elif(cr_industry == 'Electronics'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Energy'):
cert_industry = 'Energy, Utilities & Waste Treatment'
elif(cr_industry == 'Engineering'):
cert_industry = 'Business Services'
elif(cr_industry == 'Entertainment'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Environmental'):
cert_industry = 'Energy, Utilities & Waste Treatment'
elif(cr_industry == 'Food & Beverage'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Machinery'):
cert_industry = 'Industrial'
elif(cr_industry == 'Media'):
cert_industry = 'Media & Internet'
elif(cr_industry == 'Not For Profit'):
cert_industry = 'Organizations'
elif(cr_industry == 'Other'):
cert_industry = 'Industrial'
elif(cr_industry == 'Professional Service'):
cert_industry = 'Business Services'
elif(cr_industry == 'Public Relations'):
cert_industry = 'Business Services'
elif(cr_industry == 'Recreation'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Shipping'):
cert_industry = 'Transportation'
elif(cr_industry == 'Sports'):
cert_industry = 'Media & Internet'
elif(cr_industry == 'Technology'):
cert_industry = 'Software'
elif(cr_industry == 'Telecom'):
cert_industry = 'Telecommunications'
elif(cr_industry == 'Travel'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Utilities'):
cert_industry = 'Energy, Utilities & Waste Treatment'
else:
cert_industry = 'Industrial'
return cert_industry
def standardize_country(lead_dict):
cr_country = lead_dict.get('Country')
cert_country = lead_dict.get('Country')
if(cr_country == 'Bolivia'):
cert_country = 'Bolivia, Plurinational State of'
elif(cr_country == 'Iran'):
cert_country = 'Iran, Islamic Republic of'
elif(cr_country == 'North Korea'):
cert_country = 'Korea, Democratic People\'s Republic of'
elif(cr_country == 'South Korea'):
cert_country = 'Korea, Republic of'
elif(cr_country == 'Laos'):
cert_country = 'Lao People\'s Democratic Republic'
elif(cr_country == 'Moldova'):
cert_country = 'Moldova, Republic of'
elif(cr_country == 'Marshall Islands'):
cert_country = 'Saint Martin (French part)'
elif(cr_country == 'Macedonia'):
cert_country = 'Greece'
elif(cr_country == 'Russia'):
cert_country = 'Russian Federation'
elif(cr_country == 'Saint Helena'):
cert_country = 'Saint Helena, Ascension and Tristan da Cunha'
elif(cr_country == 'Tanzania'):
cert_country = 'Tanzania, United Republic of'
elif(cr_country == 'Vatican City State'):
cert_country = 'Holy See (Vatican City State)'
elif(cr_country == 'Venezuela'):
cert_country = 'Venezuela, Bolivarian Republic of'
elif(cr_country == 'Viet nam'):
cert_country = 'Vietnam'
return cert_country
def standardize_state(lead_dict):
cert_country = lead_dict.get('Country')
cr_state = lead_dict.get('State')
cert_state = lead_dict.get('State')
if(cert_country == 'Australia'):
if(cr_state == 'Brisbane'):
cert_state = 'Queensland'
if(cert_country == 'China'):
if(cr_state == 'Chinese Taipei'):
cert_state = 'Taiwan'
if(cert_country == 'United Kingdom'):
| cert_state = None | conditional_block |
|
transfer_leads.py | file within S3
s3 = boto3.client('s3')
s3.delete_object(Bucket=bucket,Key=key)
return {
'statusCode': 200,
'body': json.dumps('Transfer complete')
}
def _get_lead_list(idList):
query_string = "SELECT ID,FirstName,LastName,Company,Phone,MobilePhone,Email,Fax,LinkedIn_Profile__c,Title,Status,Street,State,City,PostalCode,Country,NumberOfEmployees,Industry,LeadSource,Website,Recent_Conversion__c,Recent_Conversion_Date__c,(SELECT Subject,Type FROM Tasks WHERE Type = 'Form Submission'),(SELECT Campaign_Name__c,Status FROM CampaignMembers) FROM Lead WHERE "
id_query_string = ""
for num in range(len(idList)):
id_query_string += "(ID = '" + idList[num] + "')"
if num != (len(idList) - 1):
id_query_string += " OR "
query_string += id_query_string
sf = Salesforce(username=os.environ['cr_sf_username'], password=os.environ['cr_sf_password'], security_token=os.environ['cr_sf_token'],domain=os.environ['cr_sf_host'])
sf_data = sf.query_all(query_string)
return sf_data['records']
def create_new_dict(lead_dict):
new_dict = {}
new_dict['FirstName'] = lead_dict.get('FirstName')
new_dict['LastName'] = lead_dict.get('LastName')
new_dict['Company'] = lead_dict.get('Company')
new_dict['Title'] = lead_dict.get('Title')
new_dict['Phone'] = lead_dict.get('Phone')
new_dict['Email'] = lead_dict.get('Email')
new_dict['Fax'] = lead_dict.get('Fax')
new_dict['Linkedin_Profile__c'] = lead_dict.get('LinkedIn_Profile__c')
new_dict['Street'] = lead_dict.get('Street')
new_dict['State'] = lead_dict.get('State')
new_dict['City'] = lead_dict.get('City')
new_dict['PostalCode'] = lead_dict.get('PostalCode')
new_dict['Country'] = lead_dict.get('Country')
new_dict['Website'] = lead_dict.get('Website')
new_dict['NumberOfEmployees'] = lead_dict.get('NumberOfEmployees')
new_dict['Industry'] = lead_dict.get('Industry')
new_dict['LeadSource'] = lead_dict.get('LeadSource')
new_dict['Chrome_River_Transfer_Notes__c'] = lead_dict.get('Chrome_River_Transfer_Notes__c')
new_dict['Employee_Range__c'] = lead_dict.get('Employee_Range__c')
new_dict['Chrome_River_MQL__c'] = lead_dict.get('Chrome_River_MQL__c')
print(new_dict)
return new_dict
def send_to_certify(lead_list):
sf = Salesforce(username=os.environ['cert_sf_username'], password=os.environ['cert_sf_password'], security_token=os.environ['cert_sf_token'],domain=os.environ['cert_sf_host'])
return sf.bulk.Lead.insert(lead_list,batch_size=200)
def add_notes_and_standardize(lead_list):
new_dict_array = []
for lead in lead_list:
lead.__setitem__('Chrome_River_Transfer_Notes__c', generate_cr_notes_field(lead))
lead.__setitem__('Employee_Range__c',standardize_employee_range(lead))
lead.__setitem__('Chrome_River_MQL__c',mql_verify(lead))
lead['LeadSource'] = 'Chrome River Transfer'
lead['Industry'] = standardize_industry(lead)
if(lead.get('Country') != None):
lead['Country'] = standardize_country(lead)
if(lead.get('State') != None):
lead['State'] = standardize_state(lead)
new_dict_array.append(create_new_dict(lead))
return new_dict_array
def mql_verify(lead_dict):
mql_status = False
if (lead_dict.get('Recent_Conversion__c') != None):
mql_status = True
print(mql_status)
return mql_status
def generate_cr_notes_field(lead_dict):
note_text = ''
if(lead_dict.get('LeadSource') != None):
note_text += 'LeadSource: ' + lead_dict.get('LeadSource') + ' | '
if(lead_dict.get('Recent_Conversion__c') != None):
note_text += 'Recent Conversion: ' + lead_dict.get('Recent_Conversion__c') + ' |'
if(lead_dict.get('Tasks') != None):
note_text += generate_task_summary(lead_dict.get('Tasks').get('records'))
if(lead_dict.get('CampaignMembers') != None):
note_text += generate_campaign_summary(lead_dict.get('CampaignMembers').get('records'))
return note_text
def generate_task_summary(task_list):
task_text = 'Tasks: '
for task in task_list:
task_text += ' ( ' + task.get('Subject') + ' ) '
task_text += '| '
return task_text
def generate_campaign_summary(campaign_mem_list):
campaign_mem_text = 'Campaigns: '
for campaign in campaign_mem_list:
campaign_mem_text += ' ( ' + campaign.get('Campaign_Name__c') + ' Status: ' + campaign.get('Status') + ' ) '
campaign_mem_text += '| '
return campaign_mem_text
def standardize_employee_range(lead_dict):
e_count = lead_dict.get('NumberOfEmployees')
e_range = ''
if(e_count < 26):
e_range = '1-25'
    elif(e_count >= 26):
e_range = '26-200'
return e_range
def standardize_industry(lead_dict):
cr_industry = lead_dict.get('Industry')
cert_industry = lead_dict.get('Industry')
if(cr_industry == 'Accounting'):
cert_industry = 'Business Services'
elif(cr_industry == 'Advertising'):
cert_industry = 'Business Services'
elif(cr_industry == 'Apparel'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Architecture'):
cert_industry = 'Business Services'
elif(cr_industry == 'Banking'):
cert_industry = 'Finance'
elif(cr_industry == 'Biotechnology'):
cert_industry = 'Healthcare'
elif(cr_industry == 'Chemicals'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Communications'):
cert_industry = 'Telecommunications'
elif(cr_industry == 'Consulting'):
cert_industry = 'Business Services'
elif(cr_industry == 'Electronics'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Energy'):
cert_industry = 'Energy, Utilities & Waste Treatment'
elif(cr_industry == 'Engineering'):
cert_industry = 'Business Services'
elif(cr_industry == 'Entertainment'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Environmental'):
cert_industry = 'Energy, Utilities & Waste Treatment'
elif(cr_industry == 'Food & Beverage'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Machinery'):
cert_industry = 'Industrial'
elif(cr_industry == 'Media'):
cert_industry = 'Media & Internet'
elif(cr_industry == 'Not For Profit'):
cert_industry = 'Organizations'
elif(cr_industry == 'Other'):
cert_industry = 'Industrial'
elif(cr_industry == 'Professional Service'):
cert_industry = 'Business Services'
elif(cr_industry == 'Public Relations'):
cert_industry = 'Business Services'
elif(cr_industry == 'Recreation'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Shipping'):
cert_industry = 'Transportation'
elif(cr_industry == 'Sports'):
cert_industry = 'Media & Internet'
elif(cr_industry == 'Technology'):
cert_industry = 'Software'
elif(cr_industry == 'Telecom'):
cert_industry = 'Telecommunications'
elif(cr_industry == 'Travel'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Utilities'):
cert_industry = 'Energy, Utilities & Waste Treatment'
else:
cert_industry = 'Industrial'
return cert_industry
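# Maintenance note (not part of the original script): the elif chain above is equivalent to a
# single dictionary lookup, e.g.
#   INDUSTRY_MAP = {'Accounting': 'Business Services', 'Banking': 'Finance', ...}
#   cert_industry = INDUSTRY_MAP.get(cr_industry, 'Industrial')
# which keeps the Chrome River -> Certify picklist mapping in one place.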
def standardize_country(lead_dict):
| cr_country = lead_dict.get('Country')
cert_country = lead_dict.get('Country')
if(cr_country == 'Bolivia'):
cert_country = 'Bolivia, Plurinational State of'
elif(cr_country == 'Iran'):
cert_country = 'Iran, Islamic Republic of'
elif(cr_country == 'North Korea'):
cert_country = 'Korea, Democratic People\'s Republic of'
elif(cr_country == 'South Korea'):
cert_country = 'Korea, Republic of'
elif(cr_country == 'Laos'):
cert_country = 'Lao People\'s Democratic Republic'
elif(cr_country == 'Moldova'):
cert_country = 'Moldova, Republic of'
elif(cr_country == 'Marshall Islands'):
cert_country = 'Saint Martin (French part)'
elif(cr_country == 'Macedonia'):
cert_country = 'Greece'
elif(cr_country == 'Russia'):
cert_country = 'Russian Federation' | identifier_body |
|
transfer_leads.py | message += f"Returned error log from Salesforce: \n"
message += result_dict[0].get('errors')[0].get('message')
_publish_alert(message)
else:
#deletes JSON file within S3
s3 = boto3.client('s3')
s3.delete_object(Bucket=bucket,Key=key)
return {
'statusCode': 200,
'body': json.dumps('Transfer complete')
}
def _get_lead_list(idList):
query_string = "SELECT ID,FirstName,LastName,Company,Phone,MobilePhone,Email,Fax,LinkedIn_Profile__c,Title,Status,Street,State,City,PostalCode,Country,NumberOfEmployees,Industry,LeadSource,Website,Recent_Conversion__c,Recent_Conversion_Date__c,(SELECT Subject,Type FROM Tasks WHERE Type = 'Form Submission'),(SELECT Campaign_Name__c,Status FROM CampaignMembers) FROM Lead WHERE "
id_query_string = ""
for num in range(len(idList)):
id_query_string += "(ID = '" + idList[num] + "')"
if num != (len(idList) - 1):
id_query_string += " OR "
query_string += id_query_string
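	# Example of the assembled filter for two ids (hypothetical values):
	#   ... WHERE (ID = '00Q000000000001') OR (ID = '00Q000000000002')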
sf = Salesforce(username=os.environ['cr_sf_username'], password=os.environ['cr_sf_password'], security_token=os.environ['cr_sf_token'],domain=os.environ['cr_sf_host'])
sf_data = sf.query_all(query_string)
return sf_data['records']
def create_new_dict(lead_dict):
new_dict = {}
new_dict['FirstName'] = lead_dict.get('FirstName')
new_dict['LastName'] = lead_dict.get('LastName')
new_dict['Company'] = lead_dict.get('Company')
new_dict['Title'] = lead_dict.get('Title')
new_dict['Phone'] = lead_dict.get('Phone')
new_dict['Email'] = lead_dict.get('Email')
new_dict['Fax'] = lead_dict.get('Fax')
new_dict['Linkedin_Profile__c'] = lead_dict.get('LinkedIn_Profile__c')
new_dict['Street'] = lead_dict.get('Street')
new_dict['State'] = lead_dict.get('State')
new_dict['City'] = lead_dict.get('City')
new_dict['PostalCode'] = lead_dict.get('PostalCode')
new_dict['Country'] = lead_dict.get('Country')
new_dict['Website'] = lead_dict.get('Website')
new_dict['NumberOfEmployees'] = lead_dict.get('NumberOfEmployees')
new_dict['Industry'] = lead_dict.get('Industry')
new_dict['LeadSource'] = lead_dict.get('LeadSource')
new_dict['Chrome_River_Transfer_Notes__c'] = lead_dict.get('Chrome_River_Transfer_Notes__c')
new_dict['Employee_Range__c'] = lead_dict.get('Employee_Range__c')
new_dict['Chrome_River_MQL__c'] = lead_dict.get('Chrome_River_MQL__c')
print(new_dict)
return new_dict
def send_to_certify(lead_list):
sf = Salesforce(username=os.environ['cert_sf_username'], password=os.environ['cert_sf_password'], security_token=os.environ['cert_sf_token'],domain=os.environ['cert_sf_host'])
return sf.bulk.Lead.insert(lead_list,batch_size=200)
def add_notes_and_standardize(lead_list):
new_dict_array = []
for lead in lead_list:
lead.__setitem__('Chrome_River_Transfer_Notes__c', generate_cr_notes_field(lead))
lead.__setitem__('Employee_Range__c',standardize_employee_range(lead))
lead.__setitem__('Chrome_River_MQL__c',mql_verify(lead))
lead['LeadSource'] = 'Chrome River Transfer'
lead['Industry'] = standardize_industry(lead)
if(lead.get('Country') != None):
lead['Country'] = standardize_country(lead)
if(lead.get('State') != None):
lead['State'] = standardize_state(lead)
new_dict_array.append(create_new_dict(lead))
return new_dict_array
def mql_verify(lead_dict):
mql_status = False
if (lead_dict.get('Recent_Conversion__c') != None):
mql_status = True
print(mql_status)
return mql_status
def generate_cr_notes_field(lead_dict):
note_text = ''
if(lead_dict.get('LeadSource') != None):
note_text += 'LeadSource: ' + lead_dict.get('LeadSource') + ' | '
if(lead_dict.get('Recent_Conversion__c') != None):
note_text += 'Recent Conversion: ' + lead_dict.get('Recent_Conversion__c') + ' |'
if(lead_dict.get('Tasks') != None):
note_text += generate_task_summary(lead_dict.get('Tasks').get('records'))
if(lead_dict.get('CampaignMembers') != None):
note_text += generate_campaign_summary(lead_dict.get('CampaignMembers').get('records'))
return note_text
def generate_task_summary(task_list):
task_text = 'Tasks: '
for task in task_list:
task_text += ' ( ' + task.get('Subject') + ' ) '
task_text += '| '
return task_text
def generate_campaign_summary(campaign_mem_list):
campaign_mem_text = 'Campaigns: '
for campaign in campaign_mem_list:
campaign_mem_text += ' ( ' + campaign.get('Campaign_Name__c') + ' Status: ' + campaign.get('Status') + ' ) '
campaign_mem_text += '| '
return campaign_mem_text
def standardize_employee_range(lead_dict):
	e_count = lead_dict.get('NumberOfEmployees')
	e_range = ''
	if(e_count == None):
		return e_range
	if(e_count <= 25):
		e_range = '1-25'
	else:
		e_range = '26-200'
	return e_range
def standardize_industry(lead_dict):
cr_industry = lead_dict.get('Industry')
cert_industry = lead_dict.get('Industry')
if(cr_industry == 'Accounting'):
cert_industry = 'Business Services'
elif(cr_industry == 'Advertising'):
cert_industry = 'Business Services'
elif(cr_industry == 'Apparel'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Architecture'):
cert_industry = 'Business Services'
elif(cr_industry == 'Banking'):
cert_industry = 'Finance'
elif(cr_industry == 'Biotechnology'):
cert_industry = 'Healthcare'
elif(cr_industry == 'Chemicals'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Communications'):
cert_industry = 'Telecommunications'
elif(cr_industry == 'Consulting'):
cert_industry = 'Business Services'
elif(cr_industry == 'Electronics'):
cert_industry = 'Manufacturing'
elif(cr_industry == 'Energy'):
cert_industry = 'Energy, Utilities & Waste Treatment'
elif(cr_industry == 'Engineering'):
cert_industry = 'Business Services'
elif(cr_industry == 'Entertainment'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Environmental'):
cert_industry = 'Energy, Utilities & Waste Treatment'
elif(cr_industry == 'Food & Beverage'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Machinery'):
cert_industry = 'Industrial'
elif(cr_industry == 'Media'):
cert_industry = 'Media & Internet'
elif(cr_industry == 'Not For Profit'):
cert_industry = 'Organizations'
elif(cr_industry == 'Other'):
cert_industry = 'Industrial'
elif(cr_industry == 'Professional Service'):
cert_industry = 'Business Services'
elif(cr_industry == 'Public Relations'):
cert_industry = 'Business Services'
elif(cr_industry == 'Recreation'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Shipping'):
cert_industry = 'Transportation'
elif(cr_industry == 'Sports'):
cert_industry = 'Media & Internet'
elif(cr_industry == 'Technology'):
cert_industry = 'Software'
elif(cr_industry == 'Telecom'):
cert_industry = 'Telecommunications'
elif(cr_industry == 'Travel'):
cert_industry = 'Consumer Services'
elif(cr_industry == 'Utilities'):
cert_industry = 'Energy, Utilities & Waste Treatment'
else:
cert_industry = 'Industrial'
return cert_industry
def standardize_country(lead_dict):
cr_country = lead_dict.get('Country')
cert_country = lead_dict.get('Country')
if(cr_country == 'Bolivia'):
cert_country = 'Bolivia, Plurinational State of'
elif(cr_country == 'Iran'):
cert_country = 'Iran, Islamic Republic of'
elif(cr_country == 'North Korea'):
cert_country = 'Korea, Democratic People\'s Republic of'
elif(cr_country == 'South Korea'):
cert_country = 'Korea, Republic of'
elif(cr_country == 'Laos'):
cert_country = 'Lao People\'s Democratic Republic'
elif(cr_country == 'Moldova'):
cert_country = 'Moldova, Republic of'
elif(cr_country == 'Marshall Islands'): | random_line_split |
||
caclient.go | {
	// Url is the access point for the fabric-ca server. Port number and scheme must be provided.
// for example http://127.0.0.1:7054
Url string
	// SkipTLSVerification defines how the connection must handle invalid TLS certificates.
	// If true, all verifications are skipped. This value is overwritten by the Transport property, if provided.
SkipTLSVerification bool
	// Crypto is the CryptoSuite implementation used to sign requests for the fabric-ca server.
Crypto CryptoSuite
	// Transport defines transport rules for communication with the fabric-ca server. If nil, the default Go settings will be used.
	// It is the responsibility of the user to provide proper TLS/certificate settings for TLS communication.
Transport *http.Transport
}
// enrollmentResponse is response from fabric-ca server for enrolment that contains created Ecert
type enrollmentResponse struct {
Success bool `json:"success"`
Result enrollmentResponseResult `json:"result"`
Errors []CAResponseErr `json:"errors"`
Messages []string `json:"messages"`
}
type enrollmentResponseResult struct {
Cert string
ServerInfo enrollmentResponseServerInfo
}
type enrollmentResponseServerInfo struct {
CAName string
CAChain string
}
// Register registers a new user in the fabric-ca server. In the registration request, attributes, affiliation and
// max enrolments must be set. On success, the enrollment secret (password) will be in CAResponse.Result.Secret.
// If a password is not provided, a random secret will be generated.
// It is the responsibility of the SDK user to ensure passwords have sufficient entropy.
// Certificate parameter is certificate for user that makes registration and this user MUST have the role for
// registering new users.
func (f *FabricCAClientImpl) Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) {
if req.EnrolmentId == "" {
return nil, ErrEnrolmentMissing
}
if req.Affiliation == "" {
return nil, ErrAffiliationMissing
}
if req.Type == "" {
return nil, ErrTypeMissing
}
if identity == nil {
return nil, ErrCertificateEmpty
}
reqJson, err := json.Marshal(req)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/register", f.Url)
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson))
httpReq.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, reqJson)
if err != nil {
return nil, err
}
httpReq.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(httpReq)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
result := new(CAResponse)
if err := json.Unmarshal(body, result); err != nil {
return nil, err
}
return result, nil
}
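// Typical call sequence (illustrative identifiers such as caClient and adminIdentity are
// placeholders, not part of the original package docs):
//   resp, _ := caClient.Register(adminIdentity, &CARegistrationRequest{EnrolmentId: "user1", Type: "user", Affiliation: "org1.department1"})
//   id, _, _ := caClient.Enroll("user1", resp.Result.Secret)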
// Enroll executes an enrollment request for a registered user in the fabric-ca server.
// On success, a new Identity with an ECert is returned.
func (f *FabricCAClientImpl) Enroll(enrollmentId, password string) (*Identity, []byte, error) {
if len(enrollmentId) < 1 {
return nil, nil, ErrEnrollmentIdMissing
}
// create new cert and send it to CA for signing
key, err := f.Crypto.GenerateKey()
if err != nil {
return nil, nil, err
}
csr, err := f.Crypto.CreateCertificateRequest(enrollmentId, key)
if err != nil {
return nil, nil, err
}
url := fmt.Sprintf("%s/api/v1/enroll", f.Url)
crm, err := json.Marshal(CertificateRequest{CR: string(csr)})
if err != nil {
return nil, nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm))
req.Header.Set("Content-Type", "application/json")
req.SetBasicAuth(enrollmentId, password)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, nil, err
}
enrResp := new(enrollmentResponse)
if err := json.Unmarshal(body, enrResp); err != nil {
return nil, nil, err
}
if !enrResp.Success {
return nil, nil, ErrEnrollment
}
rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert)
if err != nil {
return nil, nil, err
}
a, _ := pem.Decode(rawCert)
cert, err := x509.ParseCertificate(a.Bytes)
if err != nil {
return nil, nil, err
}
return &Identity{Certificate: cert, PrivateKey: key}, csr, nil
}
// Revoke revokes ECert in fabric-ca server.
// Note that this request will revoke certificate ONLY in fabric-ca server. Peers (for now) do not know
// about this certificate revocation.
// It is the responsibility of the SDK user to update peers and set this certificate in every peer revocation list.
func (f *FabricCAClientImpl) Revoke(identity *Identity, request *CARevocationRequest) (*CAResponse, error) {
reqJson, err := json.Marshal(request)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/revoke", f.Url)
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson))
httpReq.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, reqJson)
if err != nil {
return nil, err
}
httpReq.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(httpReq)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
result := new(CAResponse)
if err := json.Unmarshal(body, result); err != nil {
return nil, err
}
return result, nil
}
// ReEnroll creates a new certificate from the old one. Useful when a certificate is about to expire. Attributes are preserved.
func (f *FabricCAClientImpl) ReEnroll(identity *Identity) (*Identity, error) {
if identity == nil || identity.EnrollmentId() == "" {
return nil, ErrCertificateEmpty
}
// create new cert and send it to CA for signing
key, err := f.Crypto.GenerateKey()
if err != nil {
return nil, err
}
csr, err := f.Crypto.CreateCertificateRequest(identity.EnrollmentId(), key)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/reenroll", f.Url)
crm, err := json.Marshal(CertificateRequest{CR: string(csr)})
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm))
req.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, crm)
if err != nil {
return nil, err
}
req.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
enrResp := new(enrollmentResponse)
if err := json.Unmarshal(body, enrResp); err != nil {
return nil, err
}
if !enrResp.Success {
return nil, ErrEnrollment
}
rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert)
if err != nil {
return nil, err
}
a, _ := pem.Decode(rawCert)
cert, err := x509.ParseCertificate(a.Bytes)
if err != nil {
return nil, err
}
return &Identity{Certificate: cert, PrivateKey: key}, nil
}
// createAuthToken creates the http authorization header token used to verify the request.
// It is composed of the base64 encoded Cert concatenated with the base64 encoded signature over the request, created with the Cert private key.
func (f *FabricCAClientImpl) | createAuthToken | identifier_name |
|
caclient.go | :"max_enrollments,omitempty"`
// Affiliation associates identity with particular organisation.
// for example org1.department1 makes this identity part of organisation `org1` and department `department1`
Affiliation string `json:"affiliation"`
// Attrs are attributes associated with this identity
Attrs []*CARegistrationRequestAttr `json:"attrs"`
}
// CARegistrationRequestAttr holds user attribute used for registration
// for example user may have attr `accountType` with value `premium`
// this attributes can be accessed in chainCode and build business logic on top of them
type CARegistrationRequestAttr struct {
Name string `json:"name"`
Value string `json:"value"`
}
// CARevocationRequest holds data needed to revoke certificate in fabric-ca
// If AKI and Serial are provided this will revoke specific certificate.
// If EnrolmentID is provided, all certificates for this EnrollmentID will be revoked and all of their future attempts
// to enroll will fail.
type CARevocationRequest struct {
// EnrollmentId of the identity whose certificates should be revoked
// If this field is omitted, then Serial and AKI must be specified.
EnrollmentId string `json:"id,omitempty"`
// Serial number of the certificate to be revoked
// If this is omitted, then EnrollmentId must be specified
Serial string `json:"serial,omitempty"`
// AKI (Authority Key Identifier) of the certificate to be revoked
AKI string `json:"aki,omitempty"`
// Reason is the reason for revocation. See https://godoc.org/golang.org/x/crypto/ocsp for
// valid values. The default value is 0 (ocsp.Unspecified).
Reason int `json:"reason,omitempty"`
}
// CAResponse represents response message from fabric-ca server
type CAResponse struct {
Success bool `json:"success"`
Result CARegisterCredentialResponse `json:"result"`
Errors []CAResponseErr `json:"errors"`
Messages []string `json:"messages"`
}
// CARegisterCredentialResponse credentials from fabric-ca server registration request
type CARegisterCredentialResponse struct {
Secret string `json:"secret"`
}
// CAResponseErr represents error message from fabric-ca server
type CAResponseErr struct {
Code int `json:"code"`
Message string `json:"message"`
}
// CertificateRequest holds a certificate request that must be signed by fabric-ca.
type CertificateRequest struct {
CR string `json:"certificate_request"`
}
// FabricCAClientImpl is client implementation for fabric-ca server
type FabricCAClientImpl struct {
	// Url is the access point for the fabric-ca server. Port number and scheme must be provided.
// for example http://127.0.0.1:7054
Url string
	// SkipTLSVerification defines how the connection must handle invalid TLS certificates.
	// If true, all verifications are skipped. This value is overwritten by the Transport property, if provided.
SkipTLSVerification bool
	// Crypto is the CryptoSuite implementation used to sign requests for the fabric-ca server.
Crypto CryptoSuite
	// Transport defines transport rules for communication with the fabric-ca server. If nil, the default Go settings will be used.
	// It is the responsibility of the user to provide proper TLS/certificate settings for TLS communication.
Transport *http.Transport
}
// enrollmentResponse is response from fabric-ca server for enrolment that contains created Ecert
type enrollmentResponse struct {
Success bool `json:"success"`
Result enrollmentResponseResult `json:"result"`
Errors []CAResponseErr `json:"errors"`
Messages []string `json:"messages"`
}
type enrollmentResponseResult struct {
Cert string
ServerInfo enrollmentResponseServerInfo
}
type enrollmentResponseServerInfo struct {
CAName string
CAChain string
}
// Register registers a new user in the fabric-ca server. In the registration request, attributes, affiliation and
// max enrolments must be set. On success, the enrollment secret (password) will be in CAResponse.Result.Secret.
// If a password is not provided, a random secret will be generated.
// It is the responsibility of the SDK user to ensure passwords have sufficient entropy.
// Certificate parameter is certificate for user that makes registration and this user MUST have the role for
// registering new users.
func (f *FabricCAClientImpl) Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) {
if req.EnrolmentId == "" {
return nil, ErrEnrolmentMissing
}
if req.Affiliation == "" {
return nil, ErrAffiliationMissing
}
if req.Type == "" {
return nil, ErrTypeMissing
}
if identity == nil {
return nil, ErrCertificateEmpty
}
reqJson, err := json.Marshal(req)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/register", f.Url)
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson))
httpReq.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, reqJson)
if err != nil {
return nil, err
}
httpReq.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(httpReq)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
result := new(CAResponse)
if err := json.Unmarshal(body, result); err != nil |
return result, nil
}
// Enroll executes an enrollment request for a registered user in the fabric-ca server.
// On success, a new Identity with an ECert is returned.
func (f *FabricCAClientImpl) Enroll(enrollmentId, password string) (*Identity, []byte, error) {
if len(enrollmentId) < 1 {
return nil, nil, ErrEnrollmentIdMissing
}
// create new cert and send it to CA for signing
key, err := f.Crypto.GenerateKey()
if err != nil {
return nil, nil, err
}
csr, err := f.Crypto.CreateCertificateRequest(enrollmentId, key)
if err != nil {
return nil, nil, err
}
url := fmt.Sprintf("%s/api/v1/enroll", f.Url)
crm, err := json.Marshal(CertificateRequest{CR: string(csr)})
if err != nil {
return nil, nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm))
req.Header.Set("Content-Type", "application/json")
req.SetBasicAuth(enrollmentId, password)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, nil, err
}
enrResp := new(enrollmentResponse)
if err := json.Unmarshal(body, enrResp); err != nil {
return nil, nil, err
}
if !enrResp.Success {
return nil, nil, ErrEnrollment
}
rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert)
if err != nil {
return nil, nil, err
}
a, _ := pem.Decode(rawCert)
cert, err := x509.ParseCertificate(a.Bytes)
if err != nil {
return nil, nil, err
}
return &Identity{Certificate: cert, PrivateKey: key}, csr, nil
}
// Revoke revokes ECert in fabric-ca server.
// Note that this request will revoke certificate ONLY in fabric-ca server. Peers (for now) do not know
// about this certificate revocation.
// It is the responsibility of the SDK user to update peers and set this certificate in every peer revocation list.
func (f *FabricCAClientImpl) Revoke(identity *Identity, request *CARevocationRequest) (*CAResponse, error) {
reqJson, err := json.Marshal(request)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/revoke", f.Url)
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson))
httpReq.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, reqJson)
if err != nil {
return nil, err
}
httpReq.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(httpReq)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
result := new(CAResponse)
if err := json.Unmarshal(body, result); err != nil {
| {
return nil, err
} | conditional_block |
caclient.go | reason for revocation. See https://godoc.org/golang.org/x/crypto/ocsp for
// valid values. The default value is 0 (ocsp.Unspecified).
Reason int `json:"reason,omitempty"`
}
// CAResponse represents response message from fabric-ca server
type CAResponse struct {
Success bool `json:"success"`
Result CARegisterCredentialResponse `json:"result"`
Errors []CAResponseErr `json:"errors"`
Messages []string `json:"messages"`
}
// CARegisterCredentialResponse credentials from fabric-ca server registration request
type CARegisterCredentialResponse struct {
Secret string `json:"secret"`
}
// CAResponseErr represents error message from fabric-ca server
type CAResponseErr struct {
Code int `json:"code"`
Message string `json:"message"`
}
// CertificateRequest holds a certificate request that must be signed by fabric-ca.
type CertificateRequest struct {
CR string `json:"certificate_request"`
}
// FabricCAClientImpl is client implementation for fabric-ca server
type FabricCAClientImpl struct {
	// Url is the access point for the fabric-ca server. Port number and scheme must be provided.
// for example http://127.0.0.1:7054
Url string
	// SkipTLSVerification defines how the connection must handle invalid TLS certificates.
	// If true, all verifications are skipped. This value is overwritten by the Transport property, if provided.
SkipTLSVerification bool
	// Crypto is the CryptoSuite implementation used to sign requests for the fabric-ca server.
Crypto CryptoSuite
	// Transport defines transport rules for communication with the fabric-ca server. If nil, the default Go settings will be used.
	// It is the responsibility of the user to provide proper TLS/certificate settings for TLS communication.
Transport *http.Transport
}
// enrollmentResponse is response from fabric-ca server for enrolment that contains created Ecert
type enrollmentResponse struct {
Success bool `json:"success"`
Result enrollmentResponseResult `json:"result"`
Errors []CAResponseErr `json:"errors"`
Messages []string `json:"messages"`
}
type enrollmentResponseResult struct {
Cert string
ServerInfo enrollmentResponseServerInfo
}
type enrollmentResponseServerInfo struct {
CAName string
CAChain string
}
// Register registers a new user in the fabric-ca server. In the registration request, attributes, affiliation and
// max enrolments must be set. On success, the enrollment secret (password) will be in CAResponse.Result.Secret.
// If a password is not provided, a random secret will be generated.
// It is the responsibility of the SDK user to ensure passwords have sufficient entropy.
// Certificate parameter is certificate for user that makes registration and this user MUST have the role for
// registering new users.
func (f *FabricCAClientImpl) Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) {
if req.EnrolmentId == "" {
return nil, ErrEnrolmentMissing
}
if req.Affiliation == "" {
return nil, ErrAffiliationMissing
}
if req.Type == "" {
return nil, ErrTypeMissing
}
if identity == nil {
return nil, ErrCertificateEmpty
}
reqJson, err := json.Marshal(req)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/register", f.Url)
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson))
httpReq.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, reqJson)
if err != nil {
return nil, err
}
httpReq.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(httpReq)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
result := new(CAResponse)
if err := json.Unmarshal(body, result); err != nil {
return nil, err
}
return result, nil
}
// Enroll executes an enrollment request for a registered user in the fabric-ca server.
// On success, a new Identity with an ECert is returned.
func (f *FabricCAClientImpl) Enroll(enrollmentId, password string) (*Identity, []byte, error) {
if len(enrollmentId) < 1 {
return nil, nil, ErrEnrollmentIdMissing
}
// create new cert and send it to CA for signing
key, err := f.Crypto.GenerateKey()
if err != nil {
return nil, nil, err
}
csr, err := f.Crypto.CreateCertificateRequest(enrollmentId, key)
if err != nil {
return nil, nil, err
}
url := fmt.Sprintf("%s/api/v1/enroll", f.Url)
crm, err := json.Marshal(CertificateRequest{CR: string(csr)})
if err != nil {
return nil, nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm))
req.Header.Set("Content-Type", "application/json")
req.SetBasicAuth(enrollmentId, password)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, nil, err
}
enrResp := new(enrollmentResponse)
if err := json.Unmarshal(body, enrResp); err != nil {
return nil, nil, err
}
if !enrResp.Success {
return nil, nil, ErrEnrollment
}
rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert)
if err != nil {
return nil, nil, err
}
a, _ := pem.Decode(rawCert)
cert, err := x509.ParseCertificate(a.Bytes)
if err != nil {
return nil, nil, err
}
return &Identity{Certificate: cert, PrivateKey: key}, csr, nil
}
// Revoke revokes ECert in fabric-ca server.
// Note that this request will revoke certificate ONLY in fabric-ca server. Peers (for now) do not know
// about this certificate revocation.
// It is the responsibility of the SDK user to update peers and set this certificate in every peer revocation list.
func (f *FabricCAClientImpl) Revoke(identity *Identity, request *CARevocationRequest) (*CAResponse, error) {
reqJson, err := json.Marshal(request)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/revoke", f.Url)
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson))
httpReq.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, reqJson)
if err != nil {
return nil, err
}
httpReq.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(httpReq)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
result := new(CAResponse)
if err := json.Unmarshal(body, result); err != nil {
return nil, err
}
return result, nil
}
// ReEnroll creates a new certificate from the old one. Useful when a certificate is about to expire. Attributes are preserved.
func (f *FabricCAClientImpl) ReEnroll(identity *Identity) (*Identity, error) {
if identity == nil || identity.EnrollmentId() == "" {
return nil, ErrCertificateEmpty
}
// create new cert and send it to CA for signing
key, err := f.Crypto.GenerateKey()
if err != nil {
return nil, err
}
csr, err := f.Crypto.CreateCertificateRequest(identity.EnrollmentId(), key)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/reenroll", f.Url)
crm, err := json.Marshal(CertificateRequest{CR: string(csr)})
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm))
req.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, crm)
if err != nil {
return nil, err
}
req.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
| random_line_split |
||
caclient.go | used
	// It is the responsibility of the user to provide proper TLS/certificate settings for TLS communication.
Transport *http.Transport
}
// enrollmentResponse is response from fabric-ca server for enrolment that contains created Ecert
type enrollmentResponse struct {
Success bool `json:"success"`
Result enrollmentResponseResult `json:"result"`
Errors []CAResponseErr `json:"errors"`
Messages []string `json:"messages"`
}
type enrollmentResponseResult struct {
Cert string
ServerInfo enrollmentResponseServerInfo
}
type enrollmentResponseServerInfo struct {
CAName string
CAChain string
}
// Register registers a new user in the fabric-ca server. In the registration request, attributes, affiliation and
// max enrolments must be set. On success, the enrollment secret (password) will be in CAResponse.Result.Secret.
// If a password is not provided, a random secret will be generated.
// It is the responsibility of the SDK user to ensure passwords have sufficient entropy.
// Certificate parameter is certificate for user that makes registration and this user MUST have the role for
// registering new users.
func (f *FabricCAClientImpl) Register(identity *Identity, req *CARegistrationRequest) (*CAResponse, error) {
if req.EnrolmentId == "" {
return nil, ErrEnrolmentMissing
}
if req.Affiliation == "" {
return nil, ErrAffiliationMissing
}
if req.Type == "" {
return nil, ErrTypeMissing
}
if identity == nil {
return nil, ErrCertificateEmpty
}
reqJson, err := json.Marshal(req)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/register", f.Url)
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson))
httpReq.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, reqJson)
if err != nil {
return nil, err
}
httpReq.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(httpReq)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
result := new(CAResponse)
if err := json.Unmarshal(body, result); err != nil {
return nil, err
}
return result, nil
}
// Enroll executes an enrollment request for a registered user in the fabric-ca server.
// On success, a new Identity with an ECert is returned.
func (f *FabricCAClientImpl) Enroll(enrollmentId, password string) (*Identity, []byte, error) {
if len(enrollmentId) < 1 {
return nil, nil, ErrEnrollmentIdMissing
}
// create new cert and send it to CA for signing
key, err := f.Crypto.GenerateKey()
if err != nil {
return nil, nil, err
}
csr, err := f.Crypto.CreateCertificateRequest(enrollmentId, key)
if err != nil {
return nil, nil, err
}
url := fmt.Sprintf("%s/api/v1/enroll", f.Url)
crm, err := json.Marshal(CertificateRequest{CR: string(csr)})
if err != nil {
return nil, nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm))
req.Header.Set("Content-Type", "application/json")
req.SetBasicAuth(enrollmentId, password)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, nil, err
}
enrResp := new(enrollmentResponse)
if err := json.Unmarshal(body, enrResp); err != nil {
return nil, nil, err
}
if !enrResp.Success {
return nil, nil, ErrEnrollment
}
rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert)
if err != nil {
return nil, nil, err
}
a, _ := pem.Decode(rawCert)
cert, err := x509.ParseCertificate(a.Bytes)
if err != nil {
return nil, nil, err
}
return &Identity{Certificate: cert, PrivateKey: key}, csr, nil
}
// Revoke revokes ECert in fabric-ca server.
// Note that this request will revoke certificate ONLY in fabric-ca server. Peers (for now) do not know
// about this certificate revocation.
// It is the responsibility of the SDK user to update peers and set this certificate in every peer revocation list.
func (f *FabricCAClientImpl) Revoke(identity *Identity, request *CARevocationRequest) (*CAResponse, error) {
reqJson, err := json.Marshal(request)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/revoke", f.Url)
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(reqJson))
httpReq.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, reqJson)
if err != nil {
return nil, err
}
httpReq.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(httpReq)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
result := new(CAResponse)
if err := json.Unmarshal(body, result); err != nil {
return nil, err
}
return result, nil
}
// ReEnroll creates a new certificate from the old one. Useful when a certificate is about to expire. Attributes are preserved.
func (f *FabricCAClientImpl) ReEnroll(identity *Identity) (*Identity, error) {
if identity == nil || identity.EnrollmentId() == "" {
return nil, ErrCertificateEmpty
}
// create new cert and send it to CA for signing
key, err := f.Crypto.GenerateKey()
if err != nil {
return nil, err
}
csr, err := f.Crypto.CreateCertificateRequest(identity.EnrollmentId(), key)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%s/api/v1/reenroll", f.Url)
crm, err := json.Marshal(CertificateRequest{CR: string(csr)})
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(crm))
req.Header.Set("Content-Type", "application/json")
token, err := f.createAuthToken(identity, crm)
if err != nil {
return nil, err
}
req.Header.Set("authorization", token)
var tr *http.Transport
if f.Transport == nil {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipTLSVerification},
}
} else {
tr = f.Transport
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
enrResp := new(enrollmentResponse)
if err := json.Unmarshal(body, enrResp); err != nil {
return nil, err
}
if !enrResp.Success {
return nil, ErrEnrollment
}
rawCert, err := base64.StdEncoding.DecodeString(enrResp.Result.Cert)
if err != nil {
return nil, err
}
a, _ := pem.Decode(rawCert)
cert, err := x509.ParseCertificate(a.Bytes)
if err != nil {
return nil, err
}
return &Identity{Certificate: cert, PrivateKey: key}, nil
}
// createAuthToken creates the http authorization header token used to verify the request.
// It is composed of the base64 encoded Cert concatenated with the base64 encoded signature over the request, created with the Cert private key.
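// An illustrative layout of the returned token (explanatory note, values are placeholders):
//   base64(PEM cert) + "." + base64(sign(base64(request) + "." + base64(PEM cert)))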
func (f *FabricCAClientImpl) createAuthToken(identity *Identity, request []byte) (string, error) | {
encPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: identity.Certificate.Raw})
encCert := base64.StdEncoding.EncodeToString(encPem)
body := base64.StdEncoding.EncodeToString(request)
sigString := body + "." + encCert
sig, err := f.Crypto.Sign([]byte(sigString), identity.PrivateKey)
if err != nil {
return "", err
}
return fmt.Sprintf("%s.%s", encCert, base64.StdEncoding.EncodeToString(sig)), nil
} | identifier_body |
|
machine_amd64.go | () error {
var (
kernelSystemRegs systemRegs
kernelUserRegs userRegs
)
// Set base control registers.
kernelSystemRegs.CR0 = c.CR0()
kernelSystemRegs.CR4 = c.CR4()
kernelSystemRegs.EFER = c.EFER()
// Set the IDT & GDT in the registers.
kernelSystemRegs.IDT.base, kernelSystemRegs.IDT.limit = c.IDT()
kernelSystemRegs.GDT.base, kernelSystemRegs.GDT.limit = c.GDT()
kernelSystemRegs.CS.Load(&ring0.KernelCodeSegment, ring0.Kcode)
kernelSystemRegs.DS.Load(&ring0.UserDataSegment, ring0.Udata)
kernelSystemRegs.ES.Load(&ring0.UserDataSegment, ring0.Udata)
kernelSystemRegs.SS.Load(&ring0.KernelDataSegment, ring0.Kdata)
kernelSystemRegs.FS.Load(&ring0.UserDataSegment, ring0.Udata)
kernelSystemRegs.GS.Load(&ring0.UserDataSegment, ring0.Udata)
tssBase, tssLimit, tss := c.TSS()
kernelSystemRegs.TR.Load(tss, ring0.Tss)
kernelSystemRegs.TR.base = tssBase
kernelSystemRegs.TR.limit = uint32(tssLimit)
// Point to kernel page tables, with no initial PCID.
kernelSystemRegs.CR3 = c.machine.kernel.PageTables.CR3(false, 0)
// Initialize the PCID database.
if hasGuestPCID {
// Note that NewPCIDs may return a nil table here, in which
// case we simply don't use PCID support (see below). In
// practice, this should not happen, however.
c.PCIDs = pagetables.NewPCIDs(fixedKernelPCID+1, poolPCIDs)
}
// Set the CPUID; this is required before setting system registers,
// since KVM will reject several CR4 bits if the CPUID does not
// indicate the support is available.
if err := c.setCPUID(); err != nil {
return err
}
// Set the entrypoint for the kernel.
kernelUserRegs.RIP = uint64(ring0.AddrOfStart())
kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer())
kernelUserRegs.RSP = c.StackTop()
kernelUserRegs.RFLAGS = ring0.KernelFlagsSet
// Set the system registers.
if err := c.setSystemRegisters(&kernelSystemRegs); err != nil {
return err
}
// Set the user registers.
if errno := c.setUserRegisters(&kernelUserRegs); errno != 0 {
return fmt.Errorf("error setting user registers: %v", errno)
}
// Set the time offset to the host native time.
return c.setSystemTime()
}
// bitsForScaling is the number of bits available for storing the fraction component
// of the TSC scaling ratio.
// It is set using getBitsForScaling when the KVM platform is initialized.
var bitsForScaling int64
// getBitsForScaling returns the bits available for storing the fraction component
// of the TSC scaling ratio. This allows us to replicate the (bad) math done by
// the kernel below in scaledTSC, and ensure we can compute an exact zero
// offset in setSystemTime.
//
// These constants correspond to kvm_tsc_scaling_ratio_frac_bits.
func getBitsForScaling() int64 {
fs := cpuid.HostFeatureSet()
if fs.Intel() {
return 48 // See vmx.c (kvm sources).
} else if fs.AMD() {
return 32 // See svm.c (svm sources).
} else {
return 63 // Unknown: theoretical maximum.
}
}
// scaledTSC returns the host TSC scaled by the given frequency.
//
// This assumes a current frequency of 1. We require only the unitless ratio of
// rawFreq to some current frequency. See setSystemTime for context.
//
// The kernel math guarantees that all bits of the multiplication and division
// will be correctly preserved and applied. However, it is not possible to
// actually store the ratio correctly. So we need to use the same schema in
// order to calculate the scaled frequency and get the same result.
//
// We can assume that the current frequency is (1), so we are calculating a
// strict inverse of this value. This simplifies this function considerably.
//
// Roughly, the returned value "scaledTSC" will have:
// scaledTSC/hostTSC == 1/rawFreq
//
//go:nosplit
func scaledTSC(rawFreq uintptr) int64 {
scale := int64(1 << bitsForScaling)
ratio := big.NewInt(scale / int64(rawFreq))
ratio.Mul(ratio, big.NewInt(int64(ktime.Rdtsc())))
ratio.Div(ratio, big.NewInt(scale))
return ratio.Int64()
}
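// Worked example (illustrative numbers only): with bitsForScaling = 48 and rawFreq = 4,
// scale = 2^48 and ratio = 2^48/4, so the result is approximately rdtsc()/4 -- the host TSC
// divided by rawFreq, i.e. scaledTSC/hostTSC == 1/rawFreq up to integer truncation.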
// setSystemTime sets the vCPU to the system time.
func (c *vCPU) setSystemTime() error {
// Attempt to set the offset directly. This is supported as of Linux 5.16,
// or commit 828ca89628bfcb1b8f27535025f69dd00eb55207.
if err := c.setTSCOffset(); err == nil {
return err
}
// If tsc scaling is not supported, fallback to legacy mode.
if !c.machine.tscControl {
return c.setSystemTimeLegacy()
}
// First, scale down the clock frequency to the lowest value allowed by
// the API itself. How low we can go depends on the underlying
// hardware, but it is typically ~1/2^48 for Intel, ~1/2^32 for AMD.
// Even the lower bound here will take a 4GHz frequency down to 1Hz,
// meaning that everything should be able to handle a Khz setting of 1
// with bits to spare.
//
// Note that reducing the clock does not typically require special
// capabilities as it is emulated in KVM. We don't actually use this
// capability, but it means that this method should be robust to
// different hardware configurations.
rawFreq, err := c.getTSCFreq()
if err != nil {
return c.setSystemTimeLegacy()
}
if err := c.setTSCFreq(1); err != nil {
return c.setSystemTimeLegacy()
}
// Always restore the original frequency.
defer func() {
if err := c.setTSCFreq(rawFreq); err != nil {
panic(err.Error())
}
}()
// Attempt to set the system time in this compressed world. The
// calculation for offset normally looks like:
//
// offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc());
//
// So as long as the kvm_scale_tsc component is constant before and
	// after the call to set the TSC value (and it is passed as the
// target_tsc), we will compute an offset value of zero.
//
// This is effectively cheating to make our "setSystemTime" call so
// unbelievably, incredibly fast that we do it "instantly" and all the
// calculations result in an offset of zero.
lastTSC := scaledTSC(rawFreq)
for {
if err := c.setTSC(uint64(lastTSC)); err != nil {
return err
}
nextTSC := scaledTSC(rawFreq)
if lastTSC == nextTSC {
return nil
}
lastTSC = nextTSC // Try again.
}
}
// nonCanonical generates a canonical address return.
//
//go:nosplit
func nonCanonical(addr uint64, signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) {
*info = linux.SignalInfo{
Signo: signal,
Code: linux.SI_KERNEL,
}
info.SetAddr(addr) // Include address.
return hostarch.NoAccess, platform.ErrContextSignal
}
// fault generates an appropriate fault return.
//
//go:nosplit
func (c *vCPU) fault(signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) {
bluepill(c) // Probably no-op, but may not be.
faultAddr := ring0.ReadCR2()
code, user := c.ErrorCode()
if !user {
// The last fault serviced by this CPU was not a user
// fault, so we can't reliably trust the faultAddr or
// the code provided here. We need to re-execute.
return hostarch.NoAccess, platform.ErrContextInterrupt
}
// Reset the pointed SignalInfo.
*info = linux.SignalInfo{Signo: signal}
info.SetAddr(uint64(faultAddr))
accessType := hostarch.AccessType{}
if signal == int32(unix.SIGSEGV) {
accessType = hostarch.AccessType{
Read: code&(1<<1) == 0,
Write: code&(1<<1) != 0,
Execute: code&(1<<4) != 0,
}
}
if !accessType.Write | initArchState | identifier_name |
|
machine_amd64.go | )
// Set base control registers.
kernelSystemRegs.CR0 = c.CR0()
kernelSystemRegs.CR4 = c.CR4()
kernelSystemRegs.EFER = c.EFER()
// Set the IDT & GDT in the registers.
kernelSystemRegs.IDT.base, kernelSystemRegs.IDT.limit = c.IDT()
kernelSystemRegs.GDT.base, kernelSystemRegs.GDT.limit = c.GDT()
kernelSystemRegs.CS.Load(&ring0.KernelCodeSegment, ring0.Kcode)
kernelSystemRegs.DS.Load(&ring0.UserDataSegment, ring0.Udata)
kernelSystemRegs.ES.Load(&ring0.UserDataSegment, ring0.Udata)
kernelSystemRegs.SS.Load(&ring0.KernelDataSegment, ring0.Kdata)
kernelSystemRegs.FS.Load(&ring0.UserDataSegment, ring0.Udata)
kernelSystemRegs.GS.Load(&ring0.UserDataSegment, ring0.Udata)
tssBase, tssLimit, tss := c.TSS()
kernelSystemRegs.TR.Load(tss, ring0.Tss)
kernelSystemRegs.TR.base = tssBase
kernelSystemRegs.TR.limit = uint32(tssLimit)
// Point to kernel page tables, with no initial PCID.
kernelSystemRegs.CR3 = c.machine.kernel.PageTables.CR3(false, 0)
// Initialize the PCID database.
if hasGuestPCID {
// Note that NewPCIDs may return a nil table here, in which
// case we simply don't use PCID support (see below). In
// practice, this should not happen, however.
c.PCIDs = pagetables.NewPCIDs(fixedKernelPCID+1, poolPCIDs)
}
// Set the CPUID; this is required before setting system registers,
// since KVM will reject several CR4 bits if the CPUID does not
// indicate the support is available.
if err := c.setCPUID(); err != nil {
return err
}
// Set the entrypoint for the kernel.
kernelUserRegs.RIP = uint64(ring0.AddrOfStart())
kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer())
kernelUserRegs.RSP = c.StackTop()
kernelUserRegs.RFLAGS = ring0.KernelFlagsSet
// Set the system registers.
if err := c.setSystemRegisters(&kernelSystemRegs); err != nil {
return err
}
// Set the user registers.
if errno := c.setUserRegisters(&kernelUserRegs); errno != 0 {
return fmt.Errorf("error setting user registers: %v", errno)
}
// Set the time offset to the host native time.
return c.setSystemTime()
}
// bitsForScaling is the number of bits available for storing the fraction component
// of the TSC scaling ratio.
// It is set using getBitsForScaling when the KVM platform is initialized.
var bitsForScaling int64
// getBitsForScaling returns the bits available for storing the fraction component
// of the TSC scaling ratio. This allows us to replicate the (bad) math done by
// the kernel below in scaledTSC, and ensure we can compute an exact zero
// offset in setSystemTime.
//
// These constants correspond to kvm_tsc_scaling_ratio_frac_bits.
func getBitsForScaling() int64 {
fs := cpuid.HostFeatureSet()
if fs.Intel() {
return 48 // See vmx.c (kvm sources).
} else if fs.AMD() {
return 32 // See svm.c (svm sources).
} else {
return 63 // Unknown: theoretical maximum.
}
}
// scaledTSC returns the host TSC scaled by the given frequency.
//
// This assumes a current frequency of 1. We require only the unitless ratio of
// rawFreq to some current frequency. See setSystemTime for context.
//
// The kernel math guarantees that all bits of the multiplication and division
// will be correctly preserved and applied. However, it is not possible to
// actually store the ratio correctly. So we need to use the same schema in
// order to calculate the scaled frequency and get the same result.
//
// We can assume that the current frequency is (1), so we are calculating a
// strict inverse of this value. This simplifies this function considerably.
//
// Roughly, the returned value "scaledTSC" will have:
// scaledTSC/hostTSC == 1/rawFreq
//
//go:nosplit
func scaledTSC(rawFreq uintptr) int64 {
scale := int64(1 << bitsForScaling)
ratio := big.NewInt(scale / int64(rawFreq))
ratio.Mul(ratio, big.NewInt(int64(ktime.Rdtsc())))
ratio.Div(ratio, big.NewInt(scale))
return ratio.Int64()
}
// setSystemTime sets the vCPU to the system time.
func (c *vCPU) setSystemTime() error {
// Attempt to set the offset directly. This is supported as of Linux 5.16,
// or commit 828ca89628bfcb1b8f27535025f69dd00eb55207.
if err := c.setTSCOffset(); err == nil |
// If tsc scaling is not supported, fallback to legacy mode.
if !c.machine.tscControl {
return c.setSystemTimeLegacy()
}
// First, scale down the clock frequency to the lowest value allowed by
// the API itself. How low we can go depends on the underlying
// hardware, but it is typically ~1/2^48 for Intel, ~1/2^32 for AMD.
// Even the lower bound here will take a 4GHz frequency down to 1Hz,
// meaning that everything should be able to handle a Khz setting of 1
// with bits to spare.
//
// Note that reducing the clock does not typically require special
// capabilities as it is emulated in KVM. We don't actually use this
// capability, but it means that this method should be robust to
// different hardware configurations.
rawFreq, err := c.getTSCFreq()
if err != nil {
return c.setSystemTimeLegacy()
}
if err := c.setTSCFreq(1); err != nil {
return c.setSystemTimeLegacy()
}
// Always restore the original frequency.
defer func() {
if err := c.setTSCFreq(rawFreq); err != nil {
panic(err.Error())
}
}()
// Attempt to set the system time in this compressed world. The
// calculation for offset normally looks like:
//
// offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc());
//
// So as long as the kvm_scale_tsc component is constant before and
	// after the call to set the TSC value (and it is passed as the
// target_tsc), we will compute an offset value of zero.
//
// This is effectively cheating to make our "setSystemTime" call so
// unbelievably, incredibly fast that we do it "instantly" and all the
// calculations result in an offset of zero.
lastTSC := scaledTSC(rawFreq)
for {
if err := c.setTSC(uint64(lastTSC)); err != nil {
return err
}
nextTSC := scaledTSC(rawFreq)
if lastTSC == nextTSC {
return nil
}
lastTSC = nextTSC // Try again.
}
}
// nonCanonical generates a canonical address return.
//
//go:nosplit
func nonCanonical(addr uint64, signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) {
*info = linux.SignalInfo{
Signo: signal,
Code: linux.SI_KERNEL,
}
info.SetAddr(addr) // Include address.
return hostarch.NoAccess, platform.ErrContextSignal
}
// fault generates an appropriate fault return.
//
//go:nosplit
func (c *vCPU) fault(signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) {
bluepill(c) // Probably no-op, but may not be.
faultAddr := ring0.ReadCR2()
code, user := c.ErrorCode()
if !user {
// The last fault serviced by this CPU was not a user
// fault, so we can't reliably trust the faultAddr or
// the code provided here. We need to re-execute.
return hostarch.NoAccess, platform.ErrContextInterrupt
}
// Reset the pointed SignalInfo.
*info = linux.SignalInfo{Signo: signal}
info.SetAddr(uint64(faultAddr))
accessType := hostarch.AccessType{}
if signal == int32(unix.SIGSEGV) {
accessType = hostarch.AccessType{
Read: code&(1<<1) == 0,
Write: code&(1<<1) != 0,
Execute: code&(1<<4) != 0,
}
}
if !accessType.Write && !accessType.Execute {
info.Code = 1 // SEGV_MAPERR.
} else {
info | {
return err
} | conditional_block |
machine_amd64.go | )
// Set base control registers.
kernelSystemRegs.CR0 = c.CR0()
kernelSystemRegs.CR4 = c.CR4()
kernelSystemRegs.EFER = c.EFER()
// Set the IDT & GDT in the registers.
kernelSystemRegs.IDT.base, kernelSystemRegs.IDT.limit = c.IDT()
kernelSystemRegs.GDT.base, kernelSystemRegs.GDT.limit = c.GDT()
kernelSystemRegs.CS.Load(&ring0.KernelCodeSegment, ring0.Kcode)
kernelSystemRegs.DS.Load(&ring0.UserDataSegment, ring0.Udata)
kernelSystemRegs.ES.Load(&ring0.UserDataSegment, ring0.Udata)
kernelSystemRegs.SS.Load(&ring0.KernelDataSegment, ring0.Kdata)
kernelSystemRegs.FS.Load(&ring0.UserDataSegment, ring0.Udata)
kernelSystemRegs.GS.Load(&ring0.UserDataSegment, ring0.Udata)
tssBase, tssLimit, tss := c.TSS()
kernelSystemRegs.TR.Load(tss, ring0.Tss)
kernelSystemRegs.TR.base = tssBase
kernelSystemRegs.TR.limit = uint32(tssLimit)
// Point to kernel page tables, with no initial PCID.
kernelSystemRegs.CR3 = c.machine.kernel.PageTables.CR3(false, 0)
// Initialize the PCID database.
if hasGuestPCID {
// Note that NewPCIDs may return a nil table here, in which
// case we simply don't use PCID support (see below). In
// practice, this should not happen, however.
c.PCIDs = pagetables.NewPCIDs(fixedKernelPCID+1, poolPCIDs)
}
// Set the CPUID; this is required before setting system registers,
// since KVM will reject several CR4 bits if the CPUID does not
// indicate the support is available.
if err := c.setCPUID(); err != nil {
return err
}
// Set the entrypoint for the kernel.
kernelUserRegs.RIP = uint64(ring0.AddrOfStart())
kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer())
kernelUserRegs.RSP = c.StackTop()
kernelUserRegs.RFLAGS = ring0.KernelFlagsSet
// Set the system registers.
if err := c.setSystemRegisters(&kernelSystemRegs); err != nil {
return err
}
// Set the user registers.
if errno := c.setUserRegisters(&kernelUserRegs); errno != 0 {
return fmt.Errorf("error setting user registers: %v", errno)
}
// Set the time offset to the host native time.
return c.setSystemTime()
}
// bitsForScaling returns the bits available for storing the fraction component
// of the TSC scaling ratio.
// It is set using getBitsForScaling when the KVM platform is initialized.
var bitsForScaling int64
// getBitsForScaling returns the bits available for storing the fraction component
// of the TSC scaling ratio. This allows us to replicate the (bad) math done by
// the kernel below in scaledTSC, and ensure we can compute an exact zero
// offset in setSystemTime.
//
// These constants correspond to kvm_tsc_scaling_ratio_frac_bits.
func getBitsForScaling() int64 {
fs := cpuid.HostFeatureSet()
if fs.Intel() {
return 48 // See vmx.c (kvm sources).
} else if fs.AMD() {
return 32 // See svm.c (svm sources).
} else {
return 63 // Unknown: theoretical maximum.
}
}
// scaledTSC returns the host TSC scaled by the given frequency.
//
// This assumes a current frequency of 1. We require only the unitless ratio of
// rawFreq to some current frequency. See setSystemTime for context.
//
// The kernel math guarantees that all bits of the multiplication and division
// will be correctly preserved and applied. However, it is not possible to
// actually store the ratio correctly. So we need to use the same schema in
// order to calculate the scaled frequency and get the same result.
//
// We can assume that the current frequency is (1), so we are calculating a
// strict inverse of this value. This simplifies this function considerably.
//
// Roughly, the returned value "scaledTSC" will have:
// scaledTSC/hostTSC == 1/rawFreq
//
//go:nosplit
func scaledTSC(rawFreq uintptr) int64 {
scale := int64(1 << bitsForScaling)
ratio := big.NewInt(scale / int64(rawFreq))
ratio.Mul(ratio, big.NewInt(int64(ktime.Rdtsc())))
ratio.Div(ratio, big.NewInt(scale))
return ratio.Int64()
}
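// scaledTSCExample is an illustrative helper added for exposition and is not
// part of the original source. It applies the same fixed-point math as
// scaledTSC above to a caller-supplied TSC sample, which makes the truncation
// easy to see: with fracBits = 48 (Intel), rawFreq = 4000000 (a 4 GHz clock
// expressed in KHz) and hostTSC = 8000000, it returns 1 rather than the exact
// ratio 2, because scale/rawFreq is rounded down before the multiplication.
func scaledTSCExample(rawFreq uintptr, hostTSC int64, fracBits uint) int64 {
	scale := int64(1) << fracBits
	ratio := big.NewInt(scale / int64(rawFreq)) // truncating division, as in the kernel
	ratio.Mul(ratio, big.NewInt(hostTSC))
	ratio.Div(ratio, big.NewInt(scale))
	return ratio.Int64()
}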
// setSystemTime sets the vCPU to the system time.
func (c *vCPU) setSystemTime() error {
// Attempt to set the offset directly. This is supported as of Linux 5.16,
// or commit 828ca89628bfcb1b8f27535025f69dd00eb55207.
if err := c.setTSCOffset(); err == nil {
return err
}
// If tsc scaling is not supported, fall back to legacy mode.
if !c.machine.tscControl {
return c.setSystemTimeLegacy()
}
// First, scale down the clock frequency to the lowest value allowed by
// the API itself. How low we can go depends on the underlying
// hardware, but it is typically ~1/2^48 for Intel, ~1/2^32 for AMD.
// Even the lower bound here will take a 4GHz frequency down to 1Hz,
// meaning that everything should be able to handle a KHz setting of 1
// with bits to spare.
//
// Note that reducing the clock does not typically require special
// capabilities as it is emulated in KVM. We don't actually use this
// capability, but it means that this method should be robust to
// different hardware configurations.
rawFreq, err := c.getTSCFreq()
if err != nil {
return c.setSystemTimeLegacy()
}
if err := c.setTSCFreq(1); err != nil {
return c.setSystemTimeLegacy()
}
// Always restore the original frequency.
defer func() {
if err := c.setTSCFreq(rawFreq); err != nil {
panic(err.Error())
}
}()
// Attempt to set the system time in this compressed world. The
// calculation for offset normally looks like:
//
// offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc());
//
// So as long as the kvm_scale_tsc component is constant before and
// after the call to set the TSC value (and it is passed as the
// target_tsc), we will compute an offset value of zero.
//
// This is effectively cheating to make our "setSystemTime" call so
// unbelievably, incredibly fast that we do it "instantly" and all the
// calculations result in an offset of zero.
lastTSC := scaledTSC(rawFreq)
for {
if err := c.setTSC(uint64(lastTSC)); err != nil {
return err
}
nextTSC := scaledTSC(rawFreq)
if lastTSC == nextTSC {
return nil
}
lastTSC = nextTSC // Try again.
}
}
// nonCanonical generates the signal return for a non-canonical address.
//
//go:nosplit
func nonCanonical(addr uint64, signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) {
*info = linux.SignalInfo{
Signo: signal,
Code: linux.SI_KERNEL,
}
info.SetAddr(addr) // Include address.
return hostarch.NoAccess, platform.ErrContextSignal
}
// fault generates an appropriate fault return.
//
//go:nosplit
func (c *vCPU) fault(signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) {
bluepill(c) // Probably no-op, but may not be.
faultAddr := ring0.ReadCR2()
code, user := c.ErrorCode()
if !user {
// The last fault serviced by this CPU was not a user
// fault, so we can't reliably trust the faultAddr or
// the code provided here. We need to re-execute.
return hostarch.NoAccess, platform.ErrContextInterrupt
}
// Reset the pointed SignalInfo.
*info = linux.SignalInfo{Signo: signal}
info.SetAddr(uint64(faultAddr))
accessType := hostarch.AccessType{}
if signal == int32(unix.SIGSEGV) {
accessType = hostarch.AccessType{
Read: code&(1<<1) == 0,
Write: code&(1<<1) != 0,
Execute: code&(1<<4) != 0, | } else {
info.Code | }
}
if !accessType.Write && !accessType.Execute {
info.Code = 1 // SEGV_MAPERR. | random_line_split |
machine_amd64.go | here, in which
// case we simply don't use PCID support (see below). In
// practice, this should not happen, however.
c.PCIDs = pagetables.NewPCIDs(fixedKernelPCID+1, poolPCIDs)
}
// Set the CPUID; this is required before setting system registers,
// since KVM will reject several CR4 bits if the CPUID does not
// indicate the support is available.
if err := c.setCPUID(); err != nil {
return err
}
// Set the entrypoint for the kernel.
kernelUserRegs.RIP = uint64(ring0.AddrOfStart())
kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer())
kernelUserRegs.RSP = c.StackTop()
kernelUserRegs.RFLAGS = ring0.KernelFlagsSet
// Set the system registers.
if err := c.setSystemRegisters(&kernelSystemRegs); err != nil {
return err
}
// Set the user registers.
if errno := c.setUserRegisters(&kernelUserRegs); errno != 0 {
return fmt.Errorf("error setting user registers: %v", errno)
}
// Set the time offset to the host native time.
return c.setSystemTime()
}
// bitsForScaling returns the bits available for storing the fraction component
// of the TSC scaling ratio.
// It is set using getBitsForScaling when the KVM platform is initialized.
var bitsForScaling int64
// getBitsForScaling returns the bits available for storing the fraction component
// of the TSC scaling ratio. This allows us to replicate the (bad) math done by
// the kernel below in scaledTSC, and ensure we can compute an exact zero
// offset in setSystemTime.
//
// These constants correspond to kvm_tsc_scaling_ratio_frac_bits.
func getBitsForScaling() int64 {
fs := cpuid.HostFeatureSet()
if fs.Intel() {
return 48 // See vmx.c (kvm sources).
} else if fs.AMD() {
return 32 // See svm.c (svm sources).
} else {
return 63 // Unknown: theoretical maximum.
}
}
// scaledTSC returns the host TSC scaled by the given frequency.
//
// This assumes a current frequency of 1. We require only the unitless ratio of
// rawFreq to some current frequency. See setSystemTime for context.
//
// The kernel math guarantees that all bits of the multiplication and division
// will be correctly preserved and applied. However, it is not possible to
// actually store the ratio correctly. So we need to use the same schema in
// order to calculate the scaled frequency and get the same result.
//
// We can assume that the current frequency is (1), so we are calculating a
// strict inverse of this value. This simplifies this function considerably.
//
// Roughly, the returned value "scaledTSC" will have:
// scaledTSC/hostTSC == 1/rawFreq
//
//go:nosplit
func scaledTSC(rawFreq uintptr) int64 {
scale := int64(1 << bitsForScaling)
ratio := big.NewInt(scale / int64(rawFreq))
ratio.Mul(ratio, big.NewInt(int64(ktime.Rdtsc())))
ratio.Div(ratio, big.NewInt(scale))
return ratio.Int64()
}
// setSystemTime sets the vCPU to the system time.
func (c *vCPU) setSystemTime() error {
// Attempt to set the offset directly. This is supported as of Linux 5.16,
// or commit 828ca89628bfcb1b8f27535025f69dd00eb55207.
if err := c.setTSCOffset(); err == nil {
return err
}
// If tsc scaling is not supported, fall back to legacy mode.
if !c.machine.tscControl {
return c.setSystemTimeLegacy()
}
// First, scale down the clock frequency to the lowest value allowed by
// the API itself. How low we can go depends on the underlying
// hardware, but it is typically ~1/2^48 for Intel, ~1/2^32 for AMD.
// Even the lower bound here will take a 4GHz frequency down to 1Hz,
// meaning that everything should be able to handle a KHz setting of 1
// with bits to spare.
//
// Note that reducing the clock does not typically require special
// capabilities as it is emulated in KVM. We don't actually use this
// capability, but it means that this method should be robust to
// different hardware configurations.
rawFreq, err := c.getTSCFreq()
if err != nil {
return c.setSystemTimeLegacy()
}
if err := c.setTSCFreq(1); err != nil {
return c.setSystemTimeLegacy()
}
// Always restore the original frequency.
defer func() {
if err := c.setTSCFreq(rawFreq); err != nil {
panic(err.Error())
}
}()
// Attempt to set the system time in this compressed world. The
// calculation for offset normally looks like:
//
// offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc());
//
// So as long as the kvm_scale_tsc component is constant before and
// after the call to set the TSC value (and it is passed as the
// target_tsc), we will compute an offset value of zero.
//
// This is effectively cheating to make our "setSystemTime" call so
// unbelievably, incredibly fast that we do it "instantly" and all the
// calculations result in an offset of zero.
lastTSC := scaledTSC(rawFreq)
for {
if err := c.setTSC(uint64(lastTSC)); err != nil {
return err
}
nextTSC := scaledTSC(rawFreq)
if lastTSC == nextTSC {
return nil
}
lastTSC = nextTSC // Try again.
}
}
// nonCanonical generates the signal return for a non-canonical address.
//
//go:nosplit
func nonCanonical(addr uint64, signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) {
*info = linux.SignalInfo{
Signo: signal,
Code: linux.SI_KERNEL,
}
info.SetAddr(addr) // Include address.
return hostarch.NoAccess, platform.ErrContextSignal
}
// fault generates an appropriate fault return.
//
//go:nosplit
func (c *vCPU) fault(signal int32, info *linux.SignalInfo) (hostarch.AccessType, error) {
bluepill(c) // Probably no-op, but may not be.
faultAddr := ring0.ReadCR2()
code, user := c.ErrorCode()
if !user {
// The last fault serviced by this CPU was not a user
// fault, so we can't reliably trust the faultAddr or
// the code provided here. We need to re-execute.
return hostarch.NoAccess, platform.ErrContextInterrupt
}
// Reset the pointed SignalInfo.
*info = linux.SignalInfo{Signo: signal}
info.SetAddr(uint64(faultAddr))
accessType := hostarch.AccessType{}
if signal == int32(unix.SIGSEGV) {
accessType = hostarch.AccessType{
Read: code&(1<<1) == 0,
Write: code&(1<<1) != 0,
Execute: code&(1<<4) != 0,
}
}
if !accessType.Write && !accessType.Execute {
info.Code = 1 // SEGV_MAPERR.
} else {
info.Code = 2 // SEGV_ACCERR.
}
return accessType, platform.ErrContextSignal
}
//go:nosplit
//go:noinline
func loadByte(ptr *byte) byte {
return *ptr
}
// SwitchToUser unpacks architectural details.
func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *linux.SignalInfo) (hostarch.AccessType, error) | {
// Check for canonical addresses.
if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) {
return nonCanonical(regs.Rip, int32(unix.SIGSEGV), info)
} else if !ring0.IsCanonical(regs.Rsp) {
return nonCanonical(regs.Rsp, int32(unix.SIGBUS), info)
} else if !ring0.IsCanonical(regs.Fs_base) {
return nonCanonical(regs.Fs_base, int32(unix.SIGBUS), info)
} else if !ring0.IsCanonical(regs.Gs_base) {
return nonCanonical(regs.Gs_base, int32(unix.SIGBUS), info)
}
// Assign PCIDs.
if c.PCIDs != nil {
var requireFlushPCID bool // Force a flush?
switchOpts.UserPCID, requireFlushPCID = c.PCIDs.Assign(switchOpts.PageTables)
switchOpts.KernelPCID = fixedKernelPCID
switchOpts.Flush = switchOpts.Flush || requireFlushPCID
}
| identifier_body |
|
main.py | mean square error between the prediction and time-integrator
'''
model.train()
loss_total = 0
mse_total = 0
# Mini-batch loop
for batch_idx, input in enumerate(train_loader):
# input [b, 2, x, y]
# Expand input to match model in channels
dims = torch.ones(len(input.shape))
dims[1] = args.nic
input = input.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
loss = 0
# Loop for number of timesteps
optimizer.zero_grad()
for i in range(tsteps[batch_idx]):
uPred = model(input[:,-2*args.nic:,:])
if(i < tstart[batch_idx]):
# Don't calculate residual, just predict forward
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred[:,0,:].unsqueeze(1).detach()
input = torch.cat([input, input0], dim=1)
else:
# Calculate loss
# Start with implicit time integration
ustar = burgerInt.crankNicolson(uPred, input[:,-2:,:], dt)
# Calc. loss based on posterior of the model
log_joint = model.calc_neg_log_joint(uPred, ustar, len(train_loader))
loss = loss + log_joint
loss_total = loss_total + loss.data.item()
mse_total += F.mse_loss(uPred.detach(), ustar.detach()).item() # MSE for scheduler
# Back-prop through two timesteps
if((i+1)%tback[batch_idx] == 0):
loss.backward()
loss = 0
optimizer.step()
optimizer.zero_grad()
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
else:
input0 = uPred
input = torch.cat([input, input0], dim=1)
if(batch_idx % 10 == 1):
print("Epoch {}, Mini-batch {}/{} ({}%) ".format(epoch, batch_idx, \
len(train_loader), int(100*batch_idx/len(train_loader))))
return loss_total/len(train_loader), mse_total/len(train_loader)
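# Illustrative sketch (not part of the original file): one plausible way to build
# the per-mini-batch scheduling arrays that train() expects. The real schedule is
# constructed in the outer training loop, so the defaults here are placeholder
# assumptions rather than the authors' settings.
def _example_train_schedule(n_batches, n_tsteps=100, t_back=2):
    tsteps = np.full(n_batches, n_tsteps, dtype=int)  # timesteps unrolled per mini-batch
    tback = np.full(n_batches, t_back, dtype=int)     # back-prop every t_back steps (truncated BPTT)
    tstart = np.zeros(n_batches, dtype=int)           # start accumulating the loss at step 0
    return tsteps, tback, tstart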
def test(args, model, test_loader, tstep=100, test_every=2):
'''
Tests the deterministic model
Args:
args (argparse): object with program arguments
model (PyTorch model): DenseED model to be tested
test_loader (dataloader): dataloader with test cases (use createTestingLoader)
tstep (int): number of timesteps to predict for
test_every (int): Time-step interval to test (must match simulator), default = 2
Returns:
u_out (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] predicted quantities
u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator
'''
model.eval()
mb_size = int(len(test_loader.dataset)/len(test_loader))
u_out = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel)
u_target = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel)
for bidx, (input0, uTarget0) in enumerate(test_loader):
# Expand input to match model in channels
dims = torch.ones(len(input0.shape))
dims[1] = args.nic
input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
u_out[bidx*mb_size:(bidx+1)*mb_size,0] = input0
u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every+1)].cpu()
# Auto-regress
for t_idx in range(tstep):
uPred = model(input[:,-2*args.nic:,:,:])
if((t_idx+1)%test_every == 0):
|
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
return u_out, u_target
def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10, test_every=2):
'''
Tests the samples of the Bayesian SWAG model
Args:
args (argparse): object with program arguments
swag_nn (SwagNN model): Bayesian SWAG model to draw DenseED samples from
test_loader (dataloader): dataloader with test cases (use createTestingLoader)
tstep (int): number of timesteps to predict for
n_samples (int): number of model samples to draw
test_every (int): Time-step interval to test (must match simulator), default = 2
Returns:
u_out (torch.Tensor): [d x nsamples x (tstep+1)//test_every x 2 x nel x nel] predicted quantities of each sample
u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator
'''
mb_size = int(len(test_loader.dataset)/len(test_loader))
u_out = torch.zeros(len(test_loader.dataset), n_samples, (tstep)//test_every + 1, 2, args.nel, args.nel)
u_target = torch.zeros(len(test_loader.dataset), (tstep)//test_every + 1, 2, args.nel, args.nel)
for i in range(n_samples):
print('Executing model sample {:d}'.format(i))
model = swag_nn.sample(diagCov=True) # Use diagonal approx. only when training
model.eval()
for bidx, (input0, uTarget0) in enumerate(test_loader):
# Expand input to match model in channels
dims = torch.ones(len(input0.shape))
dims[1] = args.nic
input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
if(i == 0): # Save target data
u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every + 1)]
u_out[bidx*mb_size:(bidx+1)*mb_size,i,0,:,:,:] = input0
# Auto-regress
for t_idx in range(tstep):
uPred = model(input[:,-2*args.nic:,:])
if((t_idx+1)%test_every == 0):
u_out[bidx*mb_size:(bidx+1)*mb_size, i, t_idx//test_every+1,:,:,:] = uPred
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
return u_out, u_target
if __name__ == '__main__':
# Parse arguments
args = Parser().parse()
use_cuda = "cpu"
if(torch.cuda.is_available()):
use_cuda = "cuda"
args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Torch device:{}".format(args.device))
# Domain settings, matches solver settings
x0 = 0
x1 = 1.0
args.dx = (x1 - x0)/args.nel
# Create training loader
burgerLoader = BurgerLoader(dt=args.dt)
training_loader = burgerLoader.createTrainingLoader(args.ntrain, args.nel, batch_size=args.batch_size)
# Create testing loader
test_cases = np.array([400, 401]).astype(int)
testing_loader = burgerLoader.createTestingLoader(args.data_dir, test_cases, simdt=0.005, batch_size=2)
# Create DenseED model
denseED = DenseED(in_channels=2*args.nic, out_channels=2*args.noc,
blocks=args.blocks,
growth_rate=args.growth_rate,
init_features=args.init_features,
bn_size=args.bn_size,
drop_rate=args.drop_rate,
bottleneck=False,
out_activation=None).to(args.device)
# Bayesian neural network
bayes_nn = BayesNN(args, denseED)
# Stochastic weighted averages
swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=args.swag_max)
# Optimizer
parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.lr_beta},
{'params': bayes_nn.model.features.parameters()}]
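# Note (added for clarity): two parameter groups are used so that the noise
# precision log_beta trains with its own learning rate (args.lr_beta), while the
# DenseED feature weights use the base lr passed to Adam below.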
optimizer = torch.optim.Adam(parameters, lr=args.lr, weight_decay=0.0)
# Learning | u_out[bidx*mb_size:(bidx+1)*mb_size, (t_idx+1)//test_every,:,:,:] = uPred | conditional_block |
main.py | mean square error between the prediction and time-integrator
'''
model.train()
loss_total = 0
mse_total = 0
# Mini-batch loop
for batch_idx, input in enumerate(train_loader):
# input [b, 2, x, y]
# Expand input to match model in channels
dims = torch.ones(len(input.shape))
dims[1] = args.nic
input = input.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
loss = 0
# Loop for number of timesteps
optimizer.zero_grad()
for i in range(tsteps[batch_idx]):
uPred = model(input[:,-2*args.nic:,:])
if(i < tstart[batch_idx]):
# Don't calculate residual, just predict forward
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred[:,0,:].unsqueeze(1).detach()
input = torch.cat([input, input0], dim=1)
else:
# Calculate loss
# Start with implicit time integration
ustar = burgerInt.crankNicolson(uPred, input[:,-2:,:], dt)
# Calc. loss based on posterior of the model
log_joint = model.calc_neg_log_joint(uPred, ustar, len(train_loader))
loss = loss + log_joint
loss_total = loss_total + loss.data.item()
mse_total += F.mse_loss(uPred.detach(), ustar.detach()).item() # MSE for scheduler
# Back-prop through two timesteps
if((i+1)%tback[batch_idx] == 0):
loss.backward()
loss = 0
optimizer.step()
optimizer.zero_grad()
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
else:
input0 = uPred
input = torch.cat([input, input0], dim=1)
if(batch_idx % 10 == 1):
print("Epoch {}, Mini-batch {}/{} ({}%) ".format(epoch, batch_idx, \
len(train_loader), int(100*batch_idx/len(train_loader))))
return loss_total/len(train_loader), mse_total/len(train_loader)
def test(args, model, test_loader, tstep=100, test_every=2):
'''
Tests the deterministic model
Args:
args (argparse): object with program arguments
model (PyTorch model): DenseED model to be tested
test_loader (dataloader): dataloader with test cases (use createTestingLoader)
tstep (int): number of timesteps to predict for
test_every (int): Time-step interval to test (must match simulator), default = 2
Returns:
u_out (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] predicted quantities
u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator
'''
model.eval()
mb_size = int(len(test_loader.dataset)/len(test_loader))
u_out = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel)
u_target = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel)
for bidx, (input0, uTarget0) in enumerate(test_loader):
# Expand input to match model in channels
dims = torch.ones(len(input0.shape))
dims[1] = args.nic
input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
u_out[bidx*mb_size:(bidx+1)*mb_size,0] = input0
u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every+1)].cpu()
# Auto-regress
for t_idx in range(tstep):
uPred = model(input[:,-2*args.nic:,:,:])
if((t_idx+1)%test_every == 0):
u_out[bidx*mb_size:(bidx+1)*mb_size, (t_idx+1)//test_every,:,:,:] = uPred
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
return u_out, u_target
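# Illustrative sketch (not part of the original file): how the deterministic
# test() above might be called once a model is available; `args`, `model` and
# `testing_loader` are assumed to be set up as in the __main__ block below.
def _example_test_usage(args, model, testing_loader):
    u_pred, u_true = test(args, model, testing_loader, tstep=100, test_every=2)
    return F.mse_loss(u_pred, u_true).item()  # single scalar test MSE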
def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10, test_every=2):
'''
Tests the samples of the Bayesian SWAG model
Args:
args (argparse): object with program arguments
swag_nn (SwagNN model): Bayesian SWAG model to draw DenseED samples from
test_loader (dataloader): dataloader with test cases (use createTestingLoader)
tstep (int): number of timesteps to predict for
n_samples (int): number of model samples to draw
test_every (int): Time-step interval to test (must match simulator), default = 2
Returns:
u_out (torch.Tensor): [d x nsamples x (tstep+1)//test_every x 2 x nel x nel] predicted quantities of each sample
u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator
'''
mb_size = int(len(test_loader.dataset)/len(test_loader))
u_out = torch.zeros(len(test_loader.dataset), n_samples, (tstep)//test_every + 1, 2, args.nel, args.nel)
u_target = torch.zeros(len(test_loader.dataset), (tstep)//test_every + 1, 2, args.nel, args.nel)
for i in range(n_samples):
print('Executing model sample {:d}'.format(i))
model = swag_nn.sample(diagCov=True) # Use diagonal approx. only when training
model.eval()
for bidx, (input0, uTarget0) in enumerate(test_loader):
# Expand input to match model in channels
dims = torch.ones(len(input0.shape))
dims[1] = args.nic
input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
if(i == 0): # Save target data
u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every + 1)]
u_out[bidx*mb_size:(bidx+1)*mb_size,i,0,:,:,:] = input0
# Auto-regress
for t_idx in range(tstep):
uPred = model(input[:,-2*args.nic:,:])
if((t_idx+1)%test_every == 0):
u_out[bidx*mb_size:(bidx+1)*mb_size, i, t_idx//test_every+1,:,:,:] = uPred
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
return u_out, u_target
if __name__ == '__main__':
# Parse arguments
args = Parser().parse()
use_cuda = "cpu"
if(torch.cuda.is_available()):
use_cuda = "cuda"
args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Torch device:{}".format(args.device))
# Domain settings, matches solver settings
x0 = 0
x1 = 1.0
args.dx = (x1 - x0)/args.nel
# Create training loader
burgerLoader = BurgerLoader(dt=args.dt)
training_loader = burgerLoader.createTrainingLoader(args.ntrain, args.nel, batch_size=args.batch_size)
# Create testing loader
test_cases = np.array([400, 401]).astype(int)
testing_loader = burgerLoader.createTestingLoader(args.data_dir, test_cases, simdt=0.005, batch_size=2)
# Create DenseED model
denseED = DenseED(in_channels=2*args.nic, out_channels=2*args.noc, | init_features=args.init_features,
bn_size=args.bn_size,
drop_rate=args.drop_rate,
bottleneck=False,
out_activation=None).to(args.device)
# Bayesian neural network
bayes_nn = BayesNN(args, denseED)
# Stochastic weighted averages
swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=args.swag_max)
# Optimizer
parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.lr_beta},
{'params': bayes_nn.model.features.parameters()}]
optimizer = torch.optim.Adam(parameters, lr=args.lr, weight_decay=0.0)
# Learning | blocks=args.blocks,
growth_rate=args.growth_rate, | random_line_split |
main.py | mean square error between the prediction and time-integrator
'''
model.train()
loss_total = 0
mse_total = 0
# Mini-batch loop
for batch_idx, input in enumerate(train_loader):
# input [b, 2, x, y]
# Expand input to match model in channels
dims = torch.ones(len(input.shape))
dims[1] = args.nic
input = input.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
loss = 0
# Loop for number of timesteps
optimizer.zero_grad()
for i in range(tsteps[batch_idx]):
uPred = model(input[:,-2*args.nic:,:])
if(i < tstart[batch_idx]):
# Don't calculate residual, just predict forward
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred[:,0,:].unsqueeze(1).detach()
input = torch.cat([input, input0], dim=1)
else:
# Calculate loss
# Start with implicit time integration
ustar = burgerInt.crankNicolson(uPred, input[:,-2:,:], dt)
# Calc. loss based on posterior of the model
log_joint = model.calc_neg_log_joint(uPred, ustar, len(train_loader))
loss = loss + log_joint
loss_total = loss_total + loss.data.item()
mse_total += F.mse_loss(uPred.detach(), ustar.detach()).item() # MSE for scheduler
# Back-prop through two timesteps
if((i+1)%tback[batch_idx] == 0):
loss.backward()
loss = 0
optimizer.step()
optimizer.zero_grad()
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
else:
input0 = uPred
input = torch.cat([input, input0], dim=1)
if(batch_idx % 10 == 1):
print("Epoch {}, Mini-batch {}/{} ({}%) ".format(epoch, batch_idx, \
len(train_loader), int(100*batch_idx/len(train_loader))))
return loss_total/len(train_loader), mse_total/len(train_loader)
def test(args, model, test_loader, tstep=100, test_every=2):
'''
Tests the deterministic model
Args:
args (argparse): object with program arguments
model (PyTorch model): DenseED model to be tested
test_loader (dataloader): dataloader with test cases (use createTestingLoader)
tstep (int): number of timesteps to predict for
test_every (int): Time-step interval to test (must match simulator), default = 2
Returns:
u_out (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] predicted quantities
u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator
'''
model.eval()
mb_size = int(len(test_loader.dataset)/len(test_loader))
u_out = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel)
u_target = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel)
for bidx, (input0, uTarget0) in enumerate(test_loader):
# Expand input to match model in channels
dims = torch.ones(len(input0.shape))
dims[1] = args.nic
input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
u_out[bidx*mb_size:(bidx+1)*mb_size,0] = input0
u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every+1)].cpu()
# Auto-regress
for t_idx in range(tstep):
uPred = model(input[:,-2*args.nic:,:,:])
if((t_idx+1)%test_every == 0):
u_out[bidx*mb_size:(bidx+1)*mb_size, (t_idx+1)//test_every,:,:,:] = uPred
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
return u_out, u_target
def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10, test_every=2):
| model.eval()
for bidx, (input0, uTarget0) in enumerate(test_loader):
# Expand input to match model in channels
dims = torch.ones(len(input0.shape))
dims[1] = args.nic
input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
if(i == 0): # Save target data
u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every + 1)]
u_out[bidx*mb_size:(bidx+1)*mb_size,i,0,:,:,:] = input0
# Auto-regress
for t_idx in range(tstep):
uPred = model(input[:,-2*args.nic:,:])
if((t_idx+1)%test_every == 0):
u_out[bidx*mb_size:(bidx+1)*mb_size, i, t_idx//test_every+1,:,:,:] = uPred
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
return u_out, u_target
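# Illustrative sketch (not part of the original file): reducing the per-sample
# predictions returned by testSample() to a predictive mean and spread over the
# model-sample dimension (dim=1).
def _example_predictive_moments(u_out):
    u_mean = u_out.mean(dim=1)                # [d, T, 2, nel, nel]
    u_std = u_out.std(dim=1, unbiased=False)  # spread across SWAG samples
    return u_mean, u_std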
if __name__ == '__main__':
# Parse arguments
args = Parser().parse()
use_cuda = "cpu"
if(torch.cuda.is_available()):
use_cuda = "cuda"
args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Torch device:{}".format(args.device))
# Domain settings, matches solver settings
x0 = 0
x1 = 1.0
args.dx = (x1 - x0)/args.nel
# Create training loader
burgerLoader = BurgerLoader(dt=args.dt)
training_loader = burgerLoader.createTrainingLoader(args.ntrain, args.nel, batch_size=args.batch_size)
# Create testing loader
test_cases = np.array([400, 401]).astype(int)
testing_loader = burgerLoader.createTestingLoader(args.data_dir, test_cases, simdt=0.005, batch_size=2)
# Create DenseED model
denseED = DenseED(in_channels=2*args.nic, out_channels=2*args.noc,
blocks=args.blocks,
growth_rate=args.growth_rate,
init_features=args.init_features,
bn_size=args.bn_size,
drop_rate=args.drop_rate,
bottleneck=False,
out_activation=None).to(args.device)
# Bayesian neural network
bayes_nn = BayesNN(args, denseED)
# Stochastic weighted averages
swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=args.swag_max)
# Optimizer
parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.lr_beta},
{'params': bayes_nn.model.features.parameters()}]
optimizer = torch.optim.Adam(parameters, lr=args.lr, weight_decay=0.0)
# Learning rate | '''
Tests the samples of the Bayesian SWAG model
Args:
args (argparse): object with program arguments
swag_nn (SwagNN model): Bayesian SWAG model to draw DenseED samples from
test_loader (dataloader): dataloader with test cases (use createTestingLoader)
tstep (int): number of timesteps to predict for
n_samples (int): number of model samples to draw
test_every (int): Time-step interval to test (must match simulator), default = 2
Returns:
u_out (torch.Tensor): [d x nsamples x (tstep+1)//test_every x 2 x nel x nel] predicted quantities of each sample
u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator
'''
mb_size = int(len(test_loader.dataset)/len(test_loader))
u_out = torch.zeros(len(test_loader.dataset), n_samples, (tstep)//test_every + 1, 2, args.nel, args.nel)
u_target = torch.zeros(len(test_loader.dataset), (tstep)//test_every + 1, 2, args.nel, args.nel)
for i in range(n_samples):
print('Executing model sample {:d}'.format(i))
model = swag_nn.sample(diagCov=True) # Use diagonal approx. only when training | identifier_body |
main.py | (args, model, burgerInt, train_loader, optimizer, tsteps, tback, tstart, dt=0.1):
'''
Trains the model
Args:
args (argparse): object with program arguments
model (PyTorch model): SWAG DenseED model to be trained
burgerInt (BurgerIntegrate): 1D Burger system time integrator
train_loader (dataloader): dataloader with training cases (use createTrainingLoader)
optimizer (PyTorch optimizer): optimizer
tsteps (np.array): [mb] number of timesteps to predict for each mini-batch
tback (np.array): [mb] number of timesteps to forward predict before back prop
tstart (np.array): [mb] time-step to start updating model (kept at 0 for now)
dt (float): current time-step size of the model (used to progressively increase time-step size)
Returns:
loss_total (float): negative log joint posterior
mse_total (float): mean square error between the prediction and time-integrator
'''
model.train()
loss_total = 0
mse_total = 0
# Mini-batch loop
for batch_idx, input in enumerate(train_loader):
# input [b, 2, x, y]
# Expand input to match model in channels
dims = torch.ones(len(input.shape))
dims[1] = args.nic
input = input.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
loss = 0
# Loop for number of timesteps
optimizer.zero_grad()
for i in range(tsteps[batch_idx]):
uPred = model(input[:,-2*args.nic:,:])
if(i < tstart[batch_idx]):
# Don't calculate residual, just predict forward
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred[:,0,:].unsqueeze(1).detach()
input = torch.cat([input, input0], dim=1)
else:
# Calculate loss
# Start with implicit time integration
ustar = burgerInt.crankNicolson(uPred, input[:,-2:,:], dt)
# Calc. loss based on posterior of the model
log_joint = model.calc_neg_log_joint(uPred, ustar, len(train_loader))
loss = loss + log_joint
loss_total = loss_total + loss.data.item()
mse_total += F.mse_loss(uPred.detach(), ustar.detach()).item() # MSE for scheduler
# Back-prop through two timesteps
if((i+1)%tback[batch_idx] == 0):
loss.backward()
loss = 0
optimizer.step()
optimizer.zero_grad()
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
else:
input0 = uPred
input = torch.cat([input, input0], dim=1)
if(batch_idx % 10 == 1):
print("Epoch {}, Mini-batch {}/{} ({}%) ".format(epoch, batch_idx, \
len(train_loader), int(100*batch_idx/len(train_loader))))
return loss_total/len(train_loader), mse_total/len(train_loader)
def test(args, model, test_loader, tstep=100, test_every=2):
'''
Tests the deterministic model
Args:
args (argparse): object with program arguments
model (PyTorch model): DenseED model to be tested
test_loader (dataloader): dataloader with test cases (use createTestingLoader)
tstep (int): number of timesteps to predict for
test_every (int): Time-step interval to test (must match simulator), default = 2
Returns:
u_out (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] predicted quantities
u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator
'''
model.eval()
mb_size = int(len(test_loader.dataset)/len(test_loader))
u_out = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel)
u_target = torch.zeros(len(test_loader.dataset), tstep//test_every+1, 2, args.nel, args.nel)
for bidx, (input0, uTarget0) in enumerate(test_loader):
# Expand input to match model in channels
dims = torch.ones(len(input0.shape))
dims[1] = args.nic
input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
u_out[bidx*mb_size:(bidx+1)*mb_size,0] = input0
u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every+1)].cpu()
# Auto-regress
for t_idx in range(tstep):
uPred = model(input[:,-2*args.nic:,:,:])
if((t_idx+1)%test_every == 0):
u_out[bidx*mb_size:(bidx+1)*mb_size, (t_idx+1)//test_every,:,:,:] = uPred
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
return u_out, u_target
def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10, test_every=2):
'''
Tests the samples of the Bayesian SWAG model
Args:
args (argparse): object with programs arguements
model (PyTorch model): DenseED model to be tested
test_loader (dataloader): dataloader with test cases (use createTestingLoader)
tstep (int): number of timesteps to predict for
n_samples (int): number of model samples to draw
test_every (int): Time-step interval to test (must match simulator), default = 2
Returns:
u_out (torch.Tensor): [d x nsamples x (tstep+1)//test_every x 2 x nel x nel] predicted quantities of each sample
u_target (torch.Tensor): [d x (tstep+1)//test_every x 2 x nel x nel] respective target values loaded from simulator
'''
mb_size = int(len(test_loader.dataset)/len(test_loader))
u_out = torch.zeros(len(test_loader.dataset), n_samples, (tstep)//test_every + 1, 2, args.nel, args.nel)
u_target = torch.zeros(len(test_loader.dataset), (tstep)//test_every + 1, 2, args.nel, args.nel)
for i in range(n_samples):
print('Executing model sample {:d}'.format(i))
model = swag_nn.sample(diagCov=True) # Use diagonal approx. only when training
model.eval()
for bidx, (input0, uTarget0) in enumerate(test_loader):
# Expand input to match model in channels
dims = torch.ones(len(input0.shape))
dims[1] = args.nic
input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)
if(i == 0): # Save target data
u_target[bidx*mb_size:(bidx+1)*mb_size] = uTarget0[:,:(tstep//test_every + 1)]
u_out[bidx*mb_size:(bidx+1)*mb_size,i,0,:,:,:] = input0
# Auto-regress
for t_idx in range(tstep):
uPred = model(input[:,-2*args.nic:,:])
if((t_idx+1)%test_every == 0):
u_out[bidx*mb_size:(bidx+1)*mb_size, i, t_idx//test_every+1,:,:,:] = uPred
input = input[:,-2*int(args.nic-1):,:].detach()
input0 = uPred.detach()
input = torch.cat([input, input0], dim=1)
return u_out, u_target
if __name__ == '__main__':
# Parse arguements
args = Parser().parse()
use_cuda = "cpu"
if(torch.cuda.is_available()):
use_cuda = "cuda"
args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Torch device:{}".format(args.device))
# Domain settings, matches solver settings
x0 = 0
x1 = 1.0
args.dx = (x1 - x0)/args.nel
# Create training loader
burgerLoader = BurgerLoader(dt=args.dt)
training_loader = burgerLoader.createTrainingLoader(args.ntrain, args.nel, batch_size=args.batch_size)
# Create training loader
test_cases = np.array([400, 40 | train | identifier_name |
|
index.js | delete all active tokens, by clearing discordUserId2token and token2nethzHash
WARNING: this leads to unexpected behaviour from the point of view of users who are pending verification...
\`!purgemarks\` (admin only): unmark all nethzs, by clearing verifiedNethzHashs.
WARNING: doing this is rarely a good idea...
\`!verify\` (admin only): manually verify a user; e.g \`!verify @${sampleDiscordUsername}\`
\`!adminhelp\` (admin only): print this message
(Note: admin commands are only used in the admin channel #${config.adminChannelName}, whereas normal commands are only used in DM channels.)
`;
| const welcomeMsg = (guildName) => `Hello! I see you just joined the server **${guildName}**.
You are currently not verified as an ETH student on **${guildName}**, so you only have access to a restricted number of channels.
To verify yourself as an ETH student,
1. please tell me your nethz (i.e ETH username) in the following format: \`!nethz \` + your nethz;
e.g: \`!nethz ${sampleNethz}\`
2. I will send an email to <nethz>@student.ethz.ch containing a token
3. then, show me that you did receive the token, by telling me: \`!token \` + the token;
e.g: \`!token ${sampleToken}\`
Remarks:
- To reset the process, e.g if you misspelled your nethz, just do step 1 again. (I will invalidate the previous token, don't worry.)
- My email address, which I will use in step 2, is ${botMail.user}; please check your spam folder if you don't receive anything. (Note that no human will check the inbox of ${botMail.user}, except for debugging.)
- Once you receive the email, you have ${config.tokenTTL} hours to accomplish step 3, as the token expires after that duration.
- I will store a salted hash of your nethz in database. (This is to prevent a student from verifying multiple Discord accounts.) I will *not* keep track of which Discord account your nethz corresponds to, and vice-versa.
I am a very stupid bot. If you have any questions or encounter any problem, please send a message to an admin of **${guildName}** directly.
`;
const genMailContent = (discordUsername, token, guildName, botName) => `Hello, \n
You have recently joined the Discord server **${guildName}**, under the username **${discordUsername}**, and provided your nethz (i.e ETH username) for verification.\n
To finish the verification process, please check your Direct Message channel with me (**${botName}**) and send me the following token within ${config.tokenTTL} hours: \n
${token}\n
If you did not join the Discord server **${guildName}** and tell me your nethz, then someone else provided your nethz; in that case, you don't need to do anything, and the token will expire in ${config.tokenTTL} hours.\n
Note that I am a Discord bot and that this email was autogenerated, so please don't reply to it. (You can reply if you really want to but no human will ever see it.)\n
If you really need to, you can always contact ${config.emergencyContact.fullName}, your fellow ETH student who runs the Discord server **${guildName}**.\n
\nBest regards,\n
${botName}
`;
// create reusable transporter object using the default SMTP transport
const transporter = nodemailer.createTransport(config.transportOptions);
// verify connection configuration
transporter.verify(function (error, success) {
if (error) {
console.log(error);
} else {
console.assert(success);
console.log("SMTP server is ready to take our messages");
}
});
client.once('ready', async () => {
const theGuild = client.guilds.cache.get(config.theGuildId);
if (!theGuild.available) {
console.warn("theGuild.available is false (it indicates a server outage)");
}
// check that the bot can read/write in the config.adminChannelName channel
const adminChannel = theGuild.channels.cache.find(channel => channel.name === config.adminChannelName);
const readWritePerms = ['VIEW_CHANNEL', 'SEND_MESSAGES'];
if (!theGuild.me.permissionsIn(adminChannel).has(readWritePerms)) {
throw Error(`bot doesn't have read/write permission in admin channel ${config.adminChannelName}`);
}
// create role config.roleName if does not exist
if (!theGuild.roles.cache.some(role => role.name === config.roleName)) {
theGuild.roles.create({ // discord.js v12 API; guild.createRole was removed after v11
data: { name: config.roleName }
})
.then(role => console.log(`Created new role with name ${role.name} and color ${role.color}`))
.catch(console.error);
}
// check that we can send email
const textContent = `yo yo yo this is a test email. The bot "${client.user.username}" was just started on host ${hostname}.`;
const info = await transporter.sendMail({
from: {
name: client.user.username,
address: botMail.user
},
to: botMail.user,
subject: `Test email (${client.user.username} bot startup)`,
text: textContent,
html: converter.makeHtml(textContent.replace('\n', '\n\n'))
});
console.log("Message sent: %s", info.messageId);
console.log('Ready!');
});
const prefix = config.prefix;
client.on('message', async message => {
if (message.author.bot) return;
if (message.channel.type === 'text' && message.channel.guild.id === config.theGuildId && message.channel.name === config.adminChannelName) {
if (!message.content.startsWith(prefix)) return;
const args = message.content.slice(prefix.length).split(/ +/);
const command = args.shift().toLowerCase();
if (command === 'unmark') {
if (!args.length) {
return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!unmark ${sampleNethz}\``);
} else if (args.length > 1) {
return message.channel.send(`You provided too many arguments... Usage: e.g \`!unmark ${sampleNethz}\``);
} else {
const nethz = args[0].toLowerCase();
const nethzHash = sha512(nethz, config.commonSalt);
if (! await verifiedNethzHashs.get(nethzHash)) {
return message.channel.send(`This nethz ${nethz} is not currently marked as "already used for verification". No action was performed.`);
} else {
await verifiedNethzHashs.delete(nethzHash);
return message.channel.send(`Unmarked nethz ${nethz} as "already used for verification".`);
}
}
} else if (command === 'mark') {
if (!args.length) {
return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!mark ${sampleNethz}\``);
} else if (args.length > 1) {
return message.channel.send(`You provided too many arguments... Usage: e.g \`!mark ${sampleNethz}\``);
} else {
const nethz = args[0].toLowerCase();
const nethzHash = sha512(nethz, config.commonSalt);
if (await verifiedNethzHashs.get(nethzHash)) {
return message.channel.send(`This nethz ${nethz} is already marked as "already used for verification". No action was performed.`);
} else {
await verifiedNethzHashs.set(nethzHash, true);
return message.channel.send(`Marked nethz ${nethz} as "already used for verification".`);
}
}
} else if (command === 'purgereqs') {
if (args.length) {
message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`);
}
await discordUserId2token.clear();
await token2nethzHash.clear();
return message.channel.send(`Cleared all active verification tokens from database. Tip: this leads to unexpected behaviour from the point of view of the users; it might be a good idea to put a message on a public channel to explain what happened.`);
} else if (command === 'purgemarks') {
if (args.length) {
message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`);
}
await verifiedNethzHashs.clear();
return message.channel.send(`Unmarked all previously marked nethzs as "already used for verification".`);
} else if (command === 'verify') { // unusable as it is, because we cannot mention Discord users in the admin channel if they are not in it. TODO
if (!args.length) {
return message.channel.send(`You didn't provide any (Discord) user to verify! Usage: e.g \`!verify ${sampleDiscordUsername}\``);
} else if (args.length > 1) {
return message.channel.send(`You provided too many arguments... Usage: e.g \`!verify ${sampleDisc | random_line_split |
|
index.js | delete all active tokens, by clearing discordUserId2token and token2nethzHash
WARNING: this leads to unexpected behaviour from the point of view of users who are pending verification...
\`!purgemarks\` (admin only): unmark all nethzs, by clearing verifiedNethzHashs.
WARNING: doing this is rarely a good idea...
\`!verify\` (admin only): manually verify a user; e.g \`!verify @${sampleDiscordUsername}\`
\`!adminhelp\` (admin only): print this message
(Note: admin commands are only used in the admin channel #${config.adminChannelName}, whereas normal commands are only used in DM channels.)
`;
const welcomeMsg = (guildName) => `Hello! I see you just joined the server **${guildName}**.
You are currently not verified as an ETH student on **${guildName}**, so you only have access to a restricted number of channels.
To verify yourself as an ETH student,
1. please tell me your nethz (i.e ETH username) in the following format: \`!nethz \` + your nethz;
e.g: \`!nethz ${sampleNethz}\`
2. I will send an email to <nethz>@student.ethz.ch containing a token
3. then, show me that you did receive the token, by telling me: \`!token \` + the token;
e.g: \`!token ${sampleToken}\`
Remarks:
- To reset the process, e.g if you misspelled your nethz, just do step 1 again. (I will invalidate the previous token, don't worry.)
- My email address, which I will use in step 2, is ${botMail.user}; please check your spam folder if you don't receive anything. (Note that no human will check the inbox of ${botMail.user}, except for debugging.)
- Once you receive the email, you have ${config.tokenTTL} hours to accomplish step 3, as the token expires after that duration.
- I will store a salted hash of your nethz in database. (This is to prevent a student from verifying multiple Discord accounts.) I will *not* keep track of which Discord account your nethz corresponds to, and vice-versa.
I am a very stupid bot. If you have any questions or encounter any problem, please send a message to an admin of **${guildName}** directly.
`;
const genMailContent = (discordUsername, token, guildName, botName) => `Hello, \n
You have recently joined the Discord server **${guildName}**, under the username **${discordUsername}**, and provided your nethz (i.e ETH username) for verification.\n
To finish the verification process, please check your Direct Message channel with me (**${botName}**) and send me the following token within ${config.tokenTTL} hours: \n
${token}\n
If you did not join the Discord server **${guildName}** and tell me your nethz, then someone else provided your nethz; in that case, you don't need to do anything, and the token will expire in ${config.tokenTTL} hours.\n
Note that I am a Discord bot and that this email was autogenerated, so please don't reply to it. (You can reply if you really want to but no human will ever see it.)\n
If you really need to, you can always contact ${config.emergencyContact.fullName}, your fellow ETH student who runs the Discord server **${guildName}**.\n
\nBest regards,\n
${botName}
`;
// create reusable transporter object using the default SMTP transport
const transporter = nodemailer.createTransport(config.transportOptions);
// verify connection configuration
transporter.verify(function (error, success) {
if (error) {
console.log(error);
} else {
console.assert(success);
console.log("SMTP server is ready to take our messages");
}
});
client.once('ready', async () => {
const theGuild = client.guilds.cache.get(config.theGuildId);
if (!theGuild.available) {
console.warn("theGuild.available is false (it indicates a server outage)");
}
// check that the bot can read/write in the config.adminChannelName channel
const adminChannel = theGuild.channels.cache.find(channel => channel.name === config.adminChannelName);
const readWritePerms = ['VIEW_CHANNEL', 'SEND_MESSAGES'];
if (!theGuild.me.permissionsIn(adminChannel).has(readWritePerms)) {
throw Error(`bot doesn't have read/write permission in admin channel ${config.adminChannelName}`);
}
// create role config.roleName if does not exist
if (!theGuild.roles.cache.some(role => role.name === config.roleName)) {
theGuild.roles.create({ // discord.js v12 API; guild.createRole was removed after v11
data: { name: config.roleName }
})
.then(role => console.log(`Created new role with name ${role.name} and color ${role.color}`))
.catch(console.error);
}
// check that we can send email
const textContent = `yo yo yo this is a test email. The bot "${client.user.username}" was just started on host ${hostname}.`;
const info = await transporter.sendMail({
from: {
name: client.user.username,
address: botMail.user
},
to: botMail.user,
subject: `Test email (${client.user.username} bot startup)`,
text: textContent,
html: converter.makeHtml(textContent.replace('\n', '\n\n'))
});
console.log("Message sent: %s", info.messageId);
console.log('Ready!');
});
const prefix = config.prefix;
client.on('message', async message => {
if (message.author.bot) return;
if (message.channel.type === 'text' && message.channel.guild.id === config.theGuildId && message.channel.name === config.adminChannelName) {
if (!message.content.startsWith(prefix)) return;
const args = message.content.slice(prefix.length).split(/ +/);
const command = args.shift().toLowerCase();
if (command === 'unmark') {
if (!args.length) {
return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!unmark ${sampleNethz}\``);
} else if (args.length > 1) {
return message.channel.send(`You provided too many arguments... Usage: e.g \`!unmark ${sampleNethz}\``);
} else {
const nethz = args[0].toLowerCase();
const nethzHash = sha512(nethz, config.commonSalt);
if (! await verifiedNethzHashs.get(nethzHash)) {
return message.channel.send(`This nethz ${nethz} is not currently marked as "already used for verification". No action was performed.`);
} else |
}
} else if (command === 'mark') {
if (!args.length) {
return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!mark ${sampleNethz}\``);
} else if (args.length > 1) {
return message.channel.send(`You provided too many arguments... Usage: e.g \`!mark ${sampleNethz}\``);
} else {
const nethz = args[0].toLowerCase();
const nethzHash = sha512(nethz, config.commonSalt);
if (await verifiedNethzHashs.get(nethzHash)) {
return message.channel.send(`This nethz ${nethz} is already marked as "already used for verification". No action was performed.`);
} else {
await verifiedNethzHashs.set(nethzHash, true);
return message.channel.send(`Marked nethz ${nethz} as "already used for verification".`);
}
}
} else if (command === 'purgereqs') {
if (args.length) {
message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`);
}
await discordUserId2token.clear();
await token2nethzHash.clear();
return message.channel.send(`Cleared all active verification tokens from database. Tip: this leads to unexpected behaviour from the point of view of the users; it might be a good idea to put a message on a public channel to explain what happened.`);
} else if (command === 'purgemarks') {
if (args.length) {
message.channel.send(`Warning: !${command} normally does not take any arguments. Arguments were ignored.`);
}
await verifiedNethzHashs.clear();
return message.channel.send(`Unmarked all previously marked nethzs as "already used for verification".`);
} else if (command === 'verify') { // unusable as it is, because we cannot mention Discord users in the admin channel if they are not in it. TODO
if (!args.length) {
return message.channel.send(`You didn't provide any (Discord) user to verify! Usage: e.g \`!verify ${sampleDiscordUsername}\``);
} else if (args.length > 1) {
return message.channel.send(`You provided too many arguments... Usage: e.g \`!verify ${ | {
await verifiedNethzHashs.delete(nethzHash);
return message.channel.send(`Unmarked nethz ${nethz} as "already used for verification".`);
} | conditional_block |
index.js | (mention) {
// The id is the first and only match found by the RegEx.
const matches = mention.match(/^<@!?(\d+)>$/);
// If supplied variable was not a mention, matches will be null instead of an array.
if (!matches) return;
// However the first element in the matches array will be the entire mention, not just the ID, so use index 1.
const id = matches[1];
return client.users.cache.get(id);
}
const HOURS_TO_MILLISECONDS = 3600 * 1000;
const client = new Discord.Client();
const converter = new showdown.Converter();
// use Keyv with sqlite storage
const sqlite_uri = "sqlite://db.sqlite3";
const discordUserId2token = new Keyv(sqlite_uri, { namespace: "discord_user_id_to_token" }); // Discord User-ID / token pairs
const token2nethzHash = new Keyv(sqlite_uri, { namespace: "token_to_nethz_hash" }); // nethz / token pairs
const verifiedNethzHashs = new Keyv(sqlite_uri, { namespace: "verified_nethz_hashs" }); // the set of hashes of nethzs already used for verification (only the keys are relevant; value is always `true`)
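// Note (added for clarity, not in the original): the "token expires after
// config.tokenTTL hours" behaviour described in the messages below is presumably
// implemented by passing a TTL of config.tokenTTL * HOURS_TO_MILLISECONDS when
// token entries are written with keyv.set() elsewhere in this file.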
discordUserId2token.on('error', err => console.error('Keyv connection error:', err));
token2nethzHash.on('error', err => console.error('Keyv connection error:', err));
verifiedNethzHashs.on('error', err => console.error('Keyv connection error:', err));
client.login(config.token);
const botMail = config.transportOptions.auth;
const sampleNethz = "jsmith";
const sampleToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9";
const sampleDiscordUsername = "john_sm_01";
const availableCommandsStr = `Available commands:
\`!ping\`: make me say Pong
\`!nethz\`: tell me your nethz; e.g \`!nethz ${sampleNethz}\`
\`!token\`: tell me the token I sent you; e.g \`!token ${sampleToken}\`
\`!welcomeagain\`: **print the welcome message again, with all the instructions for the verification process**
\`!help\`: print this message
`;
const adminCommandsStr = `Admin-only commands:
\`!unmark\` (admin only): unmark a nethz as "already used for verification"; e.g \`!unmark ${sampleNethz}\`
\`!mark\` (admin only): mark a nethz as "already used for verification"; e.g \`!mark ${sampleNethz}\`
\`!purgereqs\` (admin only): delete all active tokens, by clearing discordUserId2token and token2nethzHash
WARNING: this leads to unexpected behaviour from the point of view of users who are pending verification...
\`!purgemarks\` (admin only): unmark all nethzs, by clearing verifiedNethzHashs.
WARNING: doing this is rarely a good idea...
\`!verify\` (admin only): manually verify a user; e.g \`!verify @${sampleDiscordUsername}\`
\`!adminhelp\` (admin only): print this message
(Note: admin commands are only used in the admin channel #${config.adminChannelName}, whereas normal commands are only used in DM channels.)
`;
const welcomeMsg = (guildName) => `Hello! I see you just joined the server **${guildName}**.
You are currently not verified as an ETH student on **${guildName}**, so you only have access to a restricted number of channels.
To verify yourself as an ETH student,
1. please tell me your nethz (i.e ETH username) in the following format: \`!nethz \` + your nethz;
e.g: \`!nethz ${sampleNethz}\`
2. I will send an email at <nethz>@student.ethz.ch containing a token
3. then, show me that you did receive the token, by telling me: \`!token \` + the token;
e.g: \`!token ${sampleToken}\`
Remarks:
- To reset the process, e.g if you misspelled your nethz, just do step 1 again. (I will invalidate the previous token, don't worry.)
- My email address, which I will use in step 2, is ${botMail.user}; please check your spam folder if you don't receive anything. (Note that no human will check the inbox of ${botMail.user}, except for debugging.)
- Once you receive the email, you have ${config.tokenTTL} hours to accomplish step 3, as the token expires after that duration.
- I will store a salted hash of your nethz in my database. (This is to prevent a student from verifying multiple Discord accounts.) I will *not* keep track of which Discord account your nethz corresponds to, and vice-versa.
I am a very stupid bot. If you have any questions or encounter any problem, please send a message to an admin of **${guildName}** directly.
`;
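// Illustrative sketch only: the file calls a `sha512(value, salt)` helper (see the !unmark
// handler below) whose real implementation is not shown here. One plausible version, using
// nothing but Node's built-in crypto module, could look like this; the hypothetical name
// avoids clashing with the actual helper.
function saltedSha512Sketch(value, salt) {
	const crypto = require('crypto');
	return crypto.createHash('sha512').update(salt + value).digest('hex');
}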
const genMailContent = (discordUsername, token, guildName, botName) => `Hello, \n
You have recently joined the Discord server **${guildName}**, under the username **${discordUsername}**, and provided your nethz (i.e ETH username) for verification.\n
To finish the verification process, please check your Direct Message channel with me (**${botName}**) and send me the following token within ${config.tokenTTL} hours: \n
${token}\n
If you did not join the Discord server **${guildName}** and tell me your nethz, then someone else provided it; in that case you don't need to do anything, as the token will expire in ${config.tokenTTL} hours.\n
Note that I am a Discord bot and that this email was autogenerated, so please don't reply to it. (You can reply if you really want to but no human will ever see it.)\n
If you really need to, you can always contact ${config.emergencyContact.fullName}, your fellow ETH student who runs the Discord server **${guildName}**.\n
\nBest regards,\n
${botName}
`;
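// Sketch (not from the original source): how the verification email might be sent, reusing
// genMailContent and the nodemailer transporter created just below. The helper name and the
// subject line are assumptions about the author's intent.
async function sendVerificationMailSketch(nethz, discordUsername, token, guildName) {
	const text = genMailContent(discordUsername, token, guildName, client.user.username);
	return transporter.sendMail({
		from: { name: client.user.username, address: botMail.user },
		to: `${nethz}@student.ethz.ch`,
		subject: `Your ${guildName} verification token`, // wording is an assumption
		text: text,
		html: converter.makeHtml(text.replace(/\n/g, '\n\n'))
	});
}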
// create reusable transporter object using the default SMTP transport
const transporter = nodemailer.createTransport(config.transportOptions);
// verify connection configuration
transporter.verify(function (error, success) {
if (error) {
console.log(error);
} else {
console.assert(success);
console.log("SMTP server is ready to take our messages");
}
});
client.once('ready', async () => {
const theGuild = client.guilds.cache.get(config.theGuildId);
if (!theGuild.available) {
console.warn("theGuild.available is false (it indicates a server outage)");
}
// check that the bot can read/write in the config.adminChannelName channel
const adminChannel = theGuild.channels.cache.find(channel => channel.name === config.adminChannelName);
const readWritePerms = ['VIEW_CHANNEL', 'SEND_MESSAGES'];
if (!theGuild.me.permissionsIn(adminChannel).has(readWritePerms)) {
throw Error(`bot doesn't have read/write permission in admin channel ${config.adminChannelName}`);
}
// create role config.roleName if does not exist
if (!theGuild.roles.cache.some(role => role.name === config.roleName)) {
theGuild.roles.create({ // discord.js v12: roles are created through the guild's RoleManager
data: { name: config.roleName }
})
.then(role => console.log(`Created new role with name ${role.name} and color ${role.color}`))
.catch(console.error);
}
// check that we can send email
const textContent = `yo yo yo this is a test email. The bot "${client.user.username}" was just started on host ${hostname}.`;
const info = await transporter.sendMail({
from: {
name: client.user.username,
address: botMail.user
},
to: botMail.user,
subject: `Test email (${client.user.username} bot startup)`,
text: textContent,
html: converter.makeHtml(textContent.replace('\n', '\n\n'))
});
console.log("Message sent: %s", info.messageId);
console.log('Ready!');
});
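// Sketch only (not in the original file): granting the verified role once a token has been
// accepted. `member.roles.add` and the role lookup are standard discord.js v12 calls; the
// helper name is an assumption about how the author might wire it up.
async function grantVerifiedRoleSketch(member) {
	const role = member.guild.roles.cache.find(r => r.name === config.roleName);
	if (!role) throw Error(`role ${config.roleName} not found`);
	await member.roles.add(role);
}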
const prefix = config.prefix;
client.on('message', async message => {
if (message.author.bot) return;
if (message.channel.type === 'text' && message.channel.guild.id === config.theGuildId && message.channel.name === config.adminChannelName) {
if (!message.content.startsWith(prefix)) return;
const args = message.content.slice(prefix.length).split(/ +/);
const command = args.shift().toLowerCase();
if (command === 'unmark') {
if (!args.length) {
return message.channel.send(`You didn't provide any nethz! Usage: e.g \`!unmark ${sampleNethz}\``);
} else if (args.length > 1) {
return message.channel.send(`You provided too many arguments... Usage: e.g \`!unmark ${sampleNethz}\``);
} else {
const nethz = args[0].toLowerCase();
const nethzHash = sha512(nethz, config.commonSalt);
if (! await verified | getUserFromMention | identifier_name |