file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
licensePlateDetectorOptimized.py | model = tf.keras.models.load_model(folder_path + "kerasModelandData/model.h5") #getting the model and loading it
########################################################################################
#################################### GENERAL SETUP #####################################
########################################################################################
def skip_forward(): #a function to skip forward in the video
frame_count = cap.get(cv.CAP_PROP_POS_FRAMES)
cap.set(cv.CAP_PROP_POS_FRAMES, frame_count + 1000)
def setup_dictionary(): #a function to set up the dictionary mapping values to letters for the NN
alphabet = open(folder_path + "kerasModelandData/alphabet.txt")
for count, line in enumerate(alphabet.readlines()):
letter_dict[count] = line[0]
print(letter_dict)
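# --- Added illustrative sketch (not part of the original script): a compact equivalent of
# --- setup_dictionary(), assuming alphabet.txt holds one character per line ("A", "B", ..., "9")
# --- and that folder_path is the same module-level path used above.
def setup_dictionary_sketch():
    with open(folder_path + "kerasModelandData/alphabet.txt") as alphabet_file:
        return {count: line[0] for count, line in enumerate(alphabet_file)}  # e.g. {0: 'A', 1: 'B', ...}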
### Class for plate detection
########################################################################################
#################################### SETUP PROGRAM! ####################################
########################################################################################
class FindPlate:
#should maybe make the parameters global variables or controlled by the command line
# Have to adjust so that the min and max are larger when analyzing the images and smaller when looking at the vids
def __init__(self, counter, check_wait = False, imgAddress = None, img = None):
self.check_wait = check_wait #initializing whether we need to wait between drawing contours for debugging
if imgAddress is None and img is not None: #getting the image from the video
self.img = img
elif imgAddress is not None and img is None:
self.img = cv.resize(cv.imread(imgAddress), (860, 480))
else:
print("-----------------------ERROR FINDING IMAGE-----------------------")
exit(0)
if(counter == 0):
self.setup_exec() #execute the program every certain amount of frames
elif show_images_bool:
self.show_images() #Show the images if that option is on
self.check_keys()
def setup_exec(self):
self.settings_init() #initializing all of the settings
if optimize: #Currently, optimize makes it so that only the bottom portion of the screen is analyzed
self.offset = int(self.img.shape[0] * (self.divideArea - 1) / self.divideArea) #How many pixels in the y direction are not analyzed from the top
self.top_img = self.img[ : self.offset ] #Taking the top portion of the image and saving it for later
self.img = self.img[self.offset : ] #reassigning the image to the portion being analyzed
self.x = self.img.shape[1] #getting the width of the image
self.img_copy = self.img.copy() #getting copies for analysis/blurring
self.img_rects = self.img.copy() #the copy that will be used for bounding rectangles
self.Canny = None #Initializing variable to hold Canny image
self.run()
if optimize: #After being run, rejoin the images if optimize is on
self.img = np.append(self.top_img, self.img, axis=0)
self.img_rects = np.append(self.top_img, self.img_rects, axis=0)
if show_images_bool: #if show the images, show the images
self.show_images_exec()
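# --- Added worked example (not part of the original class): with the default divideArea of 2.5 and a
# --- 480-row frame, offset = int(480 * 1.5 / 2.5) = 288, so only the bottom 192 rows (the lower 40%
# --- of the frame) are scanned for plates, and np.append later stitches the untouched top back on.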
def settings_init(self):
self.divideArea = 2.5 #This is the denominator for how much of the screen is analyzed (analyzes the [1/(variable)] portion of the image/vid)
#For example, if the bottom half should be analyzed, put in '2'
self.amt_digits = 6 #defining the amount of characters and digits that should be found on the license plate
self.ratio_max = 3 #This is the maximum width to height ratio that a license plate can be in the program (for the US, about 4 is good while for EU plates, about 6-7 is good)
self.ratio_min = 1.5 #This is the minimum width to height ratio
self.angle_min = 84 #After the angle of the cv.areaMinRect has a modulo 90 applied to it, the angle either needs to be close to upright (this value or above)
self.angle_max = 6 # or close to horizontal (this value or below) in degrees
self.img_size = self.img.shape[0] * self.img.shape[1]
#current size: about 240,000 pixels
self.area_min = int(self.img_size / 5000) #minimum area of the accepted bounding boxes -- it recognizes plates with smaller values but there is no way that characters can be picked out. No use to have smaller
self.area_max = int(self.img_size / 600) #max area of the accepted bounding boxes
self.lower_canny = 110 #lower value for canny thresholding
self.upper_canny = 120 #upper value for canny thresholding
#ASPECT variables are not used:
self.aspect_max = 1 #the max amount of area that the license plate can cover within a bounding box to be considered
self.aspect_min = 0.3 #the minimum amount of area that a license plate can cover within a bounding box to be considered
self.img_dilate = 40 #specifying the value that the pixels which are being brightened will be increased by
self.blur = (9, 9) #initializing the size component of the gaussian blur that is applied to the image
self.offset = 0 #initializing the variable which keeps track of how far from the top of the image the program begins to analyze
self.top_img = None #initializing the variable which may hold the top part of the image for later
self.element_structure = cv.getStructuringElement(shape=cv.MORPH_RECT, ksize=(5, 5)) #basic elem structure for blurring
self.letter_contour_min = 2000 #The minimum size a contour has to be for it to be considered for letter analysis
self.roi_array = [] #array for holding the ROI's
def run(self): #master run function for the program
_ = self.contour_manipulation(self.preprocess_canny_contours())
license_plate_arr = self.analyze_image()
########################################################################################
#################################### EXECUTE PROGRAM ###################################
########################################################################################
def contour_manipulation(self, contours):
ret = [] #init the ret array
for c in contours:
boolRect = self.check_min_rect(c) #checking whether the bounding rect encapsulates a valid license plate region
if boolRect: #if the region is valid, then append it to the return array
ret.append(c)
if checkIndividual: #if the check individual option is on, then go through the contours one-by-one, write them to the image, and show the image
checkIndividual = self.check_indiv_contour(c)
return ret
def show_images_exec(self, height = 300): #shows the important images that are being used for execution
# cv.imshow("Contours", imutils.resize(self.img_copy, height = height))
cv.imshow("Bounding Rects", self.img_rects)
cv.imshow("Canny", imutils.resize(self.Canny, height = height))
self.show_images()
self.check_keys()
def check_indiv_contour(self, c): #show the image and block for a key input to figure out what to do next
print("\n\nCONTOUR: {}".format(cv.contourArea(c)))
cv.imshow("Bounding Rects", self.img_rects)
if cv.waitKey(0) & 0xFF == ord('c'): #This cycles through to the next contour
return True
elif cv.waitKey(0) & 0xFF == ord('f'): #This makes it so that the rest of the contours are drawn in an instant
return False
elif cv.waitKey(0) & 0xFF == ord('q'): #quits the program
exit()
########################################################################################
############################# ANALYZING/ANNOTATING IMAGES ##############################
########################################################################################
def preprocess_canny_contours(self):
gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY) #get grayscale image
gray = cv.GaussianBlur(gray, self.blur, 0) #Apply a blur
edged = cv.Canny(gray, self.lower_canny, self.upper_canny) #Getting the canny contours
self.Canny = edged #assign the variable
contours = cv.findContours(edged.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE) #Get the contours of the Canny image [remember that this will return more contours than we need
#Because Canny just returns lines]
contours = imutils.grab_contours(contours) #Get the contours using imutils
return contours
def analyze_image(self): # getting an array of the potential letters from each license plate ROI
str_arr = []
#SHOWING THE ROI's
for count, (regionOfInterest, x, y, w, h) in enumerate(self.roi_array): #iterate through the ROI array
data = self.process_ROI(regionOfInterest, | letter_dict = {} #init the letter dictionary to get the letters that are put through the NN | random_line_split |
|
licensePlateDetectorOptimized.py |
def setup_exec(self):
self.settings_init() #initializing all of the settings
if optimize: #Currently, optimize makes it so that only the bottom portion of the screen is analyzed
self.offset = int(self.img.shape[0] * (self.divideArea - 1) / self.divideArea) #How many pixels in the y direction are not analyzed from the top
self.top_img = self.img[ : self.offset ] #Taking the top portion of the image and saving it for later
self.img = self.img[self.offset : ] #reassigning the image to the portion being analyzed
self.x = self.img.shape[1] #getting the width of the image
self.img_copy = self.img.copy() #getting copies for analysis/blurring
self.img_rects = self.img.copy() #the copy that will be used for bounding rectangles
self.Canny = None #Initializing variable to hold Canny image
self.run()
if optimize: #After being run, rejoin the images if optimize is on
self.img = np.append(self.top_img, self.img, axis=0)
self.img_rects = np.append(self.top_img, self.img_rects, axis=0)
if show_images_bool: #if show the images, show the images
self.show_images_exec()
def settings_init(self):
self.divideArea = 2.5 #This is the denominator for how much of the screen is analyzed (analyzes the [1/(variable)] portion of the image/vid)
#For example, if the bottom half should be analyzed, put in '2'
self.amt_digits = 6 #defining the amount of characters and digits that should be found on the license plate
self.ratio_max = 3 #This is the maximum width to height ratio that a license plate can be in the program (for the US, about 4 is good while for EU plates, about 6-7 is good)
self.ratio_min = 1.5 #This is the minimum width to height ratio
self.angle_min = 84 #After the angle of the cv.areaMinRect has a modulo 90 applied to it, the angle either needs to be close to upright (this value or above)
self.angle_max = 6 # or close to horizontal (this value or below) in degrees
self.img_size = self.img.shape[0] * self.img.shape[1]
#current size: about 240,000 pixels
self.area_min = int(self.img_size / 5000) #minimum area of the accepted bounding boxes -- it recognizes plates with smaller values but there is no way that characters can be picked out. No use to have smaller
self.area_max = int(self.img_size / 600) #max area of the accepted bounding boxes
self.lower_canny = 110 #lower value for canny thresholding
self.upper_canny = 120 #upper value for canny thresholding
#ASPECT variables are not used:
self.aspect_max = 1 #the max amount of area that the license plate can cover within a bounding box to be considered
self.aspect_min = 0.3 #the minimum amount of area that a license plate can cover within a bounding box to be considered
self.img_dilate = 40 #specifying the value that the pixels which are being brightened will be increased by
self.blur = (9, 9) #initializing the size component of the gaussian blur that is applied to the image
self.offset = 0 #initializing the variable which keeps track of how far from the top of the image the program begins to analyze
self.top_img = None #initializing the variable which may hold the top part of the image for later
self.element_structure = cv.getStructuringElement(shape=cv.MORPH_RECT, ksize=(5, 5)) #basic elem structure for blurring
self.letter_contour_min = 2000 #The minimum size a contour has to be for it to be considered for letter analysis
self.roi_array = [] #array for holding the ROI's
def run(self): #master run function for the program
_ = self.contour_manipulation(self.preprocess_canny_contours())
license_plate_arr = self.analyze_image()
########################################################################################
#################################### EXECUTE PROGRAM ###################################
########################################################################################
def contour_manipulation(self, contours):
ret = [] #init the ret array
for c in contours:
boolRect = self.check_min_rect(c) #checking whether the bounding rect encapsulates a valid license plate region
if boolRect: #if the region is valid, then append it to the return array
ret.append(c)
if checkIndividual: #if the check individual option is on, then go through the contours one-by-one, write them to the image, and show the image
checkIndividual = self.check_indiv_contour(c)
return ret
def show_images_exec(self, height = 300): #shows the important images that are being used for execution
# cv.imshow("Contours", imutils.resize(self.img_copy, height = height))
cv.imshow("Bounding Rects", self.img_rects)
cv.imshow("Canny", imutils.resize(self.Canny, height = height))
self.show_images()
self.check_keys()
def check_indiv_contour(self, c): #show the image and block for a key input to figure out what to do next
print("\n\nCONTOUR: {}".format(cv.contourArea(c)))
cv.imshow("Bounding Rects", self.img_rects)
if cv.waitKey(0) & 0xFF == ord('c'): #This cycles through to the next contour
return True
elif cv.waitKey(0) & 0xFF == ord('f'): #This makes it so that the rest of the contours are drawn in an instant
return False
elif cv.waitKey(0) & 0xFF == ord('q'): #quits the program
exit()
########################################################################################
############################# ANALYZING/ANNOTATING IMAGES ##############################
########################################################################################
def preprocess_canny_contours(self):
gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY) #get grayscale image
gray = cv.GaussianBlur(gray, self.blur, 0) #Apply a blur
edged = cv.Canny(gray, self.lower_canny, self.upper_canny) #Getting the canny contours
self.Canny = edged #assign the variable
contours = cv.findContours(edged.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE) #Get the contours of the Canny image [remember that this will return more contours than we need
#Because Canny just returns lines]
contours = imutils.grab_contours(contours) #Get the contours using imutils
return contours
def analyze_image(self): # getting an array of the potential letters from each license plate ROI
str_arr = []
#SHOWING THE ROI's
for count, (regionOfInterest, x, y, w, h) in enumerate(self.roi_array): #iterate through the ROI array
data = self.process_ROI(regionOfInterest, count) #process the ROI for letter regions
if data is not None and get_chars: #check if the data is found and if the get_chars option is on
str_arr.append(self.show_bounds_and_text(data, x, y, w, h)) #if get_chars, run the plate through the NN for the characters
return str_arr #return an array of the strings that were found on the screen
def show_bounds_and_text(self, data, x, y, w, h):
cv.rectangle(self.img_rects, (x, y), (x + w, y + h), (0, 255, 0), thickness=4) #draw a bounding rectangle on the img_rects image
neuralNet = NeuralNetwork() #init a NN (neural network)
ret_str = neuralNet.get_chars_string(data) #get the character string from the plate
cv.putText(self.img_rects, ret_str, (x, y - 5), cv.FONT_HERSHEY_DUPLEX, 1.7, (0, 255, 0), thickness=2) #write those characters to the image
return ret_str #return the string
########################################################################################
################################ | self.check_wait = check_wait #initializing whether we need to wait between drawing contours for debugging
if imgAddress is None and img is not None: #getting the image from the video
self.img = img
elif imgAddress is not None and img is None:
self.img = cv.resize(cv.imread(imgAddress), (860, 480))
else:
print("-----------------------ERROR FINDING IMAGE-----------------------")
exit(0)
if(counter == 0):
self.setup_exec() #execute the program every certain amount of frames
elif show_images_bool:
self.show_images() #Show the images if that option is on
self.check_keys() | identifier_body |
|
licensePlateDetectorOptimized.py | This is the denominator for how much of the screen is analyzed (analyzes the [1/(variable)] portion of the image/vid)
#For example, if the bottom half should be analyzed, put in '2'
self.amt_digits = 6 #defining the amount of characters and digits that should be found on the license plate
self.ratio_max = 3 #This is the maximum width to height ratio that a license plate can be in the program (for the US, about 4 is good while for EU plates, about 6-7 is good)
self.ratio_min = 1.5 #This is the minimum width to height ratio
self.angle_min = 84 #After the angle of the cv.areaMinRect has a modulo 90 applied to it, the angle either needs to be close to upright (this value or above)
self.angle_max = 6 # or close to horizontal (this value or below) in degrees
self.img_size = self.img.shape[0] * self.img.shape[1]
#current size: about 240,000 pixels
self.area_min = int(self.img_size / 5000) #minimum area of the accepted bounding boxes -- it recognizes plates with smaller values but there is no way that characters can be picked out. No use to have smaller
self.area_max = int(self.img_size / 600) #max area of the accepted bounding boxes
self.lower_canny = 110 #lower value for canny thresholding
self.upper_canny = 120 #upper value for canny thresholding
#ASPECT variables are not used:
self.aspect_max = 1 #the max amount of area that the license plate can cover within a bounding box to be considered
self.aspect_min = 0.3 #the minimum amount of area that a license plate can cover within a bounding box to be considered
self.img_dilate = 40 #specifying the value that the pixels which are being brightened will be increased by
self.blur = (9, 9) #initializing the size component of the gaussian blur that is applied to the image
self.offset = 0 #initializing the variable which keeps track of how far from the top of the image the program begins to analyze
self.top_img = None #initializing the variable which may hold the top part of the image for later
self.element_structure = cv.getStructuringElement(shape=cv.MORPH_RECT, ksize=(5, 5)) #basic elem structure for blurring
self.letter_contour_min = 2000 #The minimum size a contour has to be for it to be considered for letter analysis
self.roi_array = [] #array for holding the ROI's
def run(self): #master run function for the program
_ = self.contour_manipulation(self.preprocess_canny_contours())
license_plate_arr = self.analyze_image()
########################################################################################
#################################### EXECUTE PROGRAM ###################################
########################################################################################
def contour_manipulation(self, contours):
ret = [] #init the ret array
for c in contours:
boolRect = self.check_min_rect(c) #checking whether the bounding rect encapsulates a valid license plate region
if boolRect: #if the region is valid, then append it to the return array
ret.append(c)
if checkIndividual: #if the check individual option is on, then go through the contours one-by-one, write them to the image, and show the image
checkIndividual = self.check_indiv_contour(c)
return ret
def | (self, height = 300): #shows the important images that are being used for execution
# cv.imshow("Contours", imutils.resize(self.img_copy, height = height))
cv.imshow("Bounding Rects", self.img_rects)
cv.imshow("Canny", imutils.resize(self.Canny, height = height))
self.show_images()
self.check_keys()
def check_indiv_contour(self, c): #show the image and block for a key input to figure out what to do next
print("\n\nCONTOUR: {}".format(cv.contourArea(c)))
cv.imshow("Bounding Rects", self.img_rects)
if cv.waitKey(0) & 0xFF == ord('c'): #This cycles through to the next contour
return True
elif cv.waitKey(0) & 0xFF == ord('f'): #This makes it so that the rest of the contours are drawn in an instant
return False
elif cv.waitKey(0) & 0xFF == ord('q'): #quits the program
exit()
########################################################################################
############################# ANALYZING/ANNOTATING IMAGES ##############################
########################################################################################
def preprocess_canny_contours(self):
gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY) #get grayscale image
gray = cv.GaussianBlur(gray, self.blur, 0) #Apply a blur
edged = cv.Canny(gray, self.lower_canny, self.upper_canny) #Getting the canny contours
self.Canny = edged #assign the variable
contours = cv.findContours(edged.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE) #Get the contours of the Canny image [remember that this will return more contours than we need
#Because Canny just returns lines]
contours = imutils.grab_contours(contours) #Get the contours using imutils
return contours
def analyze_image(self): # getting an array of the potential letters from each license plate ROI
str_arr = []
#SHOWING THE ROI's
for count, (regionOfInterest, x, y, w, h) in enumerate(self.roi_array): #iterate through the ROI array
data = self.process_ROI(regionOfInterest, count) #process the ROI for letter regions
if data is not None and get_chars: #check if the data is found and if the get_chars option is on
str_arr.append(self.show_bounds_and_text(data, x, y, w, h)) #if get_chars, run the plate through the NN for the characters
return str_arr #return an array of the strings that were found on the screen
def show_bounds_and_text(self, data, x, y, w, h):
cv.rectangle(self.img_rects, (x, y), (x + w, y + h), (0, 255, 0), thickness=4) #draw a bounding rectangle on the img_rects image
neuralNet = NeuralNetwork() #init a NN (neural network)
ret_str = neuralNet.get_chars_string(data) #get the character string from the plate
cv.putText(self.img_rects, ret_str, (x, y - 5), cv.FONT_HERSHEY_DUPLEX, 1.7, (0, 255, 0), thickness=2) #write those characters to the image
return ret_str #return the string
########################################################################################
################################### CHECKING CONTOURS ##################################
########################################################################################
def process_ROI(self, roi, counter): #takes a license plate ROI as an input with a counter (for showing images) and segments the character ROI's
regionOfInterest = roi[int(roi.shape[0] / 4) : roi.shape[0] - int(roi.shape[0] / 5), int(roi.shape[1] / 18) : roi.shape[1] - int(roi.shape[1] / 18)]
#^cut down on the region of interest that is passed (values determined through trial and error)
name = "ROI {}".format(counter) #format the name
regionOfInterest = cv.cvtColor(regionOfInterest, cv.COLOR_BGR2GRAY) #convert the color to grayscale
regionOfInterest[: int(regionOfInterest.shape[0] / 6), :] += self.img_dilate #Increasing the brightness of the top of the image (BREAKS WITH HIGH VALUES because of overflow)
regionOfInterest = imutils.resize(regionOfInterest, height=200, inter=cv.INTER_AREA) #resize the region of interest bigger
image = cv.GaussianBlur(regionOfInterest, (0, 0), 3) #try to sharpen the image using a blur of 0, 0
image = cv.addWeighted(image, 1.5, regionOfInterest, -0.5, 0) #continue to try to sharpen the image
_, thresh = cv.threshold(image, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU) #threshold the image for the characters
thresh = cv.bitwise_not(thresh) #convert to bitwise not (so that the chars are now the white parts)
thresh = cv.erode(thresh, (81, 61), iterations = 15) #erode to try to break the erroneous connections that the computer picks up between the chars
contours, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) #find the contours
| show_images_exec | identifier_name |
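# --- Added reference sketch (not part of the original script): the GaussianBlur/addWeighted pair in
# --- process_ROI above is an "unsharp mask"-style sharpening step. In the conventional formulation
# --- the original image carries the positive weight and its blurred copy the negative one:
import cv2 as cv
import numpy as np
roi_gray = np.zeros((200, 400), dtype=np.uint8)              # placeholder grayscale plate crop
blurred = cv.GaussianBlur(roi_gray, (0, 0), 3)               # kernel size derived from sigma=3
sharpened = cv.addWeighted(roi_gray, 1.5, blurred, -0.5, 0)  # original + 0.5*(original - blurred)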
SelfOrganizingMap.py | (self, iteration, epoch):
#############################
# Description: Update the neighborhood size and learning rate
# iteration: number of current iteration
# epoch: total epochs to run the SOM for
########################################
self.neighborhoodSize = self.neighborhoodSizeInitial * (1 - (iteration / epoch))
self.learningRate = self.learningRateInitial * (1 - (iteration / epoch))
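# --- Added illustrative sketch (not part of the original class): update_parameters() applies a
# --- linear decay, shrinking both the neighborhood size and the learning rate toward 0 as the
# --- iteration count approaches the total number of epochs. Standalone version with assumed values:
def linear_decay(initial, iteration, total_epochs):
    return initial * (1 - iteration / total_epochs)

assert linear_decay(6.0, 0, 20) == 6.0    # full neighborhood at the first epoch
assert linear_decay(6.0, 10, 20) == 3.0   # halved midway through training
# (iterate() below clamps neighborhoodSize at 1 and learningRate at 0.2, so neither reaches 0.)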
def find_neighbor_indices(self, i, j):
# function finds the neighboring rows and columns to include
# i : i-th index
# j : j-th index
# dist: how big the neighborhood should span
#########################################################
rows = []
columns = []
# python indexing starts with 0 so adjust here
i = i + 1
j = j + 1
if i > self.hiddenSize[0] or i < 1 or j > self.hiddenSize[1] or j < 1:
neighborhood = set()
return neighborhood
rows = np.arange(i - int(self.neighborhoodSize), i + int(self.neighborhoodSize) + 1)
columns = np.arange(j - int(self.neighborhoodSize), j + int(self.neighborhoodSize) + 1)
# get neighbor indexes as a combination of rows and columns
neighborhood = set()
for row in rows:
for column in columns:
#do not do wrap around neighborhood
if row > self.hiddenSize[0] or row < 1 or column > self.hiddenSize[1] or column < 1:
continue
row = row % self.hiddenSize[0]
column = column % self.hiddenSize[1]
if row == 0:
row = self.hiddenSize[0]
if column == 0:
column = self.hiddenSize[1]
# do not update actual row, because it is used in the loop
row_temp = row - 1
column_temp = column - 1
neighborhood.add((row_temp, column_temp))
return neighborhood
def findWinningNode(self, x, windowSize):
## function to find winning node
# x: input observation
# format input for use in this function --- dtw distance
# x = np.reshape(x[0], (1, 1, len(x[0])))
####################################
# calculate distances (in Euclidean and DTW it is the minimum). Iterate over all nodes to find distance
distances = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
distances = distances + float('inf')
for i in range(0, self.hiddenSize[0]):
for j in range(0, self.hiddenSize[1]):
# get weights associated to i-th and j-th node
weights = self.weights_Kohonen[i, j, :,:]
# make sure correct shape
weights = np.reshape(weights, (1, np.shape(weights)[0], np.shape(weights)[1]))
# format inputs for dynamic time warping
# use dynamic time warping as distance measure which has a window size parameter
d1 = np.reshape(x,(1,np.shape(x)[1],np.shape(x)[2]))
d2 = np.reshape(weights, (1, np.shape(weights)[1], np.shape(weights)[2]))
distance = self.dtw_d(d1, d2, windowSize)
if distance != distance:
print('DTW error: Nan Value')
distances[i, j] = distance
# minimum value is winner
winnerNeuronIndex = np.argmin(distances)
return winnerNeuronIndex
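# --- Added illustrative sketch (not part of the original class): findWinningNode() returns a *flat*
# --- index into the hiddenSize grid; np.unravel_index (used in update_weights_Kohonen below)
# --- converts it back to a (row, column) coordinate. Dummy 3x4 example:
import numpy as np
distances = np.array([[9., 5., 7., 8.],
                      [6., 1., 4., 3.],
                      [7., 2., 8., 9.]])
flat_winner = np.argmin(distances)                          # -> 5
row, col = np.unravel_index(flat_winner, distances.shape)   # -> (1, 1), the smallest entry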
def propogateForward(self, x, windowSize):
############
# Description: Function forward propagates from input to grid
# x: single input
# windowSize: window size for dynamic time warping
##############################
# input to Kohonen
##############################
# make sure x is in correct shape for matrix multiplication
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
self.winnerNeuronIndex = self.findWinningNode(x, windowSize)
def update_weights_Kohonen(self, x):
############
# Description: Function updates the Kohonen layer (SOM layer) after one forward pass (i.e., forwardPropogate)
# x: single input
# make sure x is in correct shape
##############
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
# convert the single winning index into grid location (x, and y cordinate)
two_dIndex = np.unravel_index(self.winnerNeuronIndex, self.hiddenSize)
neighborhood = self.find_neighbor_indices(two_dIndex[0], two_dIndex[1])
# implement update formula to update all neighborhood
for neighbors in neighborhood:
i = neighbors[0]
j = neighbors[1]
# calculate the update
loss = 0
for m in range(0,np.shape(x)[1]):
update = self.learningRate * (x[0,m,:] - self.weights_Kohonen[i, j, m,:])
# update the weights
self.weights_Kohonen[i, j, m,:] = self.weights_Kohonen[i, j, m,:] + update
loss+= sum(abs(update))
return loss
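# --- Added illustrative sketch (not part of the original class): the loop above is the standard
# --- Kohonen update  w <- w + eta * (x - w),  applied to every node in the winner's neighborhood.
# --- One-node numeric example with an assumed learning rate:
import numpy as np
eta = 0.5                              # assumed learning rate
w = np.array([0.2, 0.4, 0.6])          # current weights of one neighborhood node
x = np.array([1.0, 0.0, 0.6])          # input vector
w_new = w + eta * (x - w)              # -> [0.6, 0.2, 0.6]: the node moves halfway toward x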
def iterate(self, inputs, epochs, path = '',windowSize = 0, targets = [], labels = [],legend_dict = {}, showPlot = 'False'):
############
# Description: Function iterates to organize the Kohonen layer
# inputs: all inputs
# epochs: epochs to iterate for
# path: Path to save SOM plots
# windowSize: windowSize to be used by DTW (for project), not usefull in assignment and set to 0.
# targets: keys or ids for each observation
# labels: labels, or ground truth.
#returns
#all_data: list of dictionary objects with 4 keys, 'target','x','y', and 'labels'
# x: x coordinate on SOM
# y: y cordinate on SOM
# target: keys or name of observation if provided.
# label: label (ground truth) of observation if provided.
##############
# reinitilize weights based on inputs
# initilize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim,self.inputSize))
# formula for weights initilization
for i in range(0, np.shape(inputs)[1]):
for j in range(0, np.shape(inputs)[2]):
firstPart = (np.mean(np.asarray(inputs)[:,i, j]) + np.random.uniform(-0.1, 0.1))
secondPart = (np.mean(np.asarray(inputs)[:,i, j]) * np.random.uniform(low=0, high=1, size=(self.hiddenSize[0], self.hiddenSize[1])))
weights = firstPart * secondPart
self.weights_Kohonen[:, :, i, j] = weights
#######################
# for epochs
weight_change_magnitude = []
for epoch in range(0, epochs):
# for each input
epoch_loss = 0
for i in range(0, len(inputs)):
# propogate forward
self.propogateForward(inputs[i], windowSize)
# update Kohonen
loss = self.update_weights_Kohonen(inputs[i])
epoch_loss += loss
if epoch % 20 == 0:
print('Epoch : ' + str(epoch) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
if len(path)>0:
self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,legend_dict = legend_dict )
self.update_parameters(epoch, epochs)
if self.neighborhoodSize < 1:
self.neighborhoodSize = 1
if self.learningRate < 0.2:
self.learningRate = 0.2
weight_change_magnitude.append(epoch_loss)
print('Epoch : ' + str(epoch + 1) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
#get Umatrix
self.Umatrix = self.createUmatrix(windowSize)
if len(path)>0:
all_data = self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,Umatrix = self.Umatrix,legend_dict = legend_dict ,write = False) #,
self.plotChange(weight_change_magnitude,showPlot, path)
return all_data
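# --- Added usage sketch (not part of the original class): iterate() returns a list of dicts with
# --- keys 'x', 'y' and, when targets/labels are supplied, 'target' and 'labels'. For example, to
# --- group observations by the SOM node they were mapped to (assuming all_data is that return value):
from collections import defaultdict
node_to_targets = defaultdict(list)
for d in all_data:
    node_to_targets[(d['x'], d['y'])].append(d.get('target'))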
def createUmatrix(self,windowSize):
#############################
# Description: create the umatrix
# windowSize: window size for dynamic time warping. The Umatrix function currently always uses Euclidean distance to avoid theoretical issues.
########################################
#set neighborhood size to 1 and reset it after the function
neighborHood_temp = copy.deepcopy(self.neighborhoodSize)
self.neighborhoodSize = 1 | update_parameters | identifier_name |
|
SelfOrganizingMap.py | ighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
if len(path)>0:
self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,legend_dict = legend_dict )
self.update_parameters(epoch, epochs)
if self.neighborhoodSize < 1:
self.neighborhoodSize = 1
if self.learningRate < 0.2:
self.learningRate = 0.2
weight_change_magnitude.append(epoch_loss)
print('Epoch : ' + str(epoch + 1) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
#get Umatrix
self.Umatrix = self.createUmatrix(windowSize)
if len(path)>0:
all_data = self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,Umatrix = self.Umatrix,legend_dict = legend_dict ,write = False) #,
self.plotChange(weight_change_magnitude,showPlot, path)
return all_data
def createUmatrix(self,windowSize):
#############################
# Description: create the umatrix
# windowSize: window size for dynamic time warping. The Umatrix function currently always uses Euclidean distance to avoid theoretical issues.
########################################
#set neighborhood size to 1 and reset it after the function
neighborHood_temp = copy.deepcopy(self.neighborhoodSize)
self.neighborhoodSize = 1
Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
# Perform 2D convolution with input data and kernel to get sum of neighboring nodes
for i in range(0,self.hiddenSize[0]):
for j in range(0,self.hiddenSize[1]):
#find all the neighbors for node at i,j
neighbors = self.find_neighbor_indices(i, j)
#remove self
neighbors.remove((i, j))
#get weights for node at i,j
weights = self.weights_Kohonen[i,j,:]
weights = np.reshape(weights, (1,np.shape(weights)[0],np.shape(weights)[1]))
#for all neighbors
for neighbor in neighbors:
#get x,y (i,j) coordinate for neighbor
x = neighbor[0]
y = neighbor[1]
#get neighbor weights
neighborWeights = self.weights_Kohonen[x,y,:]
neighborWeights = np.reshape(neighborWeights, (1, np.shape(neighborWeights)[0],np.shape(neighborWeights)[1]))
# use dynamic time warping as distance measure which has a window size parameter
#always use Euc distance for Umatrix
distance = self.dtw_d(weights, neighborWeights,0)
Umatrix[i,j] += distance
Umatrix[i,j] = Umatrix[i,j] / len(neighbors)
#reset neighborhoodSize
self.neighborhoodSize = copy.deepcopy(neighborHood_temp)
return Umatrix
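# --- Added illustrative sketch (not part of the original class): each U-matrix cell holds the mean
# --- distance between a node's weights and those of its immediate neighbours, so large values mark
# --- boundaries between clusters. Tiny 1-D example with scalar weights and absolute distance:
import numpy as np
weights = np.array([0.1, 0.2, 0.9])
u = [np.mean([abs(weights[i] - weights[j])
              for j in (i - 1, i + 1) if 0 <= j < len(weights)])
     for i in range(len(weights))]
# u is roughly [0.1, 0.4, 0.7]; the larger values on the right reflect the gap between the
# {0.1, 0.2} group and the 0.9 node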
def plotChange(self, weight_change_magnitude,showPlot ,path):
plt.figure()
plt.plot(np.arange(0,len(weight_change_magnitude)),weight_change_magnitude)
plt.xlabel('Epochs',fontsize = 22)
plt.ylabel('Weight change magnitude',fontsize = 22)
plt.ylim(0,max(weight_change_magnitude))
plt.xlim(0,len(weight_change_magnitude))
plt.savefig(path+'_weight_change.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
def plotMap(self, inputs, epoch, showPlot,windowSize, path = 'plot_epoch', targets = [],Umatrix = [], labels= [], legend_dict = {}, write = False):
if len(legend_dict.keys())==0:
setOfLabels= set(labels)
for l in setOfLabels:
legend_dict[l] = l
#colors to label points
colors = ['#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2','#f768a1','#c7e9c0','#74c476','#238b45','#fec44f']
#colors = ['#d7191c','#abdda4','#2b83ba']
#for legend
colors_used = set()
# plot observations with labels
plt.figure(figsize = (6,6))
#plot Umatrix first so other stuff is over it
if len(Umatrix)> 0:
#normalize U matrix
#ignore the zero padding for minimum
min_v = Umatrix.min()
max_v = Umatrix.max()
Umatrix = (Umatrix - min_v) / (max_v - min_v)
#update values less than 0 to zero
Umatrix[Umatrix<0] = 0
Umatrix = Umatrix * 255
plt.imshow(Umatrix.transpose(), cmap='Greys',alpha=1)
plotted = set()
#write to CSV
all_data = []
node_to_scatterSize = defaultdict(lambda: 30)
for i in range(0, len(inputs)):
input = inputs[i]
input = np.reshape(input, (1, np.shape(input)[0],np.shape(input)[1]))
winnderNode = self.findWinningNode(input, windowSize)
# convert to x - y coordinate
coord = np.unravel_index(winnderNode, self.hiddenSize)
#save to dict for writing to csv
d = {}
d['x'] = coord[0]
d['y'] = coord[1]
if len(targets)> 0:
d['target'] = targets[i]
if len(labels) >0:
d['labels'] = labels[i]
all_data.append(d)
if coord in plotted:
#just add some noise so duck and goose can show separately. They are exactly the same as per data
#shift = random.uniform(1, 4)
#shift = 0
x = coord[0]
y = coord[1] #+ shift
node_to_scatterSize[winnderNode] += 30
#print('scatter size increased.')
#print(node_to_scatterSize[winnderNode])
else:
plotted.add(coord)
x = coord[0]
y = coord[1]
#scatter plot at same location but annotation differently
#plt.scatter(coord[0], coord[1], s=30, color = '#2ca25f')
if len(labels)>0:
color = colors[labels[i]]
else:
color ='#2ca25f'
if color in colors_used:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
else:
colors_used.add(color)
if len(labels)>0:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color,label = legend_dict[labels[i]])
else:
plt.scatter(x, y, s=node_to_scatterSize[winnderNode], color = color)
#if len(targets)> 0:
# plt.annotate(targets[i], (x, y), fontsize=22)
plt.xlim(0 - 5, self.hiddenSize[0] + 5)
plt.ylim(0 - 5, self.hiddenSize[1] + 5)
plt.xlabel('Nodes', fontsize=22)
plt.ylabel('Nodes', fontsize=22)
plt.xticks([])
plt.yticks([])
plt.title('Kohonen Self-Organizing Map', fontsize=22)
if len(labels)>0:
plt.legend(fontsize = 18,bbox_to_anchor=(1.05, 1), loc='upper left')
plt.savefig(path+'_'+str(epoch)+'.png',bbox_inches = 'tight')
if showPlot == 'True':
plt.show()
else:
plt.close()
return all_data
def getWeights(self):
return self.weights_Kohonen
def getActivations(self):
return self.gridActivation
def getUmatrix(self):
return self.Umatrix
def sq_euc(self,s1, s2):
#author: Ali Javed
#email: [email protected]
#Version history:
#Version 1 . basis implementation of dynaimc time warping dependant.
#Version 2 (7 Nov 2019). changed variable names to be more representative and added comments.
#Inputs
#s1: signal 1, size 1 * m * n. where m is the number of variables, n is the timesteps.
#s2: signal 2, size 1 * m * n. where m is the number of variables, n is the timesteps.
#OUTPUT
#dist: Squared euclidean distance
| dist = ((s1 - s2) ** 2)
return dist.flatten().sum() | identifier_body |
|
SelfOrganizingMap.py | 1 or column > self.hiddenSize[1] or column < 1:
continue
row = row % self.hiddenSize[0]
column = column % self.hiddenSize[1]
if row == 0:
row = self.hiddenSize[0]
if column == 0:
column = self.hiddenSize[1]
# do not update actual row, because it is used in the loop
row_temp = row - 1
column_temp = column - 1
neighborhood.add((row_temp, column_temp))
return neighborhood
def findWinningNode(self, x, windowSize):
## function to find winning node
# x: input observation
# format input for use in this function --- dtw distance
# x = np.reshape(x[0], (1, 1, len(x[0])))
####################################
# calculate distances (in Euclidean and DTW it is the minimum). Iterate over all nodes to find distance
distances = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
distances = distances + float('inf')
for i in range(0, self.hiddenSize[0]):
|
# minimum value is winner
winnerNeuronIndex = np.argmin(distances)
return winnerNeuronIndex
def propogateForward(self, x, windowSize):
############
# Description: Function forward propagates from input to grid
# x: single input
# windowSize: window size for dynamic time warping
##############################
# input to Kohonen
##############################
# make sure x is in correct shape for matrix multiplication
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
self.winnerNeuronIndex = self.findWinningNode(x, windowSize)
def update_weights_Kohonen(self, x):
############
# Description: Function updates the Kohonen layer (SOM layer) after one forward pass (i.e., forwardPropogate)
# x: single input
# make sure x is in correct shape
##############
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
# convert the single winning index into grid location (x, and y cordinate)
two_dIndex = np.unravel_index(self.winnerNeuronIndex, self.hiddenSize)
neighborhood = self.find_neighbor_indices(two_dIndex[0], two_dIndex[1])
# implement update formula to update all neighborhood
for neighbors in neighborhood:
i = neighbors[0]
j = neighbors[1]
# calculate the update
loss = 0
for m in range(0,np.shape(x)[1]):
update = self.learningRate * (x[0,m,:] - self.weights_Kohonen[i, j, m,:])
# update the weights
self.weights_Kohonen[i, j, m,:] = self.weights_Kohonen[i, j, m,:] + update
loss+= sum(abs(update))
return loss
def iterate(self, inputs, epochs, path = '',windowSize = 0, targets = [], labels = [],legend_dict = {}, showPlot = 'False'):
############
# Description: Function iterates to organize the Kohonen layer
# inputs: all inputs
# epochs: epochs to iterate for
# path: Path to save SOM plots
# windowSize: windowSize to be used by DTW (for project), not usefull in assignment and set to 0.
# targets: keys or ids for each observation
# labels: labels, or ground truth.
#returns
#all_data: list of dictionary objects with 4 keys, 'target','x','y', and 'labels'
# x: x coordinate on SOM
# y: y cordinate on SOM
# target: keys or name of observation if provided.
# label: label (ground truth) of observation if provided.
##############
# reinitilize weights based on inputs
# initilize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim,self.inputSize))
# formula for weights initilization
for i in range(0, np.shape(inputs)[1]):
for j in range(0, np.shape(inputs)[2]):
firstPart = (np.mean(np.asarray(inputs)[:,i, j]) + np.random.uniform(-0.1, 0.1))
secondPart = (np.mean(np.asarray(inputs)[:,i, j]) * np.random.uniform(low=0, high=1, size=(self.hiddenSize[0], self.hiddenSize[1])))
weights = firstPart * secondPart
self.weights_Kohonen[:, :, i, j] = weights
#######################
# for epochs
weight_change_magnitude = []
for epoch in range(0, epochs):
# for each input
epoch_loss = 0
for i in range(0, len(inputs)):
# propogate forward
self.propogateForward(inputs[i], windowSize)
# update Kohonen
loss = self.update_weights_Kohonen(inputs[i])
epoch_loss += loss
if epoch % 20 == 0:
print('Epoch : ' + str(epoch) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
if len(path)>0:
self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,legend_dict = legend_dict )
self.update_parameters(epoch, epochs)
if self.neighborhoodSize < 1:
self.neighborhoodSize = 1
if self.learningRate < 0.2:
self.learningRate = 0.2
weight_change_magnitude.append(epoch_loss)
print('Epoch : ' + str(epoch + 1) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
#get Umatrix
self.Umatrix = self.createUmatrix(windowSize)
if len(path)>0:
all_data = self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,Umatrix = self.Umatrix,legend_dict = legend_dict ,write = False) #,
self.plotChange(weight_change_magnitude,showPlot, path)
return all_data
def createUmatrix(self,windowSize):
#############################
# Description: create the umatrix
# windowSize: window size for dynamic time warping. The Umatrix function currently always uses Euclidean distance to avoid theoretical issues.
########################################
#set neighborhood size to 1 and reset it after the function
neighborHood_temp = copy.deepcopy(self.neighborhoodSize)
self.neighborhoodSize = 1
Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
# Perform 2D convolution with input data and kernel to get sum of neighboring nodes
for i in range(0,self.hiddenSize[0]):
for j in range(0,self.hiddenSize[1]):
#find all the neighbors for node at i,j
neighbors = self.find_neighbor_indices(i, j)
#remove self
neighbors.remove((i, j))
#get weights for node at i,j
weights = self.weights_Kohonen[i,j,:]
weights = np.reshape(weights, (1,np.shape(weights)[0],np.shape(weights)[1]))
#for all neighbors
for neighbor in neighbors:
#get x,y (i,j) coordinate for neighbor
x = neighbor[0]
y = neighbor[1]
#get neighbor weights
neighborWeights = self.weights_Kohonen[x,y,:]
neighborWeights = np.reshape(neighborWeights, (1, np.shape(neighborWeights)[0],np.shape(neighborWeights)[1]))
# use dynamic time warping as distance measure which has a window size parameter
#always use Euc distance for Umatrix
distance = self.dtw_d(weights, neighborWeights,0)
Umatrix[i,j] += distance
Umatrix[i,j] = Umatrix[i,j] / len(neighbors)
#reset neighborhoodSize
self.neighborhoodSize = copy.deepcopy(neighborH | for j in range(0, self.hiddenSize[1]):
# get weights associated to i-th and j-th node
weights = self.weights_Kohonen[i, j, :,:]
# make sure correct shape
weights = np.reshape(weights, (1, np.shape(weights)[0], np.shape(weights)[1]))
# format inputs for dynamic time warping
# use dynamic time warping as distance measure which has a window size parameter
d1 = np.reshape(x,(1,np.shape(x)[1],np.shape(x)[2]))
d2 = np.reshape(weights, (1, np.shape(weights)[1], np.shape(weights)[2]))
distance = self.dtw_d(d1, d2, windowSize)
if distance != distance:
print('DTW error: Nan Value')
distances[i, j] = distance | conditional_block |
SelfOrganizingMap.py | < 1 or column > self.hiddenSize[1] or column < 1:
continue
row = row % self.hiddenSize[0]
column = column % self.hiddenSize[1]
if row == 0:
row = self.hiddenSize[0]
if column == 0:
column = self.hiddenSize[1]
# do not update actual row, because it is used in the loop
row_temp = row - 1
column_temp = column - 1
neighborhood.add((row_temp, column_temp))
return neighborhood
def findWinningNode(self, x, windowSize):
## function to find winning node
# x: input observation
# format input for use in this function --- dtw distance
# x = np.reshape(x[0], (1, 1, len(x[0])))
####################################
# calculate distances (in Euclidean and DTW it is the minimum). Iterate over all nodes to find distance
distances = np.zeros((self.hiddenSize[0], self.hiddenSize[1]))
distances = distances + float('inf')
for i in range(0, self.hiddenSize[0]):
for j in range(0, self.hiddenSize[1]):
# get weights associated to i-th and j-th node
weights = self.weights_Kohonen[i, j, :,:]
# make sure correct shape
weights = np.reshape(weights, (1, np.shape(weights)[0], np.shape(weights)[1]))
# format inputs for dynamic time warping
# use dynamic time warping as distance measure which has a window size parameter
d1 = np.reshape(x,(1,np.shape(x)[1],np.shape(x)[2]))
d2 = np.reshape(weights, (1, np.shape(weights)[1], np.shape(weights)[2]))
distance = self.dtw_d(d1, d2, windowSize)
if distance != distance:
print('DTW error: Nan Value')
distances[i, j] = distance
# minimum value is winner
winnerNeuronIndex = np.argmin(distances)
return winnerNeuronIndex
def propogateForward(self, x, windowSize):
############
# Description: Function forward propagates from input to grid
# x: single input
# windowSize: window size for dynamic time warping
##############################
# input to Kohonen
##############################
# make sure x is in correct shape for matrix multiplication
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
self.winnerNeuronIndex = self.findWinningNode(x, windowSize)
def update_weights_Kohonen(self, x):
############
# Description: Function updates the Kohonen layer (SOM layer) after one forward pass (i.e., forwardPropogate)
# x: single input
# make sure x is in correct shape
##############
x = np.reshape(x, (1, np.shape(x)[0], np.shape(x)[1]))
# convert the single winning index into grid location (x, and y cordinate)
two_dIndex = np.unravel_index(self.winnerNeuronIndex, self.hiddenSize)
neighborhood = self.find_neighbor_indices(two_dIndex[0], two_dIndex[1])
# implement update formula to update all neighborhood
for neighbors in neighborhood:
i = neighbors[0]
j = neighbors[1]
# calculate the update
loss = 0
for m in range(0,np.shape(x)[1]):
update = self.learningRate * (x[0,m,:] - self.weights_Kohonen[i, j, m,:])
# update the weights
self.weights_Kohonen[i, j, m,:] = self.weights_Kohonen[i, j, m,:] + update
loss+= sum(abs(update))
return loss
def iterate(self, inputs, epochs, path = '',windowSize = 0, targets = [], labels = [],legend_dict = {}, showPlot = 'False'):
############
# Description: Function iterates to organize the Kohonen layer
# inputs: all inputs
# epochs: epochs to iterate for
# path: Path to save SOM plots
# windowSize: windowSize to be used by DTW (for project), not usefull in assignment and set to 0.
# targets: keys or ids for each observation
# labels: labels, or ground truth.
#returns
#all_data: list of dictionary objects with 4 keys, 'target','x','y', and 'labels'
# x: x coordinate on SOM
# y: y cordinate on SOM
# target: keys or name of observation if provided.
# label: label (ground truth) of observation if provided.
##############
# reinitilize weights based on inputs
# initilize weights between 0 and 1 for a 3d weights matrix
self.weights_Kohonen = np.random.uniform(low=0, high=1,
size=(self.hiddenSize[0], self.hiddenSize[1], self.dim,self.inputSize))
# formula for weights initilization
for i in range(0, np.shape(inputs)[1]):
for j in range(0, np.shape(inputs)[2]):
firstPart = (np.mean(np.asarray(inputs)[:,i, j]) + np.random.uniform(-0.1, 0.1))
secondPart = (np.mean(np.asarray(inputs)[:,i, j]) * np.random.uniform(low=0, high=1, size=(self.hiddenSize[0], self.hiddenSize[1])))
weights = firstPart * secondPart
self.weights_Kohonen[:, :, i, j] = weights
#######################
# for epochs
weight_change_magnitude = []
for epoch in range(0, epochs):
# for each input
epoch_loss = 0
for i in range(0, len(inputs)):
# propogate forward
self.propogateForward(inputs[i], windowSize)
# update Kohonen
loss = self.update_weights_Kohonen(inputs[i])
epoch_loss += loss
if epoch % 20 == 0:
print('Epoch : ' + str(epoch) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
if len(path)>0:
self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,legend_dict = legend_dict )
self.update_parameters(epoch, epochs)
if self.neighborhoodSize < 1:
self.neighborhoodSize = 1
if self.learningRate < 0.2:
self.learningRate = 0.2
weight_change_magnitude.append(epoch_loss)
print('Epoch : ' + str(epoch + 1) + ' complete.')
print('Neighborhood Size : '+str(self.neighborhoodSize))
print('Learning Rate : '+str(self.learningRate))
print('**************************************')
#get Umatrix
self.Umatrix = self.createUmatrix(windowSize)
if len(path)>0:
all_data = self.plotMap(inputs, epoch, showPlot, windowSize,path,targets,labels = labels,Umatrix = self.Umatrix,legend_dict = legend_dict ,write = False) #,
self.plotChange(weight_change_magnitude,showPlot, path)
return all_data
def createUmatrix(self,windowSize):
#############################
# Description: create the umatrix
# windowSize: window size for dynamic time warping. The Umatrix function currently always uses Euclidean distance to avoid theoretical issues.
########################################
#set neighborhood size to 1 and reset it after the function
neighborHood_temp = copy.deepcopy(self.neighborhoodSize)
self.neighborhoodSize = 1
| #find all the neighbors for node at i,j
neighbors = self.find_neighbor_indices(i, j)
#remove self
neighbors.remove((i, j))
#get weights for node at i,j
weights = self.weights_Kohonen[i,j,:]
weights = np.reshape(weights, (1,np.shape(weights)[0],np.shape(weights)[1]))
#for all neighbors
for neighbor in neighbors:
#get x,y (i,j) coordinate for neighbor
x = neighbor[0]
y = neighbor[1]
#get neighbor weights
neighborWeights = self.weights_Kohonen[x,y,:]
neighborWeights = np.reshape(neighborWeights, (1, np.shape(neighborWeights)[0],np.shape(neighborWeights)[1]))
# use dynamic time warping as distance measure which has a window size parameter
#always use Euc distance for Umatrix
distance = self.dtw_d(weights, neighborWeights,0)
Umatrix[i,j] += distance
Umatrix[i,j] = Umatrix[i,j] / len(neighbors)
#reset neighborhoodSize
self.neighborhoodSize = copy.deepcopy( | Umatrix = np.zeros((self.hiddenSize[0],self.hiddenSize[1]))
# Perform 2D convolution with input data and kernel to get sum of neighboring nodes
for i in range(0,self.hiddenSize[0]):
for j in range(0,self.hiddenSize[1]): | random_line_split |
train.py | #训练集变化不大时使训练加速
def logging_func(log_file, message):
with open(log_file,'a') as f:
f.write(message)
|
def main():
setup_seed(1024)
# Hyper Parameters
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='D:/data/',
help='path to datasets')
parser.add_argument('--data_name', default='f30k_precomp',
help='{coco,f30k}_precomp')
parser.add_argument('--vocab_path', default='./vocab/',
help='Path to saved vocabulary json files.')
parser.add_argument('--margin', default=0.2, type=float,
help='Rank loss margin.')
parser.add_argument('--grad_clip', default=2.0, type=float,
help='Gradient clipping threshold.')
parser.add_argument('--num_epochs', default=20, type=int,
help='Number of training epochs.')
parser.add_argument('--batch_size', default=128, type=int,
help='Size of a training mini-batch.')
parser.add_argument('--word_dim', default=300, type=int,
help='Dimensionality of the word embedding.')
parser.add_argument('--embed_size', default=1024, type=int,
help='Dimensionality of the joint embedding.')
parser.add_argument('--num_layers', default=1, type=int,
help='Number of GRU layers.')
parser.add_argument('--learning_rate', default=.0002, type=float,
help='Initial learning rate.')
parser.add_argument('--lr_update', default=15, type=int,
help='Number of epochs to update the learning rate.')
parser.add_argument('--workers', default=0, type=int,
help='Number of data loader workers.')
parser.add_argument('--log_step', default=100, type=int,
help='Number of steps to print and record the log.')
parser.add_argument('--logger_name', default='./runs/test2',
help='Path to save Tensorboard log.')
parser.add_argument('--model_name', default='./runs/test2',
help='Path to save the model.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--img_dim', default=2048, type=int,
help='Dimensionality of the image embedding.')
parser.add_argument('--no_imgnorm', action='store_true',
help='Do not normalize the image embeddings.')
parser.add_argument('--no_txtnorm', action='store_true',
help='Do not normalize the text embeddings.')
parser.add_argument('--correct_type', default="prob",
help='equal|prob')
parser.add_argument('--precomp_enc_type', default="basic",
help='basic|weight_norm')
parser.add_argument('--bi_gru', action='store_true', default=True,
help='Use bidirectional GRU.')
parser.add_argument('--lambda_softmax', default=20., type=float,
help='Attention softmax temperature.')
opt = parser.parse_known_args()[0]
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.info('train')
# Load Vocabulary Wrapper
vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
word2idx = vocab.word2idx
opt.vocab_size = len(vocab)
# Load data loaders
train_loader, val_loader = get_loaders(
opt.data_name, vocab, opt.batch_size, opt.workers, opt)
# Construct the model
model = SCAN(word2idx, opt)
model.cuda()
model = nn.DataParallel(model)
criterion = ContrastiveLoss(margin=opt.margin)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
best_rsum = 0
start_epoch = 0
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
start_epoch = checkpoint['epoch'] + 1
best_rsum = checkpoint['best_rsum']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
# Eiters is used to show logs as the continuation of another
# training
# model.Eiters = checkpoint['Eiters']
print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
.format(opt.resume, start_epoch, best_rsum))
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
# Train the Model
for epoch in range(start_epoch, opt.num_epochs):
print(opt.logger_name)
print(opt.model_name)
if not os.path.exists(opt.model_name):
os.makedirs(opt.model_name)
message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
log_file = os.path.join(opt.logger_name, "performance.log")
logging_func(log_file, message)
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
train(opt, train_loader, model, criterion, optimizer, epoch, val_loader)
# evaluate on validation set
rsum = validate(opt, val_loader, model)
# remember best R@ sum and save checkpoint
is_best = rsum > best_rsum
best_rsum = max(rsum, best_rsum)
if not os.path.exists(opt.model_name):
os.mkdir(opt.model_name)
save_checkpoint({
'epoch': epoch,
'model': model.state_dict(),
'best_rsum': best_rsum,
'opt': opt,
'optimizer': optimizer.state_dict(),
}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=opt.model_name + '/')
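# --- Added usage sketch (paths and the epoch number are placeholders): the script is configured
# --- entirely through argparse, e.g. for the Flickr30K precomputed features:
#   python train.py --data_path D:/data/ --data_name f30k_precomp --vocab_path ./vocab/ \
#       --batch_size 128 --num_epochs 20 --logger_name ./runs/test2 --model_name ./runs/test2
# --- and training can be resumed from a saved checkpoint with:
#   python train.py --resume ./runs/test2/checkpoint_10.pth.tar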
class DataPrefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.images, self.captions, self.length, self.index = next(self.loader)
except StopIteration:
self.images, self.captions, self.length, self.index = None, None, None, None
return
with torch.cuda.stream(self.stream):
self.images = self.images.cuda()
self.captions = self.captions.cuda()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
self.preload()
return self.images, self.captions, self.length, self.index
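# --- Hedged note (not part of the original SCAN code): for the stream-based copy in
# DataPrefetcher.preload() to actually overlap with compute, the host batches generally need
# to be in pinned memory and the copies issued with non_blocking=True, e.g.:
#
#     with torch.cuda.stream(self.stream):
#         self.images = self.images.pin_memory().cuda(non_blocking=True)
#         self.captions = self.captions.pin_memory().cuda(non_blocking=True)
#
# Alternatively, construct the DataLoader with pin_memory=True so batches arrive pinned.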
def train(opt, train_loader, model, criterion, optimizer, epoch, val_loader):
# average meters to record the training statistics
batch_time = AverageMeter()
data_time = AverageMeter()
train_logger = LogCollector()
run_time = 0
start_time = time.time()
prefetcher = DataPrefetcher(train_loader)
images, captions, lengths, index = prefetcher.next()
i = 0
while images is not None:
# switch to train mode
model.train()
# measure data loading time
model.logger = train_logger
optimizer.zero_grad()
# Update the model
if torch.cuda.device_count() > 1:
images = images.repeat(torch.cuda.device_count(), 1, 1)
score = model(images, captions, lengths, index)
loss = criterion(score)
loss.backward()
if opt.grad_clip > 0:
clip_grad_norm_(model.parameters(), opt.grad_clip)
optimizer.step()
if (i + 1) % opt.log_step == 0:
run_time += time.time() - start_time
log = "epoch: %d; batch: %d/%d; loss: %.6f; time: %.4f" % (epoch,
i, len(train_loader), loss.data.item(),
run_time)
print(log, flush=True)
start_time = time.time()
run_time = 0
# validate at every val_step
images, captions, lengths, index = prefetcher.next()
i += 1
def validate(opt, val_loader, model):
# compute the encoding for all the validation images and captions
img_embs, img_means, cap_embs, cap_lens, cap_means = encode_data(
model, val_loader, opt.log_step, logging.info)
print(img_embs.shape, cap_embs.shape)
img_embs = numpy.array([img_embs[i] for i in range(0, len(img_embs), 5)])
start = time.time()
sims = shard_xattn(model, img_embs, img_means, cap_embs, cap_lens, cap_means, opt, shard_size=128)
end = time.time()
print("calculate similarity time:", end-start)
# caption retrieval
(r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1, r5, r10, medr, meanr))
# image retrieval
(r1i, r5i, r10i, | f.close()
| random_line_split |
train.py | # speeds up training when the training set does not change much
def logging_func(log_file, message):
with open(log_file,'a') as f:
f.write(message)
f.close()
def main():
setup_seed(1024)
# Hyper Parameters
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='D:/data/',
help='path to datasets')
parser.add_argument('--data_name', default='f30k_precomp',
help='{coco,f30k}_precomp')
parser.add_argument('--vocab_path', default='./vocab/',
help='Path to saved vocabulary json files.')
parser.add_argument('--margin', default=0.2, type=float,
help='Rank loss margin.')
parser.add_argument('--grad_clip', default=2.0, type=float,
help='Gradient clipping threshold.')
parser.add_argument('--num_epochs', default=20, type=int,
help='Number of training epochs.')
parser.add_argument('--batch_size', default=128, type=int,
help='Size of a training mini-batch.')
parser.add_argument('--word_dim', default=300, type=int,
help='Dimensionality of the word embedding.')
parser.add_argument('--embed_size', default=1024, type=int,
help='Dimensionality of the joint embedding.')
parser.add_argument('--num_layers', default=1, type=int,
help='Number of GRU layers.')
parser.add_argument('--learning_rate', default=.0002, type=float,
help='Initial learning rate.')
parser.add_argument('--lr_update', default=15, type=int,
help='Number of epochs to update the learning rate.')
parser.add_argument('--workers', default=0, type=int,
help='Number of data loader workers.')
parser.add_argument('--log_step', default=100, type=int,
help='Number of steps to print and record the log.')
parser.add_argument('--logger_name', default='./runs/test2',
help='Path to save Tensorboard log.')
parser.add_argument('--model_name', default='./runs/test2',
help='Path to save the model.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--img_dim', default=2048, type=int,
help='Dimensionality of the image embedding.')
parser.add_argument('--no_imgnorm', action='store_true',
help='Do not normalize the image embeddings.')
parser.add_argument('--no_txtnorm', action='store_true',
help='Do not normalize the text embeddings.')
parser.add_argument('--correct_type', default="prob",
help='equal|prob')
parser.add_argument('--precomp_enc_type', default="basic",
help='basic|weight_norm')
parser.add_argument('--bi_gru', action='store_true', default=True,
help='Use bidirectional GRU.')
parser.add_argument('--lambda_softmax', default=20., type=float,
help='Attention softmax temperature.')
opt = parser.parse_known_args()[0]
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.info('train')
# Load Vocabulary Wrapper
vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
word2idx = vocab.word2idx
opt.vocab_size = len(vocab)
# Load data loaders
train_loader, val_loader = get_loaders(
opt.data_name, vocab, opt.batch_size, opt.workers, opt)
# Construct the model
model = SCAN(word2idx, opt)
model.cuda()
model = nn.DataParallel(model)
criterion = ContrastiveLoss(margin=opt.margin)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
best_rsum = 0
start_epoch = 0
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
start_epoch = checkpoint['epoch'] + 1
best_rsum = checkpoint['best_rsum']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
# Eiters is used to show logs as the continuation of another
# training
# model.Eiters = checkpoint['Eiters']
print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
.format(opt.resume, start_epoch, best_rsum))
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
# Train the Model
for epoch in range(start_epoch, opt.num_epochs):
print(opt.logger_name)
print(opt.model_name)
if not os.path.exists(opt.model_name):
os.makedirs(opt.model_name)
message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
log_file = os.path.join(opt.logger_name, "performance.log")
logging_func(log_file, message)
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
train(opt, train_loader, model, criterion, optimizer, epoch, val_loader)
# evaluate on validation set
rsum = validate(opt, val_loader, model)
# remember best R@ sum and save checkpoint
is_best = rsum > best_rsum
best_rsum = max(rsum, best_rsum)
if not os.path.exists(opt.model_name):
os.mkdir(opt.model_name)
save_checkpoint({
'epoch': epoch,
'model': model.state_dict(),
'best_rsum': best_rsum,
'opt': opt,
'optimizer': optimizer.state_dict(),
}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=opt.model_name + '/')
class DataPrefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.images, self.captions, self.length, self.index = next(self.loader)
except StopIteration:
self.images, self.captions, self.length, self.index = None, None, None, None
return
with torch.cuda.stream(self.stream):
self.images = self.images.cuda()
self.captions = self.captions.cuda()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
self.preload()
return self.images, self.captions, self.length, self.index
def train(opt, train_loader, model, criterion, optimizer, epoch, val_loader):
# average meters to record the training statistics
batch_time = AverageMeter()
data_time = AverageMeter()
train_logger = LogCollector()
run_time = 0
start_time = time.time()
prefetcher = DataPrefetcher(train_loader)
images, captions, lengths, index = prefetcher.next()
i = 0
while images is not None:
# switch to train mode
model.train()
# measure data loading time
model.logger = train_logger
optimizer.zero_grad()
# Update the model
if torch.cuda.device_count() > 1:
images = images.repeat(torch.cuda.device_count(), 1, 1)
score = model(images, captions, lengths, index)
loss = criterion(score)
loss.backward()
if opt.grad_clip > 0:
clip_grad_norm_(model.parameters(), opt.grad_clip)
optimizer.step()
if (i + 1) % opt.log_step == 0:
run_time += time.time() - start_time
log = "epoch: %d; batch: %d/%d; loss: %.6f; time: %.4f" % (epoch,
i, len(train_loader), loss.data.item(),
run_time)
print(log, flush=True)
start_time = time.time()
run_time = 0
# validate at every val_step
images, captions, lengths, index = prefetcher.next()
i += 1
def validate(opt, val_loader, model):
| te the encoding for all the validation images and captions
img_embs, img_means, cap_embs, cap_lens, cap_means = encode_data(
model, val_loader, opt.log_step, logging.info)
print(img_embs.shape, cap_embs.shape)
img_embs = numpy.array([img_embs[i] for i in range(0, len(img_embs), 5)])
start = time.time()
sims = shard_xattn(model, img_embs, img_means, cap_embs, cap_lens, cap_means, opt, shard_size=128)
end = time.time()
print("calculate similarity time:", end-start)
# caption retrieval
(r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1, r5, r10, medr, meanr))
# image retrieval
(r1i, r5i, r10i, med | # compu | identifier_name |
train.py | # speeds up training when the training set does not change much
def logging_func(log_file, message):
with open(log_file,'a') as f:
f.write(message)
f.close()
def main():
setup_seed(1024)
# Hyper Parameters
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='D:/data/',
help='path to datasets')
parser.add_argument('--data_name', default='f30k_precomp',
help='{coco,f30k}_precomp')
parser.add_argument('--vocab_path', default='./vocab/',
help='Path to saved vocabulary json files.')
parser.add_argument('--margin', default=0.2, type=float,
help='Rank loss margin.')
parser.add_argument('--grad_clip', default=2.0, type=float,
help='Gradient clipping threshold.')
parser.add_argument('--num_epochs', default=20, type=int,
help='Number of training epochs.')
parser.add_argument('--batch_size', default=128, type=int,
help='Size of a training mini-batch.')
parser.add_argument('--word_dim', default=300, type=int,
help='Dimensionality of the word embedding.')
parser.add_argument('--embed_size', default=1024, type=int,
help='Dimensionality of the joint embedding.')
parser.add_argument('--num_layers', default=1, type=int,
help='Number of GRU layers.')
parser.add_argument('--learning_rate', default=.0002, type=float,
help='Initial learning rate.')
parser.add_argument('--lr_update', default=15, type=int,
help='Number of epochs to update the learning rate.')
parser.add_argument('--workers', default=0, type=int,
help='Number of data loader workers.')
parser.add_argument('--log_step', default=100, type=int,
help='Number of steps to print and record the log.')
parser.add_argument('--logger_name', default='./runs/test2',
help='Path to save Tensorboard log.')
parser.add_argument('--model_name', default='./runs/test2',
help='Path to save the model.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--img_dim', default=2048, type=int,
help='Dimensionality of the image embedding.')
parser.add_argument('--no_imgnorm', action='store_true',
help='Do not normalize the image embeddings.')
parser.add_argument('--no_txtnorm', action='store_true',
help='Do not normalize the text embeddings.')
parser.add_argument('--correct_type', default="prob",
help='equal|prob')
parser.add_argument('--precomp_enc_type', default="basic",
help='basic|weight_norm')
parser.add_argument('--bi_gru', action='store_true', default=True,
help='Use bidirectional GRU.')
parser.add_argument('--lambda_softmax', default=20., type=float,
help='Attention softmax temperature.')
opt = parser.parse_known_args()[0]
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.info('train')
# Load Vocabulary Wrapper
vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
word2idx = vocab.word2idx
opt.vocab_size = len(vocab)
# Load data loaders
train_loader, val_loader = get_loaders(
opt.data_name, vocab, opt.batch_size, opt.workers, opt)
# Construct the model
model = SCAN(word2idx, opt)
model.cuda()
model = nn.DataParallel(model)
criterion = ContrastiveLoss(margin=opt.margin)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
best_rsum = 0
start_epoch = 0
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
start_epoch = checkpoint['epoch'] + 1
best_rsum = checkpoint['best_rsum']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
# Eiters is used to show logs as the continuation of another
# training
# model.Eiters = checkpoint['Eiters']
print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
.format(opt.resume, start_epoch, best_rsum))
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
# Train the Model
for epoch in range(start_epoch, opt.num_epochs):
print(opt.logger_name)
print(opt.model_name)
if not os.path.exists(opt.model_name):
os.makedirs(opt.model_name)
message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
log_file = os.path.join(opt.logger_name, "performance.log")
logging_func(log_file, message)
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
train(opt, train_loader, model, criterion, optimizer, epoch, val_loader)
# evaluate on validation set
rsum = validate(opt, val_loader, model)
# remember best R@ sum and save checkpoint
is_best = rsum > best_rsum
best_rsum = max(rsum, best_rsum)
if not os.path.exists(opt.model_name):
os.mkdir(opt.model_name)
save_checkpoint({
'epoch': epoch,
'model': model.state_dict(),
'best_rsum': best_rsum,
'opt': opt,
'optimizer': optimizer.state_dict(),
}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=opt.model_name + '/')
class DataPrefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.images, self.captions, self.length, self.index = next(self.loader)
except StopIteration:
self.images, self.captions, self.length, self.index = None, None, None, None
return
with torch.cuda.stream(self.stream):
self.images = self.images.cuda()
self.captions = self.captions.cuda()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
self.preload()
return self.images, self.captions, self.length, self.index
def train(opt, train_loader, model, criterion, optimizer, epoch, val_loader):
# average meters to record the training statistics
batch_time = AverageMeter()
data_time = AverageMeter()
train_logger = LogCollector()
run_time = 0
start_time = time.time()
prefetcher = DataPrefetcher(train_loader)
images, captions, lengths, index = prefetcher.next()
i = 0
while images is not None:
# switch to train mode
model.train()
# measure data loading time
model.logger = train_logger
optimizer.zero_grad()
# Update the model
if torch.cuda.device_count() > 1:
images = images.repeat(torch.cuda.device_count(), 1, 1)
score = model(images, captions, lengths, index)
loss = criterion(score)
loss.backward()
if opt.grad_clip > 0:
clip_grad_norm_(model.parameters(), op | if (i + 1) % opt.log_step == 0:
run_time += time.time() - start_time
log = "epoch: %d; batch: %d/%d; loss: %.6f; time: %.4f" % (epoch,
i, len(train_loader), loss.data.item(),
run_time)
print(log, flush=True)
start_time = time.time()
run_time = 0
# validate at every val_step
images, captions, lengths, index = prefetcher.next()
i += 1
def validate(opt, val_loader, model):
# compute the encoding for all the validation images and captions
img_embs, img_means, cap_embs, cap_lens, cap_means = encode_data(
model, val_loader, opt.log_step, logging.info)
print(img_embs.shape, cap_embs.shape)
img_embs = numpy.array([img_embs[i] for i in range(0, len(img_embs), 5)])
start = time.time()
sims = shard_xattn(model, img_embs, img_means, cap_embs, cap_lens, cap_means, opt, shard_size=128)
end = time.time()
print("calculate similarity time:", end-start)
# caption retrieval
(r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1, r5, r10, medr, meanr))
# image retrieval
(r1i, r5i, r10i, med | t.grad_clip)
optimizer.step()
| conditional_block |
train.py | ('--img_dim', default=2048, type=int,
help='Dimensionality of the image embedding.')
parser.add_argument('--no_imgnorm', action='store_true',
help='Do not normalize the image embeddings.')
parser.add_argument('--no_txtnorm', action='store_true',
help='Do not normalize the text embeddings.')
parser.add_argument('--correct_type', default="prob",
help='equal|prob')
parser.add_argument('--precomp_enc_type', default="basic",
help='basic|weight_norm')
parser.add_argument('--bi_gru', action='store_true', default=True,
help='Use bidirectional GRU.')
parser.add_argument('--lambda_softmax', default=20., type=float,
help='Attention softmax temperature.')
opt = parser.parse_known_args()[0]
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.info('train')
# Load Vocabulary Wrapper
vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
word2idx = vocab.word2idx
opt.vocab_size = len(vocab)
# Load data loaders
train_loader, val_loader = get_loaders(
opt.data_name, vocab, opt.batch_size, opt.workers, opt)
# Construct the model
model = SCAN(word2idx, opt)
model.cuda()
model = nn.DataParallel(model)
criterion = ContrastiveLoss(margin=opt.margin)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
best_rsum = 0
start_epoch = 0
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
start_epoch = checkpoint['epoch'] + 1
best_rsum = checkpoint['best_rsum']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
# Eiters is used to show logs as the continuation of another
# training
# model.Eiters = checkpoint['Eiters']
print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
.format(opt.resume, start_epoch, best_rsum))
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
# Train the Model
for epoch in range(start_epoch, opt.num_epochs):
print(opt.logger_name)
print(opt.model_name)
if not os.path.exists(opt.model_name):
os.makedirs(opt.model_name)
message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
log_file = os.path.join(opt.logger_name, "performance.log")
logging_func(log_file, message)
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
train(opt, train_loader, model, criterion, optimizer, epoch, val_loader)
# evaluate on validation set
rsum = validate(opt, val_loader, model)
# remember best R@ sum and save checkpoint
is_best = rsum > best_rsum
best_rsum = max(rsum, best_rsum)
if not os.path.exists(opt.model_name):
os.mkdir(opt.model_name)
save_checkpoint({
'epoch': epoch,
'model': model.state_dict(),
'best_rsum': best_rsum,
'opt': opt,
'optimizer': optimizer.state_dict(),
}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=opt.model_name + '/')
class DataPrefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.images, self.captions, self.length, self.index = next(self.loader)
except StopIteration:
self.images, self.captions, self.length, self.index = None, None, None, None
return
with torch.cuda.stream(self.stream):
self.images = self.images.cuda()
self.captions = self.captions.cuda()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
self.preload()
return self.images, self.captions, self.length, self.index
def train(opt, train_loader, model, criterion, optimizer, epoch, val_loader):
# average meters to record the training statistics
batch_time = AverageMeter()
data_time = AverageMeter()
train_logger = LogCollector()
run_time = 0
start_time = time.time()
prefetcher = DataPrefetcher(train_loader)
images, captions, lengths, index = prefetcher.next()
i = 0
while images is not None:
# switch to train mode
model.train()
# measure data loading time
model.logger = train_logger
optimizer.zero_grad()
# Update the model
if torch.cuda.device_count() > 1:
images = images.repeat(torch.cuda.device_count(), 1, 1)
score = model(images, captions, lengths, index)
loss = criterion(score)
loss.backward()
if opt.grad_clip > 0:
clip_grad_norm_(model.parameters(), opt.grad_clip)
optimizer.step()
if (i + 1) % opt.log_step == 0:
run_time += time.time() - start_time
log = "epoch: %d; batch: %d/%d; loss: %.6f; time: %.4f" % (epoch,
i, len(train_loader), loss.data.item(),
run_time)
print(log, flush=True)
start_time = time.time()
run_time = 0
# validate at every val_step
images, captions, lengths, index = prefetcher.next()
i += 1
def validate(opt, val_loader, model):
# compute the encoding for all the validation images and captions
img_embs, img_means, cap_embs, cap_lens, cap_means = encode_data(
model, val_loader, opt.log_step, logging.info)
print(img_embs.shape, cap_embs.shape)
img_embs = numpy.array([img_embs[i] for i in range(0, len(img_embs), 5)])
start = time.time()
sims = shard_xattn(model, img_embs, img_means, cap_embs, cap_lens, cap_means, opt, shard_size=128)
end = time.time()
print("calculate similarity time:", end-start)
# caption retrieval
(r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1, r5, r10, medr, meanr))
# image retrieval
(r1i, r5i, r10i, medri, meanr) = t2i(
img_embs, cap_embs, cap_lens, sims)
print("Text to image: %.1f, %.1f, %.1f, %.1f, %.1f" %
(r1i, r5i, r10i, medri, meanr))
# sum of recalls to be used for early stopping
currscore = r1 + r5 + r10 + r1i + r5i + r10i
return currscore
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', prefix=''):
tries = 15
error = None
# deal with unstable I/O. Usually not necessary.
while tries:
try:
torch.save(state, prefix + filename)
if is_best:
message = "--------save best model at epoch %d---------\n" % (state["epoch"] - 1)
print(message, flush=True)
log_file = os.path.join(prefix, "performance.log")
logging_func(log_file, message)
shutil.copyfile(prefix + filename, prefix + 'model_best.pth.tar')
except IOError as e:
error = e
tries -= 1
else:
break
print('model save {} failed, remaining {} trials'.format(filename, tries))
if not tries:
raise error
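# Hedged sketch: restoring the best model mirrors the --resume branch in main();
# 'model_best.pth.tar' is the copy written by shutil.copyfile above when is_best is True.
#
#     checkpoint = torch.load(os.path.join(opt.model_name, 'model_best.pth.tar'))
#     model.load_state_dict(checkpoint['model'])
#     optimizer.load_state_dict(checkpoint['optimizer'])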
def adjust_learning_rate(opt, optimizer, epoch):
"""Sets the learning rate to the initial LR
decayed by 10 every 30 epochs"""
lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
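# Worked example with the defaults in this file (learning_rate=2e-4, lr_update=15):
#   epochs 0-14  -> lr = 2e-4 * 0.1**0 = 2e-4
#   epochs 15-29 -> lr = 2e-4 * 0.1**1 = 2e-5
# With num_epochs=20 only the first decay step is reached.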
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the sp | ecified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
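# Usage sketch (illustrative only; accuracy() is not called elsewhere in this file):
#
#     logits = torch.randn(8, 10)              # (batch, num_classes)
#     labels = torch.randint(0, 10, (8,))
#     top1, top5 = accuracy(logits, labels, topk=(1, 5))  # percentages as 0-dim tensors
#
# Note: on recent PyTorch versions correct[:k].view(-1) may need to be reshape(-1).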
if __name__ == '__main__':
| identifier_body |
|
android_bootldr_qcom.go | asetools.py#105)
* - Nexus 7 \[2013] (Wi-Fi) "flo" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-flo] ([other samples][others-flo]),
* [releasetools.py](https://android.googlesource.com/device/asus/flo/+/9d9fee9/releasetools.py#130)
*
* * MSM8996 Pro-AB ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_820_and_821_(2016)))
* - Pixel "sailfish" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-sailfish] ([other samples][others-sailfish])
* - Pixel XL "marlin" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-marlin] ([other samples][others-marlin])
*
* * MSM8998 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_835_(2017)))
* - Pixel 2 "walleye" <a href="#doc-note-bootloader-size">(\*)</a>: [sample][sample-walleye] ([other samples][others-walleye])
* - Pixel 2 XL "taimen": [sample][sample-taimen] ([other samples][others-taimen])
*
* <small id="doc-note-bootloader-size">(\*)
* `bootloader_size` is equal to the size of the whole file (not just `img_bodies` as usual).
* </small>
*
* <small id="doc-note-data-after-img-bodies">(\**)
* There are some data after the end of `img_bodies`.
* </small>
*
* ---
*
* On the other hand, devices with these chips **do not** use this format:
*
* * <del>APQ8084</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 6 "shamu": [sample][foreign-sample-shamu] ([other samples][foreign-others-shamu]),
* [releasetools.py](https://android.googlesource.com/device/moto/shamu/+/df9354d/releasetools.py#12) -
* uses "Motoboot packed image format" instead
*
* * <del>MSM8994</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 6P "angler": [sample][foreign-sample-angler] ([other samples][foreign-others-angler]),
* [releasetools.py](https://android.googlesource.com/device/huawei/angler/+/cf92cd8/releasetools.py#29) -
* uses "Huawei Bootloader packed image format" instead
*
* [sample-mako]: https://androidfilehost.com/?fid=96039337900113996 "bootloader-mako-makoz30f.img"
* [others-mako]: https://androidfilehost.com/?w=search&s=bootloader-mako&type=files
*
* [sample-hammerhead]: https://androidfilehost.com/?fid=385035244224410247 "bootloader-hammerhead-hhz20h.img"
* [others-hammerhead]: https://androidfilehost.com/?w=search&s=bootloader-hammerhead&type=files
*
* [sample-bullhead]: https://androidfilehost.com/?fid=11410963190603870177 "bootloader-bullhead-bhz32c.img"
* [others-bullhead]: https://androidfilehost.com/?w=search&s=bootloader-bullhead&type=files
*
* [sample-deb]: https://androidfilehost.com/?fid=23501681358552487 "bootloader-deb-flo-04.02.img"
* [others-deb]: https://androidfilehost.com/?w=search&s=bootloader-deb-flo&type=files
*
* [sample-flo]: https://androidfilehost.com/?fid=23991606952593542 "bootloader-flo-flo-04.05.img"
* [others-flo]: https://androidfilehost.com/?w=search&s=bootloader-flo-flo&type=files
*
* [sample-sailfish]: https://androidfilehost.com/?fid=6006931924117907154 "bootloader-sailfish-8996-012001-1904111134.img"
* [others-sailfish]: https://androidfilehost.com/?w=search&s=bootloader-sailfish&type=files
*
* [sample-marlin]: https://androidfilehost.com/?fid=6006931924117907131 "bootloader-marlin-8996-012001-1904111134.img"
* [others-marlin]: https://androidfilehost.com/?w=search&s=bootloader-marlin&type=files
*
* [sample-walleye]: https://androidfilehost.com/?fid=14943124697586348540 "bootloader-walleye-mw8998-003.0085.00.img"
* [others-walleye]: https://androidfilehost.com/?w=search&s=bootloader-walleye&type=files
*
* [sample-taimen]: https://androidfilehost.com/?fid=14943124697586348536 "bootloader-taimen-tmz30m.img"
* [others-taimen]: https://androidfilehost.com/?w=search&s=bootloader-taimen&type=files
*
* [foreign-sample-shamu]: https://androidfilehost.com/?fid=745849072291678307 "bootloader-shamu-moto-apq8084-72.04.img"
* [foreign-others-shamu]: https://androidfilehost.com/?w=search&s=bootloader-shamu&type=files
*
* [foreign-sample-angler]: https://androidfilehost.com/?fid=11410963190603870158 "bootloader-angler-angler-03.84.img"
* [foreign-others-angler]: https://androidfilehost.com/?w=search&s=bootloader-angler&type=files
*
* ---
*
* The `bootloader-*.img` samples referenced above originally come from factory
* images packed in ZIP archives that can be found on the page [Factory Images
* for Nexus and Pixel Devices](https://developers.google.com/android/images) on
* the Google Developers site. Note that the codenames on that page may be
* different than the ones that are written in the list above. That's because the
* Google page indicates **ROM codenames** in headings (e.g. "occam" for Nexus 4)
* but the above list uses **model codenames** (e.g. "mako" for Nexus 4) because
* that is how the original `bootloader-*.img` files are identified. For most
* devices, however, these code names are the same.
* @see <a href="https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py">Source</a>
*/
type AndroidBootldrQcom struct {
Magic []byte
NumImages uint32
OfsImgBodies uint32
BootloaderSize uint32
ImgHeaders []*AndroidBootldrQcom_ImgHeader
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent interface{}
_f_imgBodies bool
imgBodies []*AndroidBootldrQcom_ImgBody
}
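// Descriptive note (added for readability; it restates what the generated parser reads rather
// than adding behaviour): the header is an 8-byte magic "BOOTLDR!", followed by three
// little-endian uint32 fields (NumImages, OfsImgBodies, BootloaderSize), presumably followed
// by NumImages image headers (ImgHeaders); the image bodies start at OfsImgBodies.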
func NewAndroidBootldrQcom() *AndroidBootldrQcom |
func (this *AndroidBootldrQcom) Read(io *kaitai.Stream, parent interface{}, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp1, err := this._io.ReadBytes(int(8))
if err != nil {
return err
}
tmp1 = tmp1
this.Magic = tmp1 | {
return &AndroidBootldrQcom{
}
} | identifier_body |
android_bootldr_qcom.go | leasetools.py#105)
* - Nexus 7 \[2013] (Wi-Fi) "flo" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-flo] ([other samples][others-flo]),
* [releasetools.py](https://android.googlesource.com/device/asus/flo/+/9d9fee9/releasetools.py#130)
*
* * MSM8996 Pro-AB ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_820_and_821_(2016)))
* - Pixel "sailfish" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-sailfish] ([other samples][others-sailfish])
* - Pixel XL "marlin" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-marlin] ([other samples][others-marlin])
*
* * MSM8998 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_835_(2017)))
* - Pixel 2 "walleye" <a href="#doc-note-bootloader-size">(\*)</a>: [sample][sample-walleye] ([other samples][others-walleye])
* - Pixel 2 XL "taimen": [sample][sample-taimen] ([other samples][others-taimen])
*
* <small id="doc-note-bootloader-size">(\*)
* `bootloader_size` is equal to the size of the whole file (not just `img_bodies` as usual).
* </small>
*
* <small id="doc-note-data-after-img-bodies">(\**)
* There are some data after the end of `img_bodies`.
* </small>
*
* ---
*
* On the other hand, devices with these chips **do not** use this format:
*
* * <del>APQ8084</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 6 "shamu": [sample][foreign-sample-shamu] ([other samples][foreign-others-shamu]),
* [releasetools.py](https://android.googlesource.com/device/moto/shamu/+/df9354d/releasetools.py#12) -
* uses "Motoboot packed image format" instead
*
* * <del>MSM8994</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 6P "angler": [sample][foreign-sample-angler] ([other samples][foreign-others-angler]),
* [releasetools.py](https://android.googlesource.com/device/huawei/angler/+/cf92cd8/releasetools.py#29) -
* uses "Huawei Bootloader packed image format" instead
*
* [sample-mako]: https://androidfilehost.com/?fid=96039337900113996 "bootloader-mako-makoz30f.img"
* [others-mako]: https://androidfilehost.com/?w=search&s=bootloader-mako&type=files
*
* [sample-hammerhead]: https://androidfilehost.com/?fid=385035244224410247 "bootloader-hammerhead-hhz20h.img"
* [others-hammerhead]: https://androidfilehost.com/?w=search&s=bootloader-hammerhead&type=files
*
* [sample-bullhead]: https://androidfilehost.com/?fid=11410963190603870177 "bootloader-bullhead-bhz32c.img"
* [others-bullhead]: https://androidfilehost.com/?w=search&s=bootloader-bullhead&type=files
*
* [sample-deb]: https://androidfilehost.com/?fid=23501681358552487 "bootloader-deb-flo-04.02.img"
* [others-deb]: https://androidfilehost.com/?w=search&s=bootloader-deb-flo&type=files
*
* [sample-flo]: https://androidfilehost.com/?fid=23991606952593542 "bootloader-flo-flo-04.05.img"
* [others-flo]: https://androidfilehost.com/?w=search&s=bootloader-flo-flo&type=files
*
* [sample-sailfish]: https://androidfilehost.com/?fid=6006931924117907154 "bootloader-sailfish-8996-012001-1904111134.img"
* [others-sailfish]: https://androidfilehost.com/?w=search&s=bootloader-sailfish&type=files
*
* [sample-marlin]: https://androidfilehost.com/?fid=6006931924117907131 "bootloader-marlin-8996-012001-1904111134.img"
* [others-marlin]: https://androidfilehost.com/?w=search&s=bootloader-marlin&type=files
*
* [sample-walleye]: https://androidfilehost.com/?fid=14943124697586348540 "bootloader-walleye-mw8998-003.0085.00.img"
* [others-walleye]: https://androidfilehost.com/?w=search&s=bootloader-walleye&type=files
*
* [sample-taimen]: https://androidfilehost.com/?fid=14943124697586348536 "bootloader-taimen-tmz30m.img"
* [others-taimen]: https://androidfilehost.com/?w=search&s=bootloader-taimen&type=files
*
* [foreign-sample-shamu]: https://androidfilehost.com/?fid=745849072291678307 "bootloader-shamu-moto-apq8084-72.04.img"
* [foreign-others-shamu]: https://androidfilehost.com/?w=search&s=bootloader-shamu&type=files
*
* [foreign-sample-angler]: https://androidfilehost.com/?fid=11410963190603870158 "bootloader-angler-angler-03.84.img"
* [foreign-others-angler]: https://androidfilehost.com/?w=search&s=bootloader-angler&type=files
*
* ---
* | * The `bootloader-*.img` samples referenced above originally come from factory
* images packed in ZIP archives that can be found on the page [Factory Images
* for Nexus and Pixel Devices](https://developers.google.com/android/images) on
* the Google Developers site. Note that the codenames on that page may be
* different than the ones that are written in the list above. That's because the
* Google page indicates **ROM codenames** in headings (e.g. "occam" for Nexus 4)
* but the above list uses **model codenames** (e.g. "mako" for Nexus 4) because
* that is how the original `bootloader-*.img` files are identified. For most
* devices, however, these code names are the same.
* @see <a href="https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py">Source</a>
*/
type AndroidBootldrQcom struct {
Magic []byte
NumImages uint32
OfsImgBodies uint32
BootloaderSize uint32
ImgHeaders []*AndroidBootldrQcom_ImgHeader
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent interface{}
_f_imgBodies bool
imgBodies []*AndroidBootldrQcom_ImgBody
}
func NewAndroidBootldrQcom() *AndroidBootldrQcom {
return &AndroidBootldrQcom{
}
}
func (this *AndroidBootldrQcom) Read(io *kaitai.Stream, parent interface{}, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp1, err := this._io.ReadBytes(int(8))
if err != nil {
return err
}
tmp1 = tmp1
this.Magic = tmp1
| random_line_split |
|
android_bootldr_qcom.go | asetools.py#105)
* - Nexus 7 \[2013] (Wi-Fi) "flo" <a href="#doc-note-data-after-img-bodies">(\**)</a>: [sample][sample-flo] ([other samples][others-flo]),
* [releasetools.py](https://android.googlesource.com/device/asus/flo/+/9d9fee9/releasetools.py#130)
*
* * MSM8996 Pro-AB ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_820_and_821_(2016)))
* - Pixel "sailfish" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-sailfish] ([other samples][others-sailfish])
* - Pixel XL "marlin" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-marlin] ([other samples][others-marlin])
*
* * MSM8998 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_835_(2017)))
* - Pixel 2 "walleye" <a href="#doc-note-bootloader-size">(\*)</a>: [sample][sample-walleye] ([other samples][others-walleye])
* - Pixel 2 XL "taimen": [sample][sample-taimen] ([other samples][others-taimen])
*
* <small id="doc-note-bootloader-size">(\*)
* `bootloader_size` is equal to the size of the whole file (not just `img_bodies` as usual).
* </small>
*
* <small id="doc-note-data-after-img-bodies">(\**)
* There are some data after the end of `img_bodies`.
* </small>
*
* ---
*
* On the other hand, devices with these chips **do not** use this format:
*
* * <del>APQ8084</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 6 "shamu": [sample][foreign-sample-shamu] ([other samples][foreign-others-shamu]),
* [releasetools.py](https://android.googlesource.com/device/moto/shamu/+/df9354d/releasetools.py#12) -
* uses "Motoboot packed image format" instead
*
* * <del>MSM8994</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 6P "angler": [sample][foreign-sample-angler] ([other samples][foreign-others-angler]),
* [releasetools.py](https://android.googlesource.com/device/huawei/angler/+/cf92cd8/releasetools.py#29) -
* uses "Huawei Bootloader packed image format" instead
*
* [sample-mako]: https://androidfilehost.com/?fid=96039337900113996 "bootloader-mako-makoz30f.img"
* [others-mako]: https://androidfilehost.com/?w=search&s=bootloader-mako&type=files
*
* [sample-hammerhead]: https://androidfilehost.com/?fid=385035244224410247 "bootloader-hammerhead-hhz20h.img"
* [others-hammerhead]: https://androidfilehost.com/?w=search&s=bootloader-hammerhead&type=files
*
* [sample-bullhead]: https://androidfilehost.com/?fid=11410963190603870177 "bootloader-bullhead-bhz32c.img"
* [others-bullhead]: https://androidfilehost.com/?w=search&s=bootloader-bullhead&type=files
*
* [sample-deb]: https://androidfilehost.com/?fid=23501681358552487 "bootloader-deb-flo-04.02.img"
* [others-deb]: https://androidfilehost.com/?w=search&s=bootloader-deb-flo&type=files
*
* [sample-flo]: https://androidfilehost.com/?fid=23991606952593542 "bootloader-flo-flo-04.05.img"
* [others-flo]: https://androidfilehost.com/?w=search&s=bootloader-flo-flo&type=files
*
* [sample-sailfish]: https://androidfilehost.com/?fid=6006931924117907154 "bootloader-sailfish-8996-012001-1904111134.img"
* [others-sailfish]: https://androidfilehost.com/?w=search&s=bootloader-sailfish&type=files
*
* [sample-marlin]: https://androidfilehost.com/?fid=6006931924117907131 "bootloader-marlin-8996-012001-1904111134.img"
* [others-marlin]: https://androidfilehost.com/?w=search&s=bootloader-marlin&type=files
*
* [sample-walleye]: https://androidfilehost.com/?fid=14943124697586348540 "bootloader-walleye-mw8998-003.0085.00.img"
* [others-walleye]: https://androidfilehost.com/?w=search&s=bootloader-walleye&type=files
*
* [sample-taimen]: https://androidfilehost.com/?fid=14943124697586348536 "bootloader-taimen-tmz30m.img"
* [others-taimen]: https://androidfilehost.com/?w=search&s=bootloader-taimen&type=files
*
* [foreign-sample-shamu]: https://androidfilehost.com/?fid=745849072291678307 "bootloader-shamu-moto-apq8084-72.04.img"
* [foreign-others-shamu]: https://androidfilehost.com/?w=search&s=bootloader-shamu&type=files
*
* [foreign-sample-angler]: https://androidfilehost.com/?fid=11410963190603870158 "bootloader-angler-angler-03.84.img"
* [foreign-others-angler]: https://androidfilehost.com/?w=search&s=bootloader-angler&type=files
*
* ---
*
* The `bootloader-*.img` samples referenced above originally come from factory
* images packed in ZIP archives that can be found on the page [Factory Images
* for Nexus and Pixel Devices](https://developers.google.com/android/images) on
* the Google Developers site. Note that the codenames on that page may be
* different than the ones that are written in the list above. That's because the
* Google page indicates **ROM codenames** in headings (e.g. "occam" for Nexus 4)
* but the above list uses **model codenames** (e.g. "mako" for Nexus 4) because
* that is how the original `bootloader-*.img` files are identified. For most
* devices, however, these code names are the same.
* @see <a href="https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py">Source</a>
*/
type AndroidBootldrQcom struct {
Magic []byte
NumImages uint32
OfsImgBodies uint32
BootloaderSize uint32
ImgHeaders []*AndroidBootldrQcom_ImgHeader
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent interface{}
_f_imgBodies bool
imgBodies []*AndroidBootldrQcom_ImgBody
}
func NewAndroidBootldrQcom() *AndroidBootldrQcom {
return &AndroidBootldrQcom{
}
}
func (this *AndroidBootldrQcom) | (io *kaitai.Stream, parent interface{}, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp1, err := this._io.ReadBytes(int(8))
if err != nil {
return err
}
tmp1 = tmp1
this.Magic = tmp1
| Read | identifier_name |
android_bootldr_qcom.go | "marlin" <a href="#doc-note-bootloader-size">(\*)</a>:
* [sample][sample-marlin] ([other samples][others-marlin])
*
* * MSM8998 ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_835_(2017)))
* - Pixel 2 "walleye" <a href="#doc-note-bootloader-size">(\*)</a>: [sample][sample-walleye] ([other samples][others-walleye])
* - Pixel 2 XL "taimen": [sample][sample-taimen] ([other samples][others-taimen])
*
* <small id="doc-note-bootloader-size">(\*)
* `bootloader_size` is equal to the size of the whole file (not just `img_bodies` as usual).
* </small>
*
* <small id="doc-note-data-after-img-bodies">(\**)
* There are some data after the end of `img_bodies`.
* </small>
*
* ---
*
* On the other hand, devices with these chips **do not** use this format:
*
* * <del>APQ8084</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_800,_801_and_805_(2013/14)))
* - Nexus 6 "shamu": [sample][foreign-sample-shamu] ([other samples][foreign-others-shamu]),
* [releasetools.py](https://android.googlesource.com/device/moto/shamu/+/df9354d/releasetools.py#12) -
* uses "Motoboot packed image format" instead
*
* * <del>MSM8994</del> ([devices](https://en.wikipedia.org/wiki/Devices_using_Qualcomm_Snapdragon_processors#Snapdragon_808_and_810_(2015)))
* - Nexus 6P "angler": [sample][foreign-sample-angler] ([other samples][foreign-others-angler]),
* [releasetools.py](https://android.googlesource.com/device/huawei/angler/+/cf92cd8/releasetools.py#29) -
* uses "Huawei Bootloader packed image format" instead
*
* [sample-mako]: https://androidfilehost.com/?fid=96039337900113996 "bootloader-mako-makoz30f.img"
* [others-mako]: https://androidfilehost.com/?w=search&s=bootloader-mako&type=files
*
* [sample-hammerhead]: https://androidfilehost.com/?fid=385035244224410247 "bootloader-hammerhead-hhz20h.img"
* [others-hammerhead]: https://androidfilehost.com/?w=search&s=bootloader-hammerhead&type=files
*
* [sample-bullhead]: https://androidfilehost.com/?fid=11410963190603870177 "bootloader-bullhead-bhz32c.img"
* [others-bullhead]: https://androidfilehost.com/?w=search&s=bootloader-bullhead&type=files
*
* [sample-deb]: https://androidfilehost.com/?fid=23501681358552487 "bootloader-deb-flo-04.02.img"
* [others-deb]: https://androidfilehost.com/?w=search&s=bootloader-deb-flo&type=files
*
* [sample-flo]: https://androidfilehost.com/?fid=23991606952593542 "bootloader-flo-flo-04.05.img"
* [others-flo]: https://androidfilehost.com/?w=search&s=bootloader-flo-flo&type=files
*
* [sample-sailfish]: https://androidfilehost.com/?fid=6006931924117907154 "bootloader-sailfish-8996-012001-1904111134.img"
* [others-sailfish]: https://androidfilehost.com/?w=search&s=bootloader-sailfish&type=files
*
* [sample-marlin]: https://androidfilehost.com/?fid=6006931924117907131 "bootloader-marlin-8996-012001-1904111134.img"
* [others-marlin]: https://androidfilehost.com/?w=search&s=bootloader-marlin&type=files
*
* [sample-walleye]: https://androidfilehost.com/?fid=14943124697586348540 "bootloader-walleye-mw8998-003.0085.00.img"
* [others-walleye]: https://androidfilehost.com/?w=search&s=bootloader-walleye&type=files
*
* [sample-taimen]: https://androidfilehost.com/?fid=14943124697586348536 "bootloader-taimen-tmz30m.img"
* [others-taimen]: https://androidfilehost.com/?w=search&s=bootloader-taimen&type=files
*
* [foreign-sample-shamu]: https://androidfilehost.com/?fid=745849072291678307 "bootloader-shamu-moto-apq8084-72.04.img"
* [foreign-others-shamu]: https://androidfilehost.com/?w=search&s=bootloader-shamu&type=files
*
* [foreign-sample-angler]: https://androidfilehost.com/?fid=11410963190603870158 "bootloader-angler-angler-03.84.img"
* [foreign-others-angler]: https://androidfilehost.com/?w=search&s=bootloader-angler&type=files
*
* ---
*
* The `bootloader-*.img` samples referenced above originally come from factory
* images packed in ZIP archives that can be found on the page [Factory Images
* for Nexus and Pixel Devices](https://developers.google.com/android/images) on
* the Google Developers site. Note that the codenames on that page may be
* different than the ones that are written in the list above. That's because the
* Google page indicates **ROM codenames** in headings (e.g. "occam" for Nexus 4)
* but the above list uses **model codenames** (e.g. "mako" for Nexus 4) because
* that is how the original `bootloader-*.img` files are identified. For most
* devices, however, these code names are the same.
* @see <a href="https://android.googlesource.com/device/lge/hammerhead/+/7618a7d/releasetools.py">Source</a>
*/
type AndroidBootldrQcom struct {
Magic []byte
NumImages uint32
OfsImgBodies uint32
BootloaderSize uint32
ImgHeaders []*AndroidBootldrQcom_ImgHeader
_io *kaitai.Stream
_root *AndroidBootldrQcom
_parent interface{}
_f_imgBodies bool
imgBodies []*AndroidBootldrQcom_ImgBody
}
func NewAndroidBootldrQcom() *AndroidBootldrQcom {
return &AndroidBootldrQcom{
}
}
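// Usage sketch (assumption: the standard kaitai-go runtime API; this driver code is not part
// of the generated file):
//
//     f, err := os.Open("bootloader.img")
//     if err != nil { /* handle */ }
//     defer f.Close()
//     b := NewAndroidBootldrQcom()
//     if err := b.Read(kaitai.NewStream(f), nil, b); err != nil { /* handle */ }
//     // b.NumImages, b.ImgHeaders etc. are then populated.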
func (this *AndroidBootldrQcom) Read(io *kaitai.Stream, parent interface{}, root *AndroidBootldrQcom) (err error) {
this._io = io
this._parent = parent
this._root = root
tmp1, err := this._io.ReadBytes(int(8))
if err != nil {
return err
}
tmp1 = tmp1
this.Magic = tmp1
if !(bytes.Equal(this.Magic, []uint8{66, 79, 79, 84, 76, 68, 82, 33})) {
return kaitai.NewValidationNotEqualError([]uint8{66, 79, 79, 84, 76, 68, 82, 33}, this.Magic, this._io, "/seq/0")
}
tmp2, err := this._io.ReadU4le()
if err != nil {
return err
}
this.NumImages = uint32(tmp2)
tmp3, err := this._io.ReadU4le()
if err != nil {
return err
}
this.OfsImgBodies = uint32(tmp3)
tmp4, err := this._io.ReadU4le()
if err != nil | {
return err
} | conditional_block |
|
username.go | "veining",
"durability",
"corrosion",
"laterally",
"winnipeg",
"federally",
"divest",
"gasped",
"unselfishly",
"disclosing",
"nurturing",
"tramway",
"palmed",
"disruptions",
"footman",
"senators",
"cleave",
"effected",
"ceramic",
"leathery",
"nicely",
"frustrater",
"warning",
"lexicons",
"exactions",
"prover",
"recreates",
"puddling",
"diabolic",
"spatula",
"herons",
"blobs",
"fibrosity",
"cabinetmake",
"phobic",
"jingling",
"double",
"proving",
"taipei",
"skims",
"prophesied",
"hastily",
"parasitics",
"landings",
"taxicabs",
"subway",
"recount",
"noisemake",
"induce",
"mountaineer",
"achieved",
"celebrities",
"fluffy",
"bimini",
"briefcases",
"devote",
"stylishly",
"cleansing",
"disclaimed",
"phonemes",
"impertinent",
"connecting",
"lentil",
"revelations",
"phoned",
"lading",
"lengthens",
"nobles",
"despairing",
"hatchets",
"livably",
"lodger",
"tokens",
"ensurers",
"interconnects",
"passionate",
"peppergrass",
"bookkeep",
"humerus",
"thanklessness",
"shamed",
"choreography",
"swimmers",
"authors",
"football",
"auditions",
"greener",
"deflater",
"tariff",
"banjos",
"packages",
"gambit",
"heated",
"interfere",
"collectors",
"sideboards",
"shoreline",
"rutherford",
"ethnology",
"persecuting",
"operatives",
"demark",
"curtate",
"inheritress",
"economizer",
"pleural",
"broiling",
"minting",
"ricochet",
"lookup",
"biases",
"auctioneers",
"formula",
"morphism",
"outstripped",
"falsifying",
"fealty",
"homesteads",
"dilate",
"councilmen",
"cornea",
"intercept",
"adjoins",
"medals",
"autonomic",
"monologue", | "cruisers",
"psychoanalyst",
"registrations",
"agnostics",
"ambivalently",
"punishable",
"philosophically",
"storages",
"wistful",
"loveland",
"preferential",
"armchairs",
"washington",
"accretions",
"interchangeable",
"ambitions",
"hostesss",
"heading",
"crucifies",
"venturesome",
"mullion",
"fueling",
"bedposts",
"soapstone",
"garland",
"heaved",
"instrumentalists",
"patristic",
"tableau",
"plagiarist",
"disambiguate",
"autopilot",
"anointing",
"retypes",
"pirates",
"obfuscatory",
"octennial",
"indeterminately",
"defended",
"childbirth",
"liberation",
"kilograms",
"elaborates",
"snyaptic",
"granitic",
"carthage",
"deteriorate",
"matilda",
"antislavery",
"batter",
"cringes",
"aerosolize",
"floppily",
"caribbean",
"woodbury",
"wrapper",
"capistrano",
"meats",
"overdrafts",
"gnats",
"sympathetic",
"pritchard",
"subscripted",
"chinquapin",
"skater",
"counterfeiter",
"leathern",
"tabula",
"bowled",
"reagan",
"appropriators",
"curing",
"pacific",
"scandalous",
"anesthetized",
"reinforcements",
"conner",
"complains",
"conjugal",
"enumerator",
"inconclusive",
"pipelines",
"synthesizer",
"intimate",
"saturater",
"splintered",
"taxonomy",
"roaring",
"transduction",
"collegial",
"breakdown",
"adducing",
"debenture",
"jeopardy",
"intoxicant",
"rescue",
"phrased",
"cartwheel",
"remedies",
"penguin",
"shined",
"codification",
"impugn",
"doorbell",
"ludlow",
"visibility",
"agglutinins",
"apposition",
"pathogenic",
"bestial",
"present",
"encyclopedic",
"qualifiers",
"realists",
"baptism",
"plasticity",
"transitioned",
"atalanta",
"crucially",
"trackers",
"identities",
"cursors",
"backspace",
"airships",
"multilevel",
"concretely",
"gazette",
"intelligibility",
"cottager",
"denigrated",
"unimpeded",
"matisse",
"thrashed",
"impious",
"ceaseless",
"callisto",
"lollipop",
"defenestrated",
"reredos",
"chemic",
"foulest",
"solemn",
"staley",
"ballfield",
"alameda",
"panaceas",
"nabisco",
"strainer",
"hackmatack",
"hemispheric",
"cogitated",
"customizing",
"pushbutton",
"dressmaker",
"amending",
"penance",
"seasonal",
"chromium",
"offsaddle",
"atrophy",
"souffle",
"platforms",
"wrangle",
"clearness",
"anecdotes",
"hurting",
"tooled",
"angora",
"narrate",
"statistician",
"philosoph",
"assertions",
"indefinitely",
"parsimonious",
"bribing",
"tolerant",
"lilies",
"sulfate",
"righteously",
"stereotypical",
"degeneracy",
"similarity",
"pastimes",
"informed",
"polypropylene",
"backlog",
"typography",
"survivors",
"reconfiguring",
"gadding",
"caryatid",
"scuttling",
"semaphores",
"debugged",
"pacification",
"carbone",
"firearms",
"neurophysiology",
"blazing",
"ballrooms",
"thunderbolts",
"forefather",
"rachel",
"collision",
"reticulately",
"resignations",
"interactions",
"conspirator",
"basilar",
"climaxes",
"draining",
"cabinets",
"checksumming",
"suicide",
"coffees",
"mescaline",
"tininess",
"tinder",
"binomial",
"berates",
"cashed",
"bellwethers",
"carbonation",
"kalamazoo",
"thyroglobulin",
"kidnappers",
"numbed",
"shiftiness",
"presuming",
"achievements",
"amplifiers",
"lurches",
"cataclysmic",
"subvert",
"paragon",
"hoppers",
"lapels",
"recast",
"pitilessly",
"coffins",
"outstretched",
"perceiving",
"thoughtfully",
"taking",
"stems",
"favors",
"streets",
"quieting",
"monoid",
"delectable",
"encoding",
"jejune",
"sincere",
"goober",
"testes",
"lexicon",
"richter",
"covenants",
"pitiers",
| random_line_split |
|
username.go |
var Words = []string{
"acquirable",
"bestsellers",
"farther",
"prizer",
"shasta",
"evaporate",
"auspices",
"garments",
"partnership",
"blocs",
"forestalling",
"razors",
"extensibility",
"unavoidably",
"logician",
"embroidered",
"crippling",
"supranational",
"milton",
"healthily",
"spiraling",
"coolies",
"bartend",
"precondition",
"reflectors",
"judged",
"rinser",
"amplify",
"casseroles",
"physics",
"raider",
"whippet",
"expulsion",
"enzyme",
"prohibit",
"gazers",
"unchangeable",
"matching",
"mouthe",
"millihenry",
"plowshare",
"quicken",
"blackmailing",
"chatham",
"jobbing",
"augustly",
"constitutionality",
"cathodes",
"inspirations",
"seniority",
"staging",
"figuratively",
"beckon",
"rankle",
"buzzwords",
"mccullough",
"justifying",
"antiquities",
"ardency",
"tribunals",
"laughs",
"shakes",
"feedback",
"balustrade",
"mattress",
"seduces",
"attainments",
"counterattack",
"sweeter",
"deforestation",
"digests",
"sacrificed",
"scripts",
"philharmonic",
"legerdemain",
"advancements",
"disburse",
"bottles",
"scatterbrain",
"conceptions",
"planer",
"fishpond",
"tidying",
"illustration",
"dishonoring",
"impostors",
"aspect",
"summations",
"steering",
"cheesy",
"hamlets",
"cryptanalyst",
"ensued",
"upholsterer",
"detaining",
"penned",
"robbers",
"contingency",
"effectively",
"soybean",
"clockings",
"pappas",
"jellies",
"formulae",
"routines",
"savoyard",
"redefining",
"insistently",
"macroscopic",
"taster",
"phosphates",
"midsts",
"invertebrates",
"vices",
"vacancy",
"predominated",
"timeshare",
"convincing",
"paralleling",
"conceived",
"guggenheim",
"paintings",
"dispells",
"incapacitating",
"nostrand",
"pliant",
"sleuth",
"grammar",
"wallows",
"dismisses",
"wilhelm",
"exiling",
"checkers",
"proceedings",
"hoarsely",
"stretches",
"purport",
"limousine",
"inheritresses",
"company",
"thruway",
"hopkinsian",
"downcast",
"dangers",
"anatomically",
"allure",
"stampers",
"executive",
"postmaster",
"depressing",
"dragons",
"countys",
"harriet",
"attire",
"runway",
"bubbled",
"waterman",
"gerhardt",
"honorableness",
"flurry",
"refract",
"bacteria",
"antiques",
"provide",
"mysteriously",
"interrogation",
"discontinuous",
"victrola",
"replications",
"passion",
"thawed",
"alligator",
"documentaries",
"nakedness",
"veining",
"durability",
"corrosion",
"laterally",
"winnipeg",
"federally",
"divest",
"gasped",
"unselfishly",
"disclosing",
"nurturing",
"tramway",
"palmed",
"disruptions",
"footman",
"senators",
"cleave",
"effected",
"ceramic",
"leathery",
"nicely",
"frustrater",
"warning",
"lexicons",
"exactions",
"prover",
"recreates",
"puddling",
"diabolic",
"spatula",
"herons",
"blobs",
"fibrosity",
"cabinetmake",
"phobic",
"jingling",
"double",
"proving",
"taipei",
"skims",
"prophesied",
"hastily",
"parasitics",
"landings",
"taxicabs",
"subway",
"recount",
"noisemake",
"induce",
"mountaineer",
"achieved",
"celebrities",
"fluffy",
"bimini",
"briefcases",
"devote",
"stylishly",
"cleansing",
"disclaimed",
"phonemes",
"impertinent",
"connecting",
"lentil",
"revelations",
"phoned",
"lading",
"lengthens",
"nobles",
"despairing",
"hatchets",
"livably",
"lodger",
"tokens",
"ensurers",
"interconnects",
"passionate",
"peppergrass",
"bookkeep",
"humerus",
"thanklessness",
"shamed",
"choreography",
"swimmers",
"authors",
"football",
"auditions",
"greener",
"deflater",
"tariff",
"banjos",
"packages",
"gambit",
"heated",
"interfere",
"collectors",
"sideboards",
"shoreline",
"rutherford",
"ethnology",
"persecuting",
"operatives",
"demark",
"curtate",
"inheritress",
"economizer",
"pleural",
"broiling",
"minting",
"ricochet",
"lookup",
"biases",
"auctioneers",
"formula",
"morphism",
"outstripped",
"falsifying",
"fealty",
"homesteads",
"dilate",
"councilmen",
"cornea",
"intercept",
"adjoins",
"medals",
"autonomic",
"monologue",
"cruisers",
"psychoanalyst",
"registrations",
"agnostics",
"ambivalently",
"punishable",
"philosophically",
"storages",
"wistful",
"loveland",
"preferential",
"armchairs",
"washington",
"accretions",
"interchangeable",
"ambitions",
"hostesss",
"heading",
"crucifies",
"venturesome",
"mullion",
"fueling",
"bedposts",
"soapstone",
"garland",
"heaved",
"instrumentalists",
"patristic",
"tableau",
"plagiarist",
"disambiguate",
"autopilot",
"anointing",
"retypes",
"pirates",
"obfuscatory",
"octennial",
"indeterminately",
"defended",
"childbirth",
"liberation",
"kilograms",
"elaborates",
"snyaptic",
"granitic",
"carthage",
"deteriorate",
"matilda",
"antislavery",
"batter",
"cringes",
"aerosolize",
"floppily",
"caribbean",
"woodbury",
"wrapper",
"capistrano",
"meats",
"overdrafts",
"gnats",
"sympathetic",
"pritchard",
"subscripted",
"chinquapin",
"skater",
"counterfeiter",
"leathern",
"tabula",
"bowled",
"reagan",
"appropriators",
"curing",
"pacific",
"scandalous",
"anesthetized",
"reinforcements",
| {
word := Words[rand.Int31n(int32(len(Words)))]
digits := rand.Int31n(1000)
return fmt.Sprintf("%s%d", word, digits)
} | identifier_body |
|
username.go | () string {
word := Words[rand.Int31n(int32(len(Words)))]
digits := rand.Int31n(1000)
return fmt.Sprintf("%s%d", word, digits)
}
var Words = []string{
"acquirable",
"bestsellers",
"farther",
"prizer",
"shasta",
"evaporate",
"auspices",
"garments",
"partnership",
"blocs",
"forestalling",
"razors",
"extensibility",
"unavoidably",
"logician",
"embroidered",
"crippling",
"supranational",
"milton",
"healthily",
"spiraling",
"coolies",
"bartend",
"precondition",
"reflectors",
"judged",
"rinser",
"amplify",
"casseroles",
"physics",
"raider",
"whippet",
"expulsion",
"enzyme",
"prohibit",
"gazers",
"unchangeable",
"matching",
"mouthe",
"millihenry",
"plowshare",
"quicken",
"blackmailing",
"chatham",
"jobbing",
"augustly",
"constitutionality",
"cathodes",
"inspirations",
"seniority",
"staging",
"figuratively",
"beckon",
"rankle",
"buzzwords",
"mccullough",
"justifying",
"antiquities",
"ardency",
"tribunals",
"laughs",
"shakes",
"feedback",
"balustrade",
"mattress",
"seduces",
"attainments",
"counterattack",
"sweeter",
"deforestation",
"digests",
"sacrificed",
"scripts",
"philharmonic",
"legerdemain",
"advancements",
"disburse",
"bottles",
"scatterbrain",
"conceptions",
"planer",
"fishpond",
"tidying",
"illustration",
"dishonoring",
"impostors",
"aspect",
"summations",
"steering",
"cheesy",
"hamlets",
"cryptanalyst",
"ensued",
"upholsterer",
"detaining",
"penned",
"robbers",
"contingency",
"effectively",
"soybean",
"clockings",
"pappas",
"jellies",
"formulae",
"routines",
"savoyard",
"redefining",
"insistently",
"macroscopic",
"taster",
"phosphates",
"midsts",
"invertebrates",
"vices",
"vacancy",
"predominated",
"timeshare",
"convincing",
"paralleling",
"conceived",
"guggenheim",
"paintings",
"dispells",
"incapacitating",
"nostrand",
"pliant",
"sleuth",
"grammar",
"wallows",
"dismisses",
"wilhelm",
"exiling",
"checkers",
"proceedings",
"hoarsely",
"stretches",
"purport",
"limousine",
"inheritresses",
"company",
"thruway",
"hopkinsian",
"downcast",
"dangers",
"anatomically",
"allure",
"stampers",
"executive",
"postmaster",
"depressing",
"dragons",
"countys",
"harriet",
"attire",
"runway",
"bubbled",
"waterman",
"gerhardt",
"honorableness",
"flurry",
"refract",
"bacteria",
"antiques",
"provide",
"mysteriously",
"interrogation",
"discontinuous",
"victrola",
"replications",
"passion",
"thawed",
"alligator",
"documentaries",
"nakedness",
"veining",
"durability",
"corrosion",
"laterally",
"winnipeg",
"federally",
"divest",
"gasped",
"unselfishly",
"disclosing",
"nurturing",
"tramway",
"palmed",
"disruptions",
"footman",
"senators",
"cleave",
"effected",
"ceramic",
"leathery",
"nicely",
"frustrater",
"warning",
"lexicons",
"exactions",
"prover",
"recreates",
"puddling",
"diabolic",
"spatula",
"herons",
"blobs",
"fibrosity",
"cabinetmake",
"phobic",
"jingling",
"double",
"proving",
"taipei",
"skims",
"prophesied",
"hastily",
"parasitics",
"landings",
"taxicabs",
"subway",
"recount",
"noisemake",
"induce",
"mountaineer",
"achieved",
"celebrities",
"fluffy",
"bimini",
"briefcases",
"devote",
"stylishly",
"cleansing",
"disclaimed",
"phonemes",
"impertinent",
"connecting",
"lentil",
"revelations",
"phoned",
"lading",
"lengthens",
"nobles",
"despairing",
"hatchets",
"livably",
"lodger",
"tokens",
"ensurers",
"interconnects",
"passionate",
"peppergrass",
"bookkeep",
"humerus",
"thanklessness",
"shamed",
"choreography",
"swimmers",
"authors",
"football",
"auditions",
"greener",
"deflater",
"tariff",
"banjos",
"packages",
"gambit",
"heated",
"interfere",
"collectors",
"sideboards",
"shoreline",
"rutherford",
"ethnology",
"persecuting",
"operatives",
"demark",
"curtate",
"inheritress",
"economizer",
"pleural",
"broiling",
"minting",
"ricochet",
"lookup",
"biases",
"auctioneers",
"formula",
"morphism",
"outstripped",
"falsifying",
"fealty",
"homesteads",
"dilate",
"councilmen",
"cornea",
"intercept",
"adjoins",
"medals",
"autonomic",
"monologue",
"cruisers",
"psychoanalyst",
"registrations",
"agnostics",
"ambivalently",
"punishable",
"philosophically",
"storages",
"wistful",
"loveland",
"preferential",
"armchairs",
"washington",
"accretions",
"interchangeable",
"ambitions",
"hostesss",
"heading",
"crucifies",
"venturesome",
"mullion",
"fueling",
"bedposts",
"soapstone",
"garland",
"heaved",
"instrumentalists",
"patristic",
"tableau",
"plagiarist",
"disambiguate",
"autopilot",
"anointing",
"retypes",
"pirates",
"obfuscatory",
"octennial",
"indeterminately",
"defended",
"childbirth",
"liberation",
"kilograms",
"elaborates",
"snyaptic",
"granitic",
"carthage",
"deteriorate",
"matilda",
"antislavery",
"batter",
"cringes",
"aerosolize",
"floppily",
"caribbean",
"woodbury",
"wrapper",
"capistrano",
"meats",
"overdrafts",
"gnats",
"sympathetic",
"pritchard",
"subscripted",
"chinquapin",
"skater",
"counterfeiter",
"leathern",
"tabula",
"bowled",
"reagan",
"appropriators",
"curing",
"pacific",
"scandalous",
"anesthetized",
"reinforcements | Username | identifier_name |
|
lib.rs | {
type Target = Entry<K, V>;
fn deref(&self) -> &Self::Target {
&self.entry
}
}
impl<'a, K, V> DerefMut for EntryMut<'a, K, V> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.entry
}
}
impl<K: Hash, V: Hash> Entry<K, V> {
fn new(key: K, val: V) -> Self {
let kv_checksum = hash_val(&key).wrapping_add(hash_val(&val));
let entry = Entry {
key,
val,
kv_checksum,
next: 0,
next_checksum: 0 + 1,
};
debug_assert!(entry.valid());
entry
}
fn valid(&self) -> bool {
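// An entry is valid only when both running checksums match; a zeroed (never written)
// or corrupted slot fails this test, so valid() doubles as an occupancy check.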
hash_val(&self.key).wrapping_add(hash_val(&self.val)) == self.kv_checksum
&& self.next + 1 == self.next_checksum
}
fn set_next<I: Into<u64>>(&mut self, next: I) {
let next = next.into();
self.next = next;
self.next_checksum = next + 1;
}
}
impl<K: Hash + Copy + PartialEq, V: Hash + Copy> Index<K, V> {
/// Create or load an index at `path`
pub fn new<P: AsRef<Path>>(path: &P) -> io::Result<Self> {
let mut lanes = ArrayVec::new();
// check for lane files already on disk
for n in 0..NUM_LANES {
let mut pathbuf = PathBuf::from(path.as_ref());
pathbuf.push(&format!("{:02x}", n));
if pathbuf.exists() {
let file =
OpenOptions::new().read(true).write(true).open(&pathbuf)?;
let lane_pages = Self::lane_pages(n);
let file_len = PAGE_SIZE as u64 * lane_pages as u64;
file.set_len(file_len)?;
unsafe { lanes.push(MmapMut::map_mut(&file)?) };
}
}
// find the number of already occupied pages
let mut num_pages = 0;
if let Some(last) = lanes.last() {
// help the type inference along a bit.
let last: &MmapMut = last;
// add up pages of all but the last lane, since they must all be full
let mut full_pages = 0;
for n in 0..lanes.len().saturating_sub(1) {
println!("lane {}, pages {}", n, Self::lane_pages(n));
full_pages += Self::lane_pages(n)
}
// do a binary search to find the last populated page in the last lane
let mut low_bound = 0;
let mut high_bound = Self::lane_pages(lanes.len() - 1) - 1;
while low_bound + 1 != high_bound {
let check = low_bound + (high_bound - low_bound) / 2;
println!(
"low bound: {}, high bound: {}, check {}",
low_bound, high_bound, check,
);
let page_ofs = PAGE_SIZE * check;
// is there a valid entry in this page?
for slot in 0..Self::entries_per_page() {
let slot_ofs =
page_ofs + slot * mem::size_of::<Entry<K, V>>();
let ptr = last.as_ptr();
let entry: &Entry<K, V> = unsafe {
mem::transmute(ptr.offset(slot_ofs as isize))
};
if entry.valid() {
low_bound = check;
break;
}
}
if low_bound != check {
high_bound = check
}
}
num_pages = full_pages + high_bound;
}
// create the index
let index = Index {
lanes: UnsafeCell::new(lanes),
path: PathBuf::from(path.as_ref()),
pages: Mutex::new(num_pages as u64),
_marker: PhantomData,
};
// initialize index with at least one page
if num_pages == 0 {
assert_eq!(index.new_page()?, 0);
}
Ok(index)
}
/// Returns how many pages have been allocated so far
pub fn pages(&self) -> usize {
*self.pages.lock() as usize
}
/// Returns how many pages fit into one lane
#[inline(always)]
fn lane_pages(n: usize) -> usize {
2_usize.pow(n as u32) * FIRST_LANE_PAGES
}
#[inline(always)]
fn entries_per_page() -> usize {
PAGE_SIZE / mem::size_of::<Entry<K, V>>()
}
// calculates the slot in the page this hashed key would
// occupy at a certain depth
#[inline(always)]
fn slot(key_hash: u64, depth: usize) -> usize {
(hash_val(&(key_hash + depth as u64)) % Self::entries_per_page() as u64)
as usize
}
// produces following output over page with FIRST_LANE_PAGES = 2
// (0, 0), (0, 1),
// (1, 0), (1, 1), (1, 2), (1, 3),
// (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
// ... and so on and so forth ...
#[inline(always)]
fn lane_page(page: usize) -> (usize, usize) {
let usize_bits = mem::size_of::<usize>() * 8;
let i = page / FIRST_LANE_PAGES + 1;
let lane = usize_bits - i.leading_zeros() as usize - 1;
let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES;
(lane, page)
}
fn new_lane(&self) -> io::Result<()> {
let lanes_ptr = self.lanes.get();
let lane_nr = unsafe { (*lanes_ptr).len() };
let num_pages = Self::lane_pages(lane_nr);
let mut path = self.path.clone();
path.push(format!("{:02x}", lane_nr));
let file_len = PAGE_SIZE as u64 * num_pages as u64;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len(file_len)?;
unsafe { (*lanes_ptr).push(MmapMut::map_mut(&file)?) }
Ok(())
}
fn new_page(&self) -> io::Result<u64> {
let mut page_nr = self.pages.lock();
let (_, offset) = Self::lane_page(*page_nr as usize);
if offset == 0 {
// create new lane
self.new_lane()?
}
let new_page_nr = *page_nr;
*page_nr += 1;
Ok(new_page_nr)
}
// Get a mutable reference to the `Entry`,
fn entry(&self, lane: usize, page: usize, slot: usize) -> &Entry<K, V> |
// Get a mutable reference to the `Entry`,
// locking the corresponding shard.
fn entry_mut(
&self,
lane: usize,
page: usize,
slot: usize,
) -> EntryMut<K, V> {
let shard = (page ^ slot) % NUM_SHARDS;
// Lock the entry for writing
let lock = SHARDS[shard].lock();
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
EntryMut {
entry: unsafe {
mem::transmute(
(*self.lanes.get())[lane]
.as_ptr()
.offset(slot_ofs as isize),
)
},
_lock: lock,
}
}
// Traverse the tree to find the entry for this key
fn find_key(&self, k: &K) -> io::Result<Found<K, V>> {
let mut depth = 0;
let mut abs_page = 0;
loop {
let hash = hash_val(&k);
let slot = Self::slot(hash, depth);
let (lane, page) = Self::lane_page(abs_page);
let entry = self.entry(lane, page, slot);
if !entry.valid() {
return Ok(Found::Invalid(lane, page, slot));
}
if &entry.key == k {
return Ok(Found::Some(entry));
} else if entry.next == 0 {
return Ok(Found::None(lane, page, slot));
} else {
abs_page | {
// Get a reference to the `Entry`
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
unsafe {
mem::transmute(
(*self.lanes.get())[lane].as_ptr().offset(slot_ofs as isize),
)
}
} | identifier_body |
lib.rs | > {
type Target = Entry<K, V>;
fn deref(&self) -> &Self::Target {
&self.entry
}
}
impl<'a, K, V> DerefMut for EntryMut<'a, K, V> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.entry
}
}
impl<K: Hash, V: Hash> Entry<K, V> {
fn new(key: K, val: V) -> Self {
let kv_checksum = hash_val(&key).wrapping_add(hash_val(&val));
let entry = Entry {
key,
val,
kv_checksum,
next: 0,
next_checksum: 0 + 1,
};
debug_assert!(entry.valid());
entry
}
fn valid(&self) -> bool {
hash_val(&self.key).wrapping_add(hash_val(&self.val)) == self.kv_checksum
&& self.next + 1 == self.next_checksum
}
fn set_next<I: Into<u64>>(&mut self, next: I) {
let next = next.into();
self.next = next;
self.next_checksum = next + 1;
}
}
impl<K: Hash + Copy + PartialEq, V: Hash + Copy> Index<K, V> {
/// Create or load an index at `path`
pub fn new<P: AsRef<Path>>(path: &P) -> io::Result<Self> {
let mut lanes = ArrayVec::new();
// check for lane files already on disk
for n in 0..NUM_LANES {
let mut pathbuf = PathBuf::from(path.as_ref());
pathbuf.push(&format!("{:02x}", n));
if pathbuf.exists() {
let file =
OpenOptions::new().read(true).write(true).open(&pathbuf)?;
let lane_pages = Self::lane_pages(n);
let file_len = PAGE_SIZE as u64 * lane_pages as u64;
file.set_len(file_len)?;
unsafe { lanes.push(MmapMut::map_mut(&file)?) };
}
}
// find the number of already occupied pages
let mut num_pages = 0;
if let Some(last) = lanes.last() {
// help the type inference along a bit.
let last: &MmapMut = last;
// add up pages of all but the last lane, since they must all be full
let mut full_pages = 0;
for n in 0..lanes.len().saturating_sub(1) {
println!("lane {}, pages {}", n, Self::lane_pages(n));
full_pages += Self::lane_pages(n)
}
// do a binary search to find the last populated page in the last lane
let mut low_bound = 0;
let mut high_bound = Self::lane_pages(lanes.len() - 1) - 1;
while low_bound + 1 != high_bound {
let check = low_bound + (high_bound - low_bound) / 2;
println!(
"low bound: {}, high bound: {}, check {}",
low_bound, high_bound, check,
);
let page_ofs = PAGE_SIZE * check;
// is there a valid entry in this page?
for slot in 0..Self::entries_per_page() {
let slot_ofs =
page_ofs + slot * mem::size_of::<Entry<K, V>>();
let ptr = last.as_ptr();
let entry: &Entry<K, V> = unsafe {
mem::transmute(ptr.offset(slot_ofs as isize))
};
if entry.valid() {
low_bound = check;
break;
}
}
if low_bound != check {
high_bound = check
}
}
num_pages = full_pages + high_bound;
}
| pages: Mutex::new(num_pages as u64),
_marker: PhantomData,
};
// initialize index with at least one page
if num_pages == 0 {
assert_eq!(index.new_page()?, 0);
}
Ok(index)
}
/// Returns how many pages have been allocated so far
pub fn pages(&self) -> usize {
*self.pages.lock() as usize
}
/// Returns how many pages fit into one lane
#[inline(always)]
fn lane_pages(n: usize) -> usize {
2_usize.pow(n as u32) * FIRST_LANE_PAGES
}
#[inline(always)]
fn entries_per_page() -> usize {
PAGE_SIZE / mem::size_of::<Entry<K, V>>()
}
// calculates the slot in the page this hashed key would
// occupy at a certain depth
#[inline(always)]
fn slot(key_hash: u64, depth: usize) -> usize {
(hash_val(&(key_hash + depth as u64)) % Self::entries_per_page() as u64)
as usize
}
// produces following output over page with FIRST_LANE_PAGES = 2
// (0, 0), (0, 1),
// (1, 0), (1, 1), (1, 2), (1, 3),
// (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
// ... and so on and so forth ...
#[inline(always)]
fn lane_page(page: usize) -> (usize, usize) {
let usize_bits = mem::size_of::<usize>() * 8;
let i = page / FIRST_LANE_PAGES + 1;
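// usize_bits - leading_zeros(i) - 1 computes floor(log2(i)), so each lane holds
// twice as many pages as the previous one (lane 0 holds FIRST_LANE_PAGES).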
let lane = usize_bits - i.leading_zeros() as usize - 1;
let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES;
(lane, page)
}
fn new_lane(&self) -> io::Result<()> {
let lanes_ptr = self.lanes.get();
let lane_nr = unsafe { (*lanes_ptr).len() };
let num_pages = Self::lane_pages(lane_nr);
let mut path = self.path.clone();
path.push(format!("{:02x}", lane_nr));
let file_len = PAGE_SIZE as u64 * num_pages as u64;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len(file_len)?;
unsafe { (*lanes_ptr).push(MmapMut::map_mut(&file)?) }
Ok(())
}
fn new_page(&self) -> io::Result<u64> {
let mut page_nr = self.pages.lock();
let (_, offset) = Self::lane_page(*page_nr as usize);
if offset == 0 {
// create new lane
self.new_lane()?
}
let new_page_nr = *page_nr;
*page_nr += 1;
Ok(new_page_nr)
}
// Get a mutable reference to the `Entry`,
fn entry(&self, lane: usize, page: usize, slot: usize) -> &Entry<K, V> {
// Get a reference to the `Entry`
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
unsafe {
mem::transmute(
(*self.lanes.get())[lane].as_ptr().offset(slot_ofs as isize),
)
}
}
// Get a mutable reference to the `Entry`,
// locking the corresponding shard.
fn entry_mut(
&self,
lane: usize,
page: usize,
slot: usize,
) -> EntryMut<K, V> {
let shard = (page ^ slot) % NUM_SHARDS;
// Lock the entry for writing
let lock = SHARDS[shard].lock();
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
EntryMut {
entry: unsafe {
mem::transmute(
(*self.lanes.get())[lane]
.as_ptr()
.offset(slot_ofs as isize),
)
},
_lock: lock,
}
}
// Traverse the tree to find the entry for this key
fn find_key(&self, k: &K) -> io::Result<Found<K, V>> {
let mut depth = 0;
let mut abs_page = 0;
loop {
let hash = hash_val(&k);
let slot = Self::slot(hash, depth);
let (lane, page) = Self::lane_page(abs_page);
let entry = self.entry(lane, page, slot);
if !entry.valid() {
return Ok(Found::Invalid(lane, page, slot));
}
if &entry.key == k {
return Ok(Found::Some(entry));
} else if entry.next == 0 {
return Ok(Found::None(lane, page, slot));
} else {
abs_page = | // create the index
let index = Index {
lanes: UnsafeCell::new(lanes),
path: PathBuf::from(path.as_ref()), | random_line_split |
lib.rs | {
type Target = Entry<K, V>;
fn | (&self) -> &Self::Target {
&self.entry
}
}
impl<'a, K, V> DerefMut for EntryMut<'a, K, V> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.entry
}
}
impl<K: Hash, V: Hash> Entry<K, V> {
fn new(key: K, val: V) -> Self {
let kv_checksum = hash_val(&key).wrapping_add(hash_val(&val));
let entry = Entry {
key,
val,
kv_checksum,
next: 0,
next_checksum: 0 + 1,
};
debug_assert!(entry.valid());
entry
}
fn valid(&self) -> bool {
hash_val(&self.key).wrapping_add(hash_val(&self.val)) == self.kv_checksum
&& self.next + 1 == self.next_checksum
}
fn set_next<I: Into<u64>>(&mut self, next: I) {
let next = next.into();
self.next = next;
self.next_checksum = next + 1;
}
}
impl<K: Hash + Copy + PartialEq, V: Hash + Copy> Index<K, V> {
/// Create or load an index at `path`
pub fn new<P: AsRef<Path>>(path: &P) -> io::Result<Self> {
let mut lanes = ArrayVec::new();
// check for lane files already on disk
for n in 0..NUM_LANES {
let mut pathbuf = PathBuf::from(path.as_ref());
pathbuf.push(&format!("{:02x}", n));
if pathbuf.exists() {
let file =
OpenOptions::new().read(true).write(true).open(&pathbuf)?;
let lane_pages = Self::lane_pages(n);
let file_len = PAGE_SIZE as u64 * lane_pages as u64;
file.set_len(file_len)?;
unsafe { lanes.push(MmapMut::map_mut(&file)?) };
}
}
// find the number of already occupied pages
let mut num_pages = 0;
if let Some(last) = lanes.last() {
// help the type inference along a bit.
let last: &MmapMut = last;
// add up pages of all but the last lane, since they must all be full
let mut full_pages = 0;
for n in 0..lanes.len().saturating_sub(1) {
println!("lane {}, pages {}", n, Self::lane_pages(n));
full_pages += Self::lane_pages(n)
}
// do a binary search to find the last populated page in the last lane
let mut low_bound = 0;
let mut high_bound = Self::lane_pages(lanes.len() - 1) - 1;
while low_bound + 1 != high_bound {
let check = low_bound + (high_bound - low_bound) / 2;
println!(
"low bound: {}, high bound: {}, check {}",
low_bound, high_bound, check,
);
let page_ofs = PAGE_SIZE * check;
// is there a valid entry in this page?
for slot in 0..Self::entries_per_page() {
let slot_ofs =
page_ofs + slot * mem::size_of::<Entry<K, V>>();
let ptr = last.as_ptr();
let entry: &Entry<K, V> = unsafe {
mem::transmute(ptr.offset(slot_ofs as isize))
};
if entry.valid() {
low_bound = check;
break;
}
}
if low_bound != check {
high_bound = check
}
}
num_pages = full_pages + high_bound;
}
// create the index
let index = Index {
lanes: UnsafeCell::new(lanes),
path: PathBuf::from(path.as_ref()),
pages: Mutex::new(num_pages as u64),
_marker: PhantomData,
};
// initialize index with at least one page
if num_pages == 0 {
assert_eq!(index.new_page()?, 0);
}
Ok(index)
}
/// Returns how many pages have been allocated so far
pub fn pages(&self) -> usize {
*self.pages.lock() as usize
}
/// Returns how many pages fit into one lane
#[inline(always)]
fn lane_pages(n: usize) -> usize {
2_usize.pow(n as u32) * FIRST_LANE_PAGES
}
#[inline(always)]
fn entries_per_page() -> usize {
PAGE_SIZE / mem::size_of::<Entry<K, V>>()
}
// calculates the slot in the page this hashed key would
// occupy at a certain depth
#[inline(always)]
fn slot(key_hash: u64, depth: usize) -> usize {
(hash_val(&(key_hash + depth as u64)) % Self::entries_per_page() as u64)
as usize
}
// produces following output over page with FIRST_LANE_PAGES = 2
// (0, 0), (0, 1),
// (1, 0), (1, 1), (1, 2), (1, 3),
// (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
// ... and so on and so forth ...
#[inline(always)]
fn lane_page(page: usize) -> (usize, usize) {
let usize_bits = mem::size_of::<usize>() * 8;
let i = page / FIRST_LANE_PAGES + 1;
let lane = usize_bits - i.leading_zeros() as usize - 1;
let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES;
(lane, page)
}
fn new_lane(&self) -> io::Result<()> {
let lanes_ptr = self.lanes.get();
let lane_nr = unsafe { (*lanes_ptr).len() };
let num_pages = Self::lane_pages(lane_nr);
let mut path = self.path.clone();
path.push(format!("{:02x}", lane_nr));
let file_len = PAGE_SIZE as u64 * num_pages as u64;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len(file_len)?;
unsafe { (*lanes_ptr).push(MmapMut::map_mut(&file)?) }
Ok(())
}
fn new_page(&self) -> io::Result<u64> {
let mut page_nr = self.pages.lock();
let (_, offset) = Self::lane_page(*page_nr as usize);
if offset == 0 {
// create new lane
self.new_lane()?
}
let new_page_nr = *page_nr;
*page_nr += 1;
Ok(new_page_nr)
}
// Get a mutable reference to the `Entry`,
fn entry(&self, lane: usize, page: usize, slot: usize) -> &Entry<K, V> {
// Get a reference to the `Entry`
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
unsafe {
mem::transmute(
(*self.lanes.get())[lane].as_ptr().offset(slot_ofs as isize),
)
}
}
// Get a mutable reference to the `Entry`,
// locking the corresponding shard.
fn entry_mut(
&self,
lane: usize,
page: usize,
slot: usize,
) -> EntryMut<K, V> {
let shard = (page ^ slot) % NUM_SHARDS;
// Lock the entry for writing
let lock = SHARDS[shard].lock();
let page_ofs = PAGE_SIZE * page;
let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
EntryMut {
entry: unsafe {
mem::transmute(
(*self.lanes.get())[lane]
.as_ptr()
.offset(slot_ofs as isize),
)
},
_lock: lock,
}
}
// Traverse the tree to find the entry for this key
fn find_key(&self, k: &K) -> io::Result<Found<K, V>> {
let mut depth = 0;
let mut abs_page = 0;
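// The slot probed in each page is derived from the key hash and the current depth;
// entry.next == 0 marks the end of the collision chain.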
loop {
let hash = hash_val(&k);
let slot = Self::slot(hash, depth);
let (lane, page) = Self::lane_page(abs_page);
let entry = self.entry(lane, page, slot);
if !entry.valid() {
return Ok(Found::Invalid(lane, page, slot));
}
if &entry.key == k {
return Ok(Found::Some(entry));
} else if entry.next == 0 {
return Ok(Found::None(lane, page, slot));
} else {
abs_page = | deref | identifier_name |
blockchain.py | transaction):
public_key = RSA.importKey(binascii.unhexlify(sender_public_key))
verifier = PKCS1_v1_5.new(public_key)
h = SHA.new(str(transaction).encode('utf8'))
try:
verifier.verify(h, binascii.unhexlify(signature))
return True
except ValueError:
return False
@staticmethod
def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):
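# a nonce is accepted when the SHA-256 hex digest of (transactions + previous hash + nonce) starts with 'difficulty' leading zero characters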
guess = (str(transactions) + str(last_hash) + str(nonce)).encode('utf8')
h = hashlib.new('sha256')
h.update(guess)
guess_hash = h.hexdigest()
return guess_hash[:difficulty] == '0' * difficulty
def proof_of_work(self):
last_block = self.chain[-1]
last_hash = self.hash(last_block)
nonce = 0
while self.valid_proof(self.transactions, last_hash, nonce) is False:
nonce += 1
return nonce
@staticmethod
def hash(block):
# We must ensure that the dictionary is ordered, otherwise we'll get inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode('utf8')
h = hashlib.new('sha256')
h.update(block_string)
return h.hexdigest()
def resolve_conflicts(self):
neighbours = self.nodes
new_chain = None
max_length = len(self.chain)
for node in neighbours:
response = requests.get('http://' + node + '/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
if new_chain:
self.chain = new_chain
return True
return False
def valid_chain(self, chain):
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
if block['previous_hash'] != self.hash(last_block):
return False
transactions = block['transactions'][:-1]
transaction_elements = ['sender_public_key', 'recipient_public_key', 'signature', 'product_name', 'product_details', 'amount']
transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in
transactions]
if not self.valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):
return False
last_block = block
current_index += 1
return True
def submit_transaction(self, sender_public_key, recipient_public_key, product_name, product_details,signature, amount):
transaction = OrderedDict({
'sender_public_key': sender_public_key,
'recipient_public_key': recipient_public_key,
'signature': signature,
'product_name': product_name,
'product_details': product_details,
'amount': amount
})
# Reward for mining a block
if sender_public_key == MINING_SENDER:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
# Transaction from wallet to another wallet
signature_verification = self.verify_transaction_signature(sender_public_key, signature, transaction)
if signature_verification:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
return False
# Instantiate the Blockchain
blockchain = Blockchain()
# Instantiate the Node
app = Flask(__name__)
app.secret_key = 'secretkey'
CORS(app)
@app.before_request
def before_request():
g.user = None
if 'miner_email' in session:
miner = minerdb.find_one( {"email": session['miner_email'],})
g.user = miner
@app.route('/')
def index():
return render_template('./index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('miner_email',None)
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
user = minerdb.find_one( { "email": email,})
if user and user.get('password') == password:
session['miner_email'] = user.get('email')
return redirect('/profile')
else:
return redirect('/login')
return render_template('login.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
name = request.form.get('minerName')
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
minerdata = {'name':name, "email":email, "password":password}
minerdb.insert_one(minerdata)
return redirect('/login')
return render_template('register.html')
@app.route('/logout')
def logout():
# remove the username from the session if it is there
session.pop('miner_email', None)
return redirect('/login')
@app.route('/profile')
def profile():
if not g.user:
return redirect('/login')
return render_template('profile.html')
# @app.route('/register_credentials', methods=['POST'])
# def minerRegistration():
# name = request.form.get('minerName')
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# minerdata = {'name':name, "email":email, "password":password}
# minerdb.insert_one(minerdata)
# return redirect('/login')
# @app.route('/login_credentials', methods=['POST'])
# def minerLogin():
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# user = minerdb.find_one( { "email": email, "password" : password})
# if user:
# return redirect('/')
# else:
# return redirect('/login')
# return 'Miner email is {} and password is {}'.format(email, password)
@app.route('/configure')
def configure():
return render_template('./configure.html')
@app.route('/transactions/get', methods=['GET'])
def get_transactions():
transactions = blockchain.transactions
response = {'transactions': transactions}
return jsonify(response), 200
@app.route('/chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
# new_chain = {
# 'data': response
# }
#
# transaction_blocks.insert_one(new_chain)
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm
nonce = blockchain.proof_of_work()
blockchain.submit_transaction(sender_public_key=MINING_SENDER,
recipient_public_key=blockchain.node_id,
product_name='',
product_details='',
signature='',
amount=MINING_REWARD)
last_block = blockchain.chain[-1]
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(nonce, previous_hash)
response = {
'message': 'New block created',
'timestamp': block['timestamp'],
'block_number': block['block_number'],
'transactions': block['transactions'],
'nonce': block['nonce'],
'previous_hash': block['previous_hash'],
}
u_id = uuid4()
# add data into mongodb atlas
new_chain = {
'u_id': u_id,
'data': response
}
transaction_blocks.insert_one(new_chain)
id = transaction_blocks.find_one({'u_id': u_id})
print(id)
ob_id = id.get('_id')
# over db lines
# QR Code generation
data = ob_id
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill="black", back_color="white")
img.save('test.png')
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.form
required = ['confirmation_sender_public_key', 'confirmation_recipient_public_key', 'confirmation_product_name', 'confirmation_product_details', 'transaction_signature',
'confirmation_amount']
if not all(k in values for k in required):
return 'Missing values', 400
transaction_results = blockchain.submit_transaction(values['confirmation_sender_public_key'],
values['confirmation_recipient_public_key'],
values['confirmation_product_name'],
values['confirmation_product_details'],
values['transaction_signature'],
values['confirmation_amount'])
if transaction_results is False:
response = {'message': 'Invalid transaction/signature'}
return jsonify(response), 406
else:
response = {'message': 'Transaction will be added to the Block ' + str(transaction_results)}
return jsonify(response), 201
@app.route('/nodes/get', methods=['GET'])
def get_nodes():
nodes = list(blockchain.nodes)
response = {'nodes': nodes}
return jsonify(response), 200
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def | register_node | identifier_name |
|
blockchain.py | ICULTY = 2
t = time()
class Blockchain:
def __init__(self):
self.transactions = []
self.chain = []
self.nodes = set()
self.node_id = str(uuid4()).replace('-', '')
# Create the genesis block
self.create_block(0, '00')
# data = {'block_number':0, "nonce": 0, "previous_hash": 00, "timestamp": 0, "transactions":[]}
# self.chain.append(data)
def register_node(self, node_url):
parsed_url = urlparse(node_url)
if parsed_url.netloc:
self.nodes.add(parsed_url.netloc)
elif parsed_url.path:
self.nodes.add(parsed_url.path)
else:
raise ValueError('Invalid URL')
def create_block(self, nonce, previous_hash):
"""
Add a block of transactions to the blockchain
"""
block = {'block_number': transaction_blocks.count() + 1,
'timestamp': ctime(t),
'transactions': self.transactions,
'nonce': nonce,
'previous_hash': previous_hash}
# Reset the current list of transactions
self.transactions = []
self.chain.append(block)
return block
def verify_transaction_signature(self, sender_public_key, signature, transaction):
public_key = RSA.importKey(binascii.unhexlify(sender_public_key))
verifier = PKCS1_v1_5.new(public_key)
h = SHA.new(str(transaction).encode('utf8'))
try:
verifier.verify(h, binascii.unhexlify(signature))
return True
except ValueError:
return False
@staticmethod
def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):
guess = (str(transactions) + str(last_hash) + str(nonce)).encode('utf8')
h = hashlib.new('sha256')
h.update(guess)
guess_hash = h.hexdigest()
return guess_hash[:difficulty] == '0' * difficulty
def proof_of_work(self):
last_block = self.chain[-1]
last_hash = self.hash(last_block)
nonce = 0
while self.valid_proof(self.transactions, last_hash, nonce) is False:
nonce += 1
return nonce
@staticmethod
def hash(block):
# We must ensure that the dictionary is ordered, otherwise we'll get inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode('utf8')
h = hashlib.new('sha256')
h.update(block_string)
return h.hexdigest()
def resolve_conflicts(self):
neighbours = self.nodes
new_chain = None
max_length = len(self.chain)
for node in neighbours:
response = requests.get('http://' + node + '/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
if new_chain:
self.chain = new_chain
return True
return False
def valid_chain(self, chain):
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
if block['previous_hash'] != self.hash(last_block):
return False
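# skip the last transaction (the mining reward) when re-checking the proof, since the nonce was found before the reward was appended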
transactions = block['transactions'][:-1]
transaction_elements = ['sender_public_key', 'recipient_public_key', 'signature', 'product_name', 'product_details', 'amount']
transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in
transactions]
if not self.valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):
return False
last_block = block
current_index += 1
return True
def submit_transaction(self, sender_public_key, recipient_public_key, product_name, product_details,signature, amount):
transaction = OrderedDict({
'sender_public_key': sender_public_key,
'recipient_public_key': recipient_public_key,
'signature': signature,
'product_name': product_name,
'product_details': product_details,
'amount': amount
})
# Reward for mining a block
if sender_public_key == MINING_SENDER:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
# Transaction from wallet to another wallet
signature_verification = self.verify_transaction_signature(sender_public_key, signature, transaction)
if signature_verification:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
return False
# Instantiate the Blockchain
blockchain = Blockchain()
# Instantiate the Node
app = Flask(__name__)
app.secret_key = 'secretkey'
CORS(app)
@app.before_request
def before_request():
g.user = None
if 'miner_email' in session:
miner = minerdb.find_one( {"email": session['miner_email'],})
g.user = miner
@app.route('/')
def index():
return render_template('./index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('miner_email',None)
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
user = minerdb.find_one( { "email": email,})
if user and user.get('password') == password:
session['miner_email'] = user.get('email')
return redirect('/profile')
else:
return redirect('/login')
return render_template('login.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
|
@app.route('/logout')
def logout():
# remove the username from the session if it is there
session.pop('miner_email', None)
return redirect('/login')
@app.route('/profile')
def profile():
if not g.user:
return redirect('/login')
return render_template('profile.html')
# @app.route('/register_credentials', methods=['POST'])
# def minerRegistration():
# name = request.form.get('minerName')
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# minerdata = {'name':name, "email":email, "password":password}
# minerdb.insert_one(minerdata)
# return redirect('/login')
# @app.route('/login_credentials', methods=['POST'])
# def minerLogin():
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# user = minerdb.find_one( { "email": email, "password" : password})
# if user:
# return redirect('/')
# else:
# return redirect('/login')
# return 'Miner email is {} and password is {}'.format(email, password)
@app.route('/configure')
def configure():
return render_template('./configure.html')
@app.route('/transactions/get', methods=['GET'])
def get_transactions():
transactions = blockchain.transactions
response = {'transactions': transactions}
return jsonify(response), 200
@app.route('/chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
# new_chain = {
# 'data': response
# }
#
# transaction_blocks.insert_one(new_chain)
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm
nonce = blockchain.proof_of_work()
blockchain.submit_transaction(sender_public_key=MINING_SENDER,
recipient_public_key=blockchain.node_id,
product_name='',
product_details='',
signature='',
amount=MINING_REWARD)
last_block = blockchain.chain[-1]
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(nonce, previous_hash)
response = {
'message': 'New block created',
'timestamp': block['timestamp'],
'block_number': block['block_number'],
'transactions': block['transactions'],
'nonce': block['nonce'],
'previous_hash': block['previous_hash'],
}
u_id = uuid4()
# add data into mongodb atlas
new_chain = {
'u_id': u_id,
'data': response
}
transaction_blocks.insert_one(new_chain)
id = transaction_blocks.find_one({'u_id': u_id})
print(id)
ob_id = id.get('_id')
# over db lines
# QR Code generation
data = ob_id
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill="black", back_color="white")
img.save('test.png')
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.form
required = ['confirmation_sender_public_key', 'confirmation_recipient_public_key', 'confirmation_product_name', 'confirmation_product_details', 'transaction_signature',
'confirmation_amount']
| if request.method == 'POST':
name = request.form.get('minerName')
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
minerdata = {'name':name, "email":email, "password":password}
minerdb.insert_one(minerdata)
return redirect('/login')
return render_template('register.html') | identifier_body |
blockchain.py | ICULTY = 2
t = time()
class Blockchain:
def __init__(self):
self.transactions = []
self.chain = []
self.nodes = set()
self.node_id = str(uuid4()).replace('-', '')
# Create the genesis block
self.create_block(0, '00')
# data = {'block_number':0, "nonce": 0, "previous_hash": 00, "timestamp": 0, "transactions":[]}
# self.chain.append(data)
def register_node(self, node_url):
parsed_url = urlparse(node_url)
if parsed_url.netloc:
self.nodes.add(parsed_url.netloc)
elif parsed_url.path:
self.nodes.add(parsed_url.path)
else:
raise ValueError('Invalid URL')
def create_block(self, nonce, previous_hash):
"""
Add a block of transactions to the blockchain
"""
block = {'block_number': transaction_blocks.count() + 1,
'timestamp': ctime(t),
'transactions': self.transactions,
'nonce': nonce,
'previous_hash': previous_hash}
# Reset the current list of transactions
self.transactions = []
self.chain.append(block)
return block
def verify_transaction_signature(self, sender_public_key, signature, transaction):
public_key = RSA.importKey(binascii.unhexlify(sender_public_key))
verifier = PKCS1_v1_5.new(public_key)
h = SHA.new(str(transaction).encode('utf8'))
try:
verifier.verify(h, binascii.unhexlify(signature))
return True
except ValueError:
return False
@staticmethod
def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):
guess = (str(transactions) + str(last_hash) + str(nonce)).encode('utf8')
h = hashlib.new('sha256')
h.update(guess)
guess_hash = h.hexdigest()
return guess_hash[:difficulty] == '0' * difficulty
def proof_of_work(self):
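# brute-force search: keep incrementing the nonce until valid_proof accepts the pending transactions against the hash of the last block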
last_block = self.chain[-1]
last_hash = self.hash(last_block)
nonce = 0
while self.valid_proof(self.transactions, last_hash, nonce) is False:
nonce += 1
return nonce
@staticmethod
def hash(block):
# We must ensure that the dictionary is ordered, otherwise we'll get inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode('utf8')
h = hashlib.new('sha256')
h.update(block_string)
return h.hexdigest()
def resolve_conflicts(self):
neighbours = self.nodes
new_chain = None
max_length = len(self.chain)
for node in neighbours:
response = requests.get('http://' + node + '/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
if new_chain:
self.chain = new_chain
return True
return False
def valid_chain(self, chain):
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
if block['previous_hash'] != self.hash(last_block):
return False
transactions = block['transactions'][:-1]
transaction_elements = ['sender_public_key', 'recipient_public_key', 'signature', 'product_name', 'product_details', 'amount']
transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in
transactions]
if not self.valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):
return False
last_block = block
current_index += 1
return True
def submit_transaction(self, sender_public_key, recipient_public_key, product_name, product_details,signature, amount):
transaction = OrderedDict({
'sender_public_key': sender_public_key,
'recipient_public_key': recipient_public_key,
'signature': signature,
'product_name': product_name,
'product_details': product_details,
'amount': amount
})
# Reward for mining a block
if sender_public_key == MINING_SENDER:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
# Transaction from wallet to another wallet
signature_verification = self.verify_transaction_signature(sender_public_key, signature, transaction)
if signature_verification:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
return False
# Instantiate the Blockchain
blockchain = Blockchain()
# Instantiate the Node
app = Flask(__name__)
app.secret_key = 'secretkey'
CORS(app)
@app.before_request
def before_request():
g.user = None
if 'miner_email' in session:
miner = minerdb.find_one( {"email": session['miner_email'],})
g.user = miner
@app.route('/')
def index():
return render_template('./index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('miner_email',None)
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
user = minerdb.find_one( { "email": email,})
if user and user.get('password') == password:
session['miner_email'] = user.get('email')
return redirect('/profile')
else:
return redirect('/login')
return render_template('login.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
name = request.form.get('minerName')
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
minerdata = {'name':name, "email":email, "password":password}
minerdb.insert_one(minerdata)
return redirect('/login')
return render_template('register.html')
@app.route('/logout')
def logout():
# remove the username from the session if it is there
session.pop('miner_email', None)
return redirect('/login')
@app.route('/profile')
def profile():
if not g.user:
return redirect('/login')
return render_template('profile.html')
# @app.route('/register_credentials', methods=['POST'])
# def minerRegistration():
# name = request.form.get('minerName')
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# minerdata = {'name':name, "email":email, "password":password}
# minerdb.insert_one(minerdata)
# return redirect('/login')
# @app.route('/login_credentials', methods=['POST'])
# def minerLogin():
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass') |
# user = minerdb.find_one( { "email": email, "password" : password})
# if user:
# return redirect('/')
# else:
# return redirect('/login')
# return 'Miner email is {} and password is {}'.format(email, password)
@app.route('/configure')
def configure():
return render_template('./configure.html')
@app.route('/transactions/get', methods=['GET'])
def get_transactions():
transactions = blockchain.transactions
response = {'transactions': transactions}
return jsonify(response), 200
@app.route('/chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
# new_chain = {
# 'data': response
# }
#
# transaction_blocks.insert_one(new_chain)
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm
nonce = blockchain.proof_of_work()
blockchain.submit_transaction(sender_public_key=MINING_SENDER,
recipient_public_key=blockchain.node_id,
product_name='',
product_details='',
signature='',
amount=MINING_REWARD)
last_block = blockchain.chain[-1]
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(nonce, previous_hash)
response = {
'message': 'New block created',
'timestamp': block['timestamp'],
'block_number': block['block_number'],
'transactions': block['transactions'],
'nonce': block['nonce'],
'previous_hash': block['previous_hash'],
}
u_id = uuid4()
# add data into mongodb atlas
new_chain = {
'u_id': u_id,
'data': response
}
transaction_blocks.insert_one(new_chain)
id = transaction_blocks.find_one({'u_id': u_id})
print(id)
ob_id = id.get('_id')
# over db lines
# QR Code generation
data = ob_id
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill="black", back_color="white")
img.save('test.png')
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.form
required = ['confirmation_sender_public_key', 'confirmation_recipient_public_key', 'confirmation_product_name', 'confirmation_product_details', 'transaction_signature',
'confirmation_amount']
| random_line_split |
|
blockchain.py | (last_hash) + str(nonce)).encode('utf8')
h = hashlib.new('sha256')
h.update(guess)
guess_hash = h.hexdigest()
return guess_hash[:difficulty] == '0' * difficulty
def proof_of_work(self):
last_block = self.chain[-1]
last_hash = self.hash(last_block)
nonce = 0
while self.valid_proof(self.transactions, last_hash, nonce) is False:
nonce += 1
return nonce
@staticmethod
def hash(block):
# We must ensure that the dictionary is ordered, otherwise we'll get inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode('utf8')
h = hashlib.new('sha256')
h.update(block_string)
return h.hexdigest()
def resolve_conflicts(self):
neighbours = self.nodes
new_chain = None
max_length = len(self.chain)
for node in neighbours:
response = requests.get('http://' + node + '/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
if new_chain:
self.chain = new_chain
return True
return False
def valid_chain(self, chain):
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
if block['previous_hash'] != self.hash(last_block):
return False
transactions = block['transactions'][:-1]
transaction_elements = ['sender_public_key', 'recipient_public_key', 'signature', 'product_name', 'product_details', 'amount']
transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in
transactions]
if not self.valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):
return False
last_block = block
current_index += 1
return True
def submit_transaction(self, sender_public_key, recipient_public_key, product_name, product_details,signature, amount):
transaction = OrderedDict({
'sender_public_key': sender_public_key,
'recipient_public_key': recipient_public_key,
'signature': signature,
'product_name': product_name,
'product_details': product_details,
'amount': amount
})
# Reward for mining a block
if sender_public_key == MINING_SENDER:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
# Transaction from wallet to another wallet
signature_verification = self.verify_transaction_signature(sender_public_key, signature, transaction)
if signature_verification:
self.transactions.append(transaction)
return len(self.chain) + 1
else:
return False
# Instantiate the Blockchain
blockchain = Blockchain()
# Instantiate the Node
app = Flask(__name__)
app.secret_key = 'secretkey'
CORS(app)
@app.before_request
def before_request():
g.user = None
if 'miner_email' in session:
miner = minerdb.find_one( {"email": session['miner_email'],})
g.user = miner
@app.route('/')
def index():
return render_template('./index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('miner_email',None)
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
user = minerdb.find_one( { "email": email,})
if user and user.get('password') == password:
session['miner_email'] = user.get('email')
return redirect('/profile')
else:
return redirect('/login')
return render_template('login.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
name = request.form.get('minerName')
email = request.form.get('minerEmail')
password = request.form.get('minerPass')
minerdata = {'name':name, "email":email, "password":password}
minerdb.insert_one(minerdata)
return redirect('/login')
return render_template('register.html')
@app.route('/logout')
def logout():
# remove the username from the session if it is there
session.pop('miner_email', None)
return redirect('/login')
@app.route('/profile')
def profile():
if not g.user:
return redirect('/login')
return render_template('profile.html')
# @app.route('/register_credentials', methods=['POST'])
# def minerRegistration():
# name = request.form.get('minerName')
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# minerdata = {'name':name, "email":email, "password":password}
# minerdb.insert_one(minerdata)
# return redirect('/login')
# @app.route('/login_credentials', methods=['POST'])
# def minerLogin():
# email = request.form.get('minerEmail')
# password = request.form.get('minerPass')
# user = minerdb.find_one( { "email": email, "password" : password})
# if user:
# return redirect('/')
# else:
# return redirect('/login')
# return 'Miner email is {} and password is {}'.format(email, password)
@app.route('/configure')
def configure():
return render_template('./configure.html')
@app.route('/transactions/get', methods=['GET'])
def get_transactions():
transactions = blockchain.transactions
response = {'transactions': transactions}
return jsonify(response), 200
@app.route('/chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
# new_chain = {
# 'data': response
# }
#
# transaction_blocks.insert_one(new_chain)
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm
nonce = blockchain.proof_of_work()
blockchain.submit_transaction(sender_public_key=MINING_SENDER,
recipient_public_key=blockchain.node_id,
product_name='',
product_details='',
signature='',
amount=MINING_REWARD)
last_block = blockchain.chain[-1]
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(nonce, previous_hash)
response = {
'message': 'New block created',
'timestamp': block['timestamp'],
'block_number': block['block_number'],
'transactions': block['transactions'],
'nonce': block['nonce'],
'previous_hash': block['previous_hash'],
}
u_id = uuid4()
# add data into mongodb atlas
new_chain = {
'u_id': u_id,
'data': response
}
transaction_blocks.insert_one(new_chain)
id = transaction_blocks.find_one({'u_id': u_id})
print(id)
ob_id = id.get('_id')
# over db lines
# QR Code generation
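# the QR payload is the MongoDB ObjectId of the stored block, so the block can be looked up later by scanning the code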
data = ob_id
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill="black", back_color="white")
img.save('test.png')
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.form
required = ['confirmation_sender_public_key', 'confirmation_recipient_public_key', 'confirmation_product_name', 'confirmation_product_details', 'transaction_signature',
'confirmation_amount']
if not all(k in values for k in required):
return 'Missing values', 400
transaction_results = blockchain.submit_transaction(values['confirmation_sender_public_key'],
values['confirmation_recipient_public_key'],
values['confirmation_product_name'],
values['confirmation_product_details'],
values['transaction_signature'],
values['confirmation_amount'])
if transaction_results is False:
response = {'message': 'Invalid transaction/signature'}
return jsonify(response), 406
else:
response = {'message': 'Transaction will be added to the Block ' + str(transaction_results)}
return jsonify(response), 201
@app.route('/nodes/get', methods=['GET'])
def get_nodes():
nodes = list(blockchain.nodes)
response = {'nodes': nodes}
return jsonify(response), 200
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def register_node():
values = request.form
# 127.0.0.1:5002,127.0.0.1:5003, 127.0.0.1:5004
nodes = values.get('nodes').replace(' ', '').split(',')
if nodes is None:
return 'Error: Please supply a valid list of nodes', 400
for node in nodes:
| blockchain.register_node(node) | conditional_block |
|
HaarROG.py | 6004,
0.7376237623762376, 0.6530612244897959, 0.7270531400966184, 0.7558823529411764]
'''
#==========================================================================
#command to install opencv
#pip install opencv-python
#==========================================================================
#the program uses json to format its data
'''
"1_11_2014_12_13_38_590.bmp38467": {
"fileref": "",
"size": 38467,
"filename": "1_11_2014_12_13_38_590.bmp",
"base64_img_data": "",
"file_attributes": {},
"regions": {
"0": {
"shape_attributes": {
"name": "rect",
"x": 186,
"y": 203,
"width": 75,
"height": 21
},
"region_attributes": {}
}
}
}
'''
#==========================================================================
import math
import random
import PIL
import cv2
import numpy as np
#json is used to store the data, as the most convenient format
import json
#metrics
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import average_precision_score
#plotting
import matplotlib.pyplot as plt
import sys
#==========================================================================
#helper procedures
def changeOpCharac(detectListParam, markedDataParam, key, rateCh, lowerBorder, topBorder):
#list of the unique number plates that were found
#list of all number plate detections
findPlates = []
findUnicPlates = []
#make copies so we work with local parameters
detectList = detectListParam.copy()
markedData = markedDataParam.copy()
for i in range(len(detectList)):
x1 = 0
x2 = 0
y1 = 0
y2 = 0
mx1 = 0
mx2 = 0
my1 = 0
my2 = 0
#zero-based numbering is essential here
for j in range(len(markedData[key]['regions'])):
#build the list of annotated regions from the json markup
markedNumPlatesList = [
markedData[key]['regions'][str(j)]['shape_attributes']['x'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'],
markedData[key]['regions'][str(j)]['shape_attributes']['x'] + markedData[key]['regions'][str(j)]['shape_attributes']['width'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'] + markedData[key]['regions'][str(j)]['shape_attributes']['height']
]
#print('LL')
#print(detectList)
#print('MNPL')
#print(markedNumPlatesList)
#x1 < x2
#ordered by x
if detectList[i][0] < detectList[i][2]:
x1 = detectList[i][0]
x2 = detectList[i][2]
else:
x1 = detectList[i][2]
x2 = detectList[i][0]
#ordered by x
if markedNumPlatesList[0] < markedNumPlatesList[2]:
mx1 = markedNumPlatesList[0]
mx2 = markedNumPlatesList[2]
else:
mx1 = markedNumPlatesList[2]
mx2 = markedNumPlatesList[0]
#y1 < y2
#ordered by y
if detectList[i][1] < detectList[i][3]:
y1 = detectList[i][1]
y2 = detectList[i][3]
else:
y1 = detectList[i][3]
y2 = detectList[i][1]
#ordered by y
if markedNumPlatesList[1] < markedNumPlatesList[3]:
my1 = markedNumPlatesList[1]
my2 = markedNumPlatesList[3]
else:
my1 = markedNumPlatesList[3]
my2 = markedNumPlatesList[1]
#print(x1, x2, mx1, mx2, y1, y2, my1, my2)
#find the intersection of the segments
xIntersection = max(0, min(x2, mx2) - max(x1, mx1))
yIntersection = max(0, min(y2, my2) - max(y1, my1))
#print('xIntersection ' + str(xIntersection))
#print('yIntersection ' + str(yIntersection))
#compute the areas
detectNumArea = math.sqrt((x2 - x1)**2) * math.sqrt((y2 - y1)**2)
detectNumAreaInter = xIntersection * yIntersection
numArea = math.sqrt((markedNumPlatesList[0] - markedNumPlatesList[2])**2) * math.sqrt((markedNumPlatesList[1] - markedNumPlatesList[3])**2)
#print('detectNumAreaInter / numArea: ' + str(detectNumAreaInter / numArea))
#print('detectNumArea / numArea: ' + str(detectNumArea / numArea))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder):
findPlates.append(str(j))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder) and (str(j) not in findUnicPlates):
findUnicPlates.append(str(j))
#print(findPlates, ' findPlates')
#print(detectList, ' detectList')
#print(findUnicPlates, ' findUnicPlates')
#print(len(markedData[key]['regions']), ' len(markedData[key][\'regions\'])')
rateCh.tp += len(findPlates)
rateCh.fp += len(detectList) - len(findPlates)
rateCh.fn += len(markedData[key]['regions']) - len(findUnicPlates)
return rateCh
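# Hypothetical sanity check (not part of HaarROG.py): two boxes in (x1, y1, x2, y2) form,
# a detection and a marked plate, reproduce the intersection/area ratios computed above
# and compared against the 0.7 and 1.8 borders used later in mainProcedure.
def overlap_ratios(det, mark):
    x_inter = max(0, min(det[2], mark[2]) - max(det[0], mark[0]))
    y_inter = max(0, min(det[3], mark[3]) - max(det[1], mark[1]))
    det_area = abs(det[2] - det[0]) * abs(det[3] - det[1])
    mark_area = abs(mark[2] - mark[0]) * abs(mark[3] - mark[1])
    return x_inter * y_inter / mark_area, det_area / mark_area

# prints roughly (0.99, 1.27): this detection would count as correct
print(overlap_ratios((180, 200, 260, 225), (186, 203, 261, 224)))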
def drawMarkAndDetect(detectReg, markedRegions, itemKey, image):
#make copies of the lists so that we work with local parameters
localMarkedRegions = markedRegions.copy()
localDetectReg = detectReg.copy()
markedNumPlates = []
for i in range(len(localMarkedRegions[itemKey]['regions'])):
batch = []
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['y'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'] + localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['width'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['y'] + localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['height'])
markedNumPlates.append(batch)
'''
for (x, y, x1, y1) in localDetectReg:
cv2.rectangle(image, (x, y), (x1, y1), (random.randint(50, 250), 232, random.randint(50, 250)), -1)
for (x, y, x1, y1) in markedNumPlates:
cv2.rectangle(image, (x, y), (x1, y1), (0, 250, 250), 2)
cv2.imshow("Image", image)
cv2.waitKey(0)
'''
def makeDetectetData(image, numplateCascade, scaleF, minN):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#check whether a number plate is present
#these parameters have to be varied when evaluating the cascade
numPlates = numplateCascade.detectMultiScale(
gray,
scaleFactor = scaleF,
minNeighbors = minN
)
localDetectData = []
#avoid an error when the cascade has not found any number plates
#build a list in a convenient format, since numPlates entries use the (x, y, w, h) layout
if len(numPlates) == | 0:
local | identifier_name |
|
HaarROG.py | ListParam, markedDataParam, key, rateCh, lowerBorder, topBorder):
#list for the unique number plates that were found
#list for the total number of found number plates
findPlates = []
findUnicPlates = []
#make copies so that we work with local parameters
detectList = detectListParam.copy()
markedData = markedDataParam.copy()
for i in range(len(detectList)):
x1 = 0
x2 = 0
y1 = 0
y2 = 0
mx1 = 0
mx2 = 0
my1 = 0
my2 = 0
#zero-based numbering is essential here
for j in range(len(markedData[key]['regions'])):
#build the list of labelled data from the json
markedNumPlatesList = [
markedData[key]['regions'][str(j)]['shape_attributes']['x'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'],
markedData[key]['regions'][str(j)]['shape_attributes']['x'] + markedData[key]['regions'][str(j)]['shape_attributes']['width'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'] + markedData[key]['regions'][str(j)]['shape_attributes']['height']
]
#print('LL')
#print(detectList)
#print('MNPL')
#print(markedNumPlatesList)
#x1 < x2
#ordered by x
if detectList[i][0] < detectList[i][2]:
x1 = detectList[i][0]
x2 = detectList[i][2]
else:
x1 = detectList[i][2]
x2 = detectList[i][0]
#ordered by x
if markedNumPlatesList[0] < markedNumPlatesList[2]:
mx1 = markedNumPlatesList[0]
mx2 = markedNumPlatesList[2]
else:
mx1 = markedNumPlatesList[2]
mx2 = markedNumPlatesList[0]
#y1 < y2
#ordered by y
if detectList[i][1] < detectList[i][3]:
y1 = detectList[i][1]
y2 = detectList[i][3]
else:
y1 = detectList[i][3]
y2 = detectList[i][1]
#ordered by y
if markedNumPlatesList[1] < markedNumPlatesList[3]:
my1 = markedNumPlatesList[1]
my2 = markedNumPlatesList[3]
else:
my1 = markedNumPlatesList[3]
my2 = markedNumPlatesList[1]
#print(x1, x2, mx1, mx2, y1, y2, my1, my2)
#find the intersection of the segments
xIntersection = max(0, min(x2, mx2) - max(x1, mx1))
yIntersection = max(0, min(y2, my2) - max(y1, my1))
#print('xIntersection ' + str(xIntersection))
#print('yIntersection ' + str(yIntersection))
#compute the areas
detectNumArea = math.sqrt((x2 - x1)**2) * math.sqrt((y2 - y1)**2)
detectNumAreaInter = xIntersection * yIntersection
numArea = math.sqrt((markedNumPlatesList[0] - markedNumPlatesList[2])**2) * math.sqrt((markedNumPlatesList[1] - markedNumPlatesList[3])**2)
#print('detectNumAreaInter / numArea: ' + str(detectNumAreaInter / numArea))
#print('detectNumArea / numArea: ' + str(detectNumArea / numArea))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder):
findPlates.append(str(j))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder) and (str(j) not in findUnicPlates):
findUnicPlates.append(str(j))
#print(findPlates, ' findPlates')
#print(detectList, ' detectList')
#print(findUnicPlates, ' findUnicPlates')
#print(len(markedData[key]['regions']), ' len(markedData[key][\'regions\'])')
rateCh.tp += len(findPlates)
rateCh.fp += len(detectList) - len(findPlates)
rateCh.fn += len(markedData[key]['regions']) - len(findUnicPlates)
return rateCh
def drawMarkAndDetect(detectReg, markedRegions, itemKey, image):
#make copies of the lists so that we work with local parameters
localMarkedRegions = markedRegions.copy()
localDetectReg = detectReg.copy()
markedNumPlates = []
for i in range(len(localMarkedRegions[itemKey]['regions'])):
batch = []
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['y'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'] + localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['width'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['y'] + localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['height'])
markedNumPlates.append(batch)
'''
for (x, y, x1, y1) in localDetectReg:
cv2.rectangle(image, (x, y), (x1, y1), (random.randint(50, 250), 232, random.randint(50, 250)), -1)
for (x, y, x1, y1) in markedNumPlates:
cv2.rectangle(image, (x, y), (x1, y1), (0, 250, 250), 2)
cv2.imshow("Image", image)
cv2.waitKey(0)
'''
def makeDetectetData(image, numplateCascade, scaleF, minN):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#check whether a number plate is present
#these parameters have to be varied when evaluating the cascade
numPlates = numplateCascade.detectMultiScale(
gray,
scaleFactor = scaleF,
minNeighbors = minN
)
localDetectData = []
#avoid an error when the cascade has not found any number plates
#build a list in a convenient format, since numPlates entries use the (x, y, w, h) layout
if len(numPlates) == 0:
localDetectData = []
else:
for i in range(len(numPlates)):
bufData = [numPlates[i][0], numPlates[i][1], numPlates[i][0] + numPlates[i][2], numPlates[i][1] + numPlates[i][3]]
localDetectData.append(bufData)
return localDetectData
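# Hypothetical usage sketch (not part of HaarROG.py): how makeDetectetData would be called
# for a single image; the cascade file and the image path below are placeholders, and the
# scaleFactor / minNeighbors values are just example settings.
def exampleDetectOnImage():
    cascade = cv2.CascadeClassifier('haarcascade_russian_plate_number.xml')
    image = cv2.imread('1_11_2014_12_13_38_590.bmp')
    boxes = makeDetectetData(image, cascade, 1.3, 5)
    #each entry is [x1, y1, x2, y2] in pixel coordinates
    print(boxes)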
#==========================================================================
def mainProcedure(haarPath, dataPath, drivePath):
print('CV2 version: ')
print(cv2.__version__ + '\n')
# -----------------------------------------------------------------------
# load the data
# load the cascade
try:
numplateCascade = cv2.CascadeClassifier(haarPath)
except:
print('ERROR: cv2.CascadeClassifier(haarPath) \n')
sys.exit()
try:
# load the file with the labelled test set
with open(dataPath, "r") as read_file:
testData = json.load(read_file)
# build the list of keys of the dictionary
keys = list(testData.keys())
except:
print('ERROR: dataPath \n')
sys.exit()
# -----------------------------------------------------------------------
# testing
class Character | istics:
# positive characteristics
tp = 0
tn = 0
# negative characteristics
fp = 0
fn = 0
rateCh = Characteristics()
# borders that decide whether a plate was detected correctly or not,
# applied to the intersection area of the plate bounding boxes
lowerBorder = 0.7
topBorder = 1.8
# two lists for building the PR curve
precisionList = []
recallList = []
# points for plotting the graph | identifier_body
|
HaarROG.py | "x": 186,
"y": 203,
"width": 75,
"height": 21
},
"region_attributes": {}
}
}
}
'''
#==========================================================================
import math
import random
import PIL
import cv2
import numpy as np
#для хранения данных используем json, наиболее удобный формат
import json
#метрики
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import average_precision_score
#Построение графиков
import matplotlib.pyplot as plt
import sys
#==========================================================================
#helper procedures
def changeOpCharac(detectListParam, markedDataParam, key, rateCh, lowerBorder, topBorder):
#список для найденных уникальных номерных пластин
#список для общего количества найденных номерных пластин
findPlates = []
findUnicPlates = []
#делаем копии, чтобы работать с локальными параметрами
detectList = detectListParam.copy()
markedData = markedDataParam.copy()
for i in range(len(detectList)):
x1 = 0
x2 = 0
y1 = 0
y2 = 0
mx1 = 0
mx2 = 0
my1 = 0
my2 = 0
#в данном случае очень важная нумерация с нуля
for j in range(len(markedData[key]['regions'])):
#формируем список для размеченных данных из json
markedNumPlatesList = [
markedData[key]['regions'][str(j)]['shape_attributes']['x'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'],
markedData[key]['regions'][str(j)]['shape_attributes']['x'] + markedData[key]['regions'][str(j)]['shape_attributes']['width'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'] + markedData[key]['regions'][str(j)]['shape_attributes']['height']
]
#print('LL')
#print(detectList)
#print('MNPL')
#print(markedNumPlatesList)
#x1 < x2
#упорядочили по x
if detectList[i][0] < detectList[i][2]:
x1 = detectList[i][0]
x2 = detectList[i][2]
else:
x1 = detectList[i][2]
x2 = detectList[i][0]
#упорядочили по x
if markedNumPlatesList[0] < markedNumPlatesList[2]:
mx1 = markedNumPlatesList[0]
mx2 = markedNumPlatesList[2]
else:
mx1 = markedNumPlatesList[2]
mx2 = markedNumPlatesList[0]
#y1 < y2
#упорядочили по y
if detectList[i][1] < detectList[i][3]:
y1 = detectList[i][1]
y2 = detectList[i][3]
else:
y1 = detectList[i][3]
y2 = detectList[i][1]
#упорядочили по x
if markedNumPlatesList[1] < markedNumPlatesList[3]:
my1 = markedNumPlatesList[1]
my2 = markedNumPlatesList[3]
else:
my1 = markedNumPlatesList[3]
my2 = markedNumPlatesList[1]
#print(x1, x2, mx1, mx2, y1, y2, my1, my2)
#находим пересечение отрезков
xIntersection = max(0, min(x2, mx2) - max(x1, mx1))
yIntersection = max(0, min(y2, my2) - max(y1, my1))
#print('xIntersection ' + str(xIntersection))
#print('yIntersection ' + str(yIntersection))
#вычисляем площади
detectNumArea = math.sqrt((x2 - x1)**2) * math.sqrt((y2 - y1)**2)
detectNumAreaInter = xIntersection * yIntersection
numArea = math.sqrt((markedNumPlatesList[0] - markedNumPlatesList[2])**2) * math.sqrt((markedNumPlatesList[1] - markedNumPlatesList[3])**2)
#print('detectNumAreaInter / numArea: ' + str(detectNumAreaInter / numArea))
#print('detectNumArea / numArea: ' + str(detectNumArea / numArea))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder):
findPlates.append(str(j))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder) and (str(j) not in findUnicPlates):
findUnicPlates.append(str(j))
#print(findPlates, ' findPlates')
#print(detectList, ' detectList')
#print(findUnicPlates, ' findUnicPlates')
#print(len(markedData[key]['regions']), ' len(markedData[key][\'regions\'])')
rateCh.tp += len(findPlates)
rateCh.fp += len(detectList) - len(findPlates)
rateCh.fn += len(markedData[key]['regions']) - len(findUnicPlates)
return rateCh
def drawMarkAndDetect(detectReg, markedRegions, itemKey, image):
#делаем копии списков, чтобы работать с локальными параметрами
localMarkedRegions = markedRegions.copy()
localDetectReg = detectReg.copy()
markedNumPlates = []
for i in range(len(localMarkedRegions[itemKey]['regions'])):
batch = []
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['y'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'] + localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['width'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['y'] + localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['height'])
markedNumPlates.append(batch)
'''
for (x, y, x1, y1) in localDetectReg:
cv2.rectangle(image, (x, y), (x1, y1), (random.randint(50, 250), 232, random.randint(50, 250)), -1)
for (x, y, x1, y1) in markedNumPlates:
cv2.rectangle(image, (x, y), (x1, y1), (0, 250, 250), 2)
cv2.imshow("Image", image)
cv2.waitKey(0)
'''
def makeDetectetData(image, numplateCascade, scaleF, minN):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#определяем, присутствует ли номерная пластина
#характеристики нужно менять, при оценке каскада
numPlates = numplateCascade.detectMultiScale(
gray,
scaleFactor = scaleF,
minNeighbors = minN
)
localDetectData = []
#для того, чтобы не появлялась ошибка, когда каскад не нашел номерных пластин
#создаем список в удобном для нас формате, т.к. для numPlates характерна запись (x, y, w, h)
if len(numPlates) == 0:
localDetectData = []
else:
for i in range(len(numPlates)):
bufData = [numPlates[i][0], numPlates[i][1], numPlates[i][0] + numPlates[i][2], numPlates[i][1] + numPlates[i][3]]
localDetectData.append(bufData)
return localDetectData
#==========================================================================
def mainProcedure(haarPath, dataPath, drivePath):
print('CV2 version: ')
print(cv2.__version__ + '\n')
# -----------------------------------------------------------------------
# load the data
# load the cascade
try:
numplateCascade = cv2.CascadeClassifier(haarPath)
except:
print('ERROR: cv2.CascadeClassifier(haarPath) \n')
sys.exit()
try:
# load the file with the labelled test set
with open(dataPath, "r") as read_file:
testData = json.load(read_file)
# build the list of keys of the dictionary
keys = list(testData.keys())
except:
pri | conditional_block |
|
HaarROG.py | 24797844, 0.21875, 0.26508820798514393, 0.43769470404984423, 0.5473579262213359,
0.5986547085201793, 0.545361875637105, 0.6641025641025641, 0.7389380530973452, 0.6848404255319149,
0.7845659163987139, 0.8201438848920863, 0.6258411843876177, 0.6970802919708029, 0.754424778761062,
0.6334913112164297, 0.681917211328976, 0.6909620991253644, 0.6140888208269525, 0.6779324055666004,
0.7376237623762376, 0.6530612244897959, 0.7270531400966184, 0.7558823529411764]
'''
#==========================================================================
#command to install opencv
#pip install opencv-python
#==========================================================================
#json is used to format the data in this program
'''
"1_11_2014_12_13_38_590.bmp38467": {
"fileref": "",
"size": 38467,
"filename": "1_11_2014_12_13_38_590.bmp",
"base64_img_data": "",
"file_attributes": {},
"regions": {
"0": {
"shape_attributes": {
"name": "rect",
"x": 186,
"y": 203,
"width": 75,
"height": 21
},
"region_attributes": {}
}
}
}
'''
#==========================================================================
import math
import random
import PIL
import cv2
import numpy as np
#для хранения данных используем json, наиболее удобный формат
import json
#метрики
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import average_precision_score
#Построение графиков
import matplotlib.pyplot as plt
import sys
#==========================================================================
#helper procedures
def changeOpCharac(detectListParam, markedDataParam, key, rateCh, lowerBorder, topBorder):
#список для найденных уникальных номерных пластин
#список для общего количества найденных номерных пластин
findPlates = []
findUnicPlates = []
#делаем копии, чтобы работать с локальными параметрами
detectList = detectListParam.copy()
markedData = markedDataParam.copy()
for i in range(len(detectList)):
x1 = 0
x2 = 0
y1 = 0
y2 = 0
mx1 = 0
mx2 = 0
my1 = 0
my2 = 0
#в данном случае очень важная нумерация с нуля
for j in range(len(markedData[key]['regions'])):
#формируем список для размеченных данных из json
markedNumPlatesList = [
markedData[key]['regions'][str(j)]['shape_attributes']['x'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'],
markedData[key]['regions'][str(j)]['shape_attributes']['x'] + markedData[key]['regions'][str(j)]['shape_attributes']['width'],
markedData[key]['regions'][str(j)]['shape_attributes']['y'] + markedData[key]['regions'][str(j)]['shape_attributes']['height'] |
#print('LL')
#print(detectList)
#print('MNPL')
#print(markedNumPlatesList)
#x1 < x2
#упорядочили по x
if detectList[i][0] < detectList[i][2]:
x1 = detectList[i][0]
x2 = detectList[i][2]
else:
x1 = detectList[i][2]
x2 = detectList[i][0]
#упорядочили по x
if markedNumPlatesList[0] < markedNumPlatesList[2]:
mx1 = markedNumPlatesList[0]
mx2 = markedNumPlatesList[2]
else:
mx1 = markedNumPlatesList[2]
mx2 = markedNumPlatesList[0]
#y1 < y2
#упорядочили по y
if detectList[i][1] < detectList[i][3]:
y1 = detectList[i][1]
y2 = detectList[i][3]
else:
y1 = detectList[i][3]
y2 = detectList[i][1]
#упорядочили по x
if markedNumPlatesList[1] < markedNumPlatesList[3]:
my1 = markedNumPlatesList[1]
my2 = markedNumPlatesList[3]
else:
my1 = markedNumPlatesList[3]
my2 = markedNumPlatesList[1]
#print(x1, x2, mx1, mx2, y1, y2, my1, my2)
#находим пересечение отрезков
xIntersection = max(0, min(x2, mx2) - max(x1, mx1))
yIntersection = max(0, min(y2, my2) - max(y1, my1))
#print('xIntersection ' + str(xIntersection))
#print('yIntersection ' + str(yIntersection))
#вычисляем площади
detectNumArea = math.sqrt((x2 - x1)**2) * math.sqrt((y2 - y1)**2)
detectNumAreaInter = xIntersection * yIntersection
numArea = math.sqrt((markedNumPlatesList[0] - markedNumPlatesList[2])**2) * math.sqrt((markedNumPlatesList[1] - markedNumPlatesList[3])**2)
#print('detectNumAreaInter / numArea: ' + str(detectNumAreaInter / numArea))
#print('detectNumArea / numArea: ' + str(detectNumArea / numArea))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder):
findPlates.append(str(j))
if (detectNumAreaInter / numArea > lowerBorder) and (detectNumArea / numArea < topBorder) and (str(j) not in findUnicPlates):
findUnicPlates.append(str(j))
#print(findPlates, ' findPlates')
#print(detectList, ' detectList')
#print(findUnicPlates, ' findUnicPlates')
#print(len(markedData[key]['regions']), ' len(markedData[key][\'regions\'])')
rateCh.tp += len(findPlates)
rateCh.fp += len(detectList) - len(findPlates)
rateCh.fn += len(markedData[key]['regions']) - len(findUnicPlates)
return rateCh
def drawMarkAndDetect(detectReg, markedRegions, itemKey, image):
#делаем копии списков, чтобы работать с локальными параметрами
localMarkedRegions = markedRegions.copy()
localDetectReg = detectReg.copy()
markedNumPlates = []
for i in range(len(localMarkedRegions[itemKey]['regions'])):
batch = []
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['y'])
batch.append(localMarkedRegions[itemKey]['regions'][str(i)]['shape_attributes']['x'] + localMarkedRegions[itemKey]['regions'][str(i)][' | ] | random_line_split |
transcation.go | .Close()
block, err := node.GetBlockByNum(num)
if err != nil {
return err
}
processBlock(block)
return nil
}
func processBlocks(blocks *api.BlockListExtention) {
for _, v := range blocks.Block {
processBlock(v)
}
}
func processBlock(block *api.BlockExtention) {
height := block.GetBlockHeader().GetRawData().GetNumber()
node := getRandOneNode()
defer node.Conn.Close()
for _, v := range block.Transactions {
// transaction.ret.contractRe
txid := hexutil.Encode(v.Txid)
// https://tronscan.org/#/transaction/fede1aa9e5c5d7bd179fd62e23bdd11e3c1edd0ca51e41070e34a026d6a42569
if v.Result == nil || !v.Result.Result {
continue
}
rets := v.Transaction.Ret
if len(rets) < 1 || rets[0].ContractRet != core.Transaction_Result_SUCCESS {
continue
}
//fmt.Println(txid)
log.Debugf("process block height %d txid %s", height, txid)
for _, v1 := range v.Transaction.RawData.Contract {
if v1.Type == core.Transaction_Contract_TransferContract { // transfer contract
// TRX transfer
unObj := &core.TransferContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, Trx, txid, form, to, height, unObj.GetAmount())
} else if v1.Type == core.Transaction_Contract_TriggerSmartContract { // smart contract invocation
// TRC20 transfer
unObj := &core.TriggerSmartContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
// res, _ := json.Marshal(unObj)
// fmt.Println(string(res))
contract := base58.EncodeCheck(unObj.GetContractAddress())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
data := unObj.GetData()
// unObj.Data follows the eth approach, see https://goethereumbook.org/en/transfer-tokens/
to, amount, flag := processTransferData(data)
if flag { // only a transfer(address,uint256) call counts as a transfer
processTransaction(node, contract, txid, form, to, height, amount)
}
} else if v1.Type == core.Transaction_Contract_TransferAssetContract { // token (asset) transfer contract
// TRC10 transfer
unObj := &core.TransferAssetContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
contract := base58.EncodeCheck(unObj.GetAssetName())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, contract, txid, form, to, height, unObj.GetAmount())
}
}
}
}
// this map is currently unused; it only records the TRC20 call selectors and their signatures
var mapFunctionTcc20 = map[string]string{
"a9059cbb": "transfer(address,uint256)",
"70a08231": "balanceOf(address)",
}
// a9059cbb 4 8
// 00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98 32 64
// 0000000000000000000000000000000000000000000000000000000000989680 32 64
// parse the smart-contract call parameters
func processTransferData(trc20 []byte) (to string, amount int64, flag bool) {
if len(trc20) >= 68 {
fmt.Println(hexutil.Encode(trc20))
if hexutil.Encode(trc20[:4]) != "a9059cbb" {
return
}
// add the extra leading 0x41 address prefix byte
trc20[15] = 65
to = base58.EncodeCheck(trc20[15:36])
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[36:68])).Int64()
flag = true
}
return
}
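// Hypothetical sketch (not part of transcation.go): feed the sample payload quoted in the
// comment above processTransferData into it and print what comes out. The address and
// amount words are the ones from that comment; 0x989680 is 10,000,000 base units.
func exampleDecodeTransfer() {
	data, _ := hexutil.Decode("0x" +
		"a9059cbb" +
		"00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98" +
		"0000000000000000000000000000000000000000000000000000000000989680")
	to, amount, ok := processTransferData(data)
	fmt.Println(to, amount, ok) // amount should print as 10000000
}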
// build the transfer call parameters
func processTransferParameter(to string, amount int64) (data []byte) {
methodID, _ := hexutil.Decode("a9059cbb")
addr, _ := base58.DecodeCheck(to)
paddedAddress := common.LeftPadBytes(addr[1:], 32)
amountBig := new(big.Int).SetInt64(amount)
paddedAmount := common.LeftPadBytes(amountBig.Bytes(), 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
data = append(data, paddedAmount...)
return
}
// parse the balanceOf result
func processBalanceOfData(trc20 []byte) (amount int64) {
if len(trc20) >= 32 {
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[0:32])).Int64()
}
return
}
// build the balanceOf call parameters
func processBalanceOfParameter(addr st | := common.LeftPadBytes(add[1:], 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
return
}
func processTransaction(node *service.GrpcClient, contract, txid, from, to string, blockheight, amount int64) {
// check whether the contract exists (is supported)
if !IsContract(contract) {
return
}
// fmt.Printf("contract %s txid %s from %s to %s, blockheight %d amount %d \n",
// contract, txid, from, to, blockheight, amount)
var types string
if from == mainAddr { // withdrawal or internal transfer
ac, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect // fee transfer
} else {
types = Send
}
} else if to == mainAddr { // collection record
ac, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect
} else {
types = ReceiveOther
}
} else {
acf, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
act, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if act != nil { // deposit address
if acf != nil {
types = CollectOwn // in-platform transfer, not expected to trigger for now
} else {
types = Receive
go collectall(to) // collection check
}
} else {
if acf != nil {
types = CollectSend // transfer to an outside address, abnormal
} else {
return // ignore: neither address belongs to the platform
}
}
}
// fee handling
transinfo, err := node.GetTransactionInfoById(txid)
var fee int64
if err != nil {
log.Error(err)
} else {
fee = transinfo.GetFee()
}
_, decimalnum := chargeContract(contract)
var trans = &Transactions{
TxID: txid,
Contract: contract,
Type: types,
BlockHeight: blockheight,
Amount: decimal.New(amount, -decimalnum).String(),
Fee: decimal.New(fee, -trxdecimal).String(),
Timestamp: time.Now().Unix(),
Address: to,
FromAddress: from,
}
| ring) (data []byte) {
methodID, _ := hexutil.Decode("70a08231")
add, _ := base58.DecodeCheck(addr)
paddedAddress | identifier_body |
transcation.go | node = getRandOneNode()
block, err := node.GetBlockByLimitNext(start, end)
if err != nil {
// rpc error: code = DeadlineExceeded desc = context deadline exceeded will get again
log.Warnf("node get bolck start %d end %d GetBlockByLimitNext err: %v will get again", start, end, err)
time.Sleep(time.Second * 5)
goto againblock
}
log.Infof("node get bolck start %d end %d length %d", start, end, len(block.Block))
if len(block.Block) < 1 {
log.Warnf("get bolck zero lenghth of block start %d end %d, will get again", start, end)
time.Sleep(time.Second * 5)
goto againblock
}
processBlocks(block)
node.Conn.Close()
return nil
}
func getBlockWithHeight(num int64) error {
node := getRandOneNode()
defer node.Conn.Close()
block, err := node.GetBlockByNum(num)
if err != nil {
return err
}
processBlock(block)
return nil
}
func processBlocks(blocks *api.BlockListExtention) {
for _, v := range blocks.Block {
processBlock(v)
}
}
func processBlock(block *api.BlockExtention) {
height := block.GetBlockHeader().GetRawData().GetNumber()
node := getRandOneNode()
defer node.Conn.Close()
for _, v := range block.Transactions {
// transaction.ret.contractRe
txid := hexutil.Encode(v.Txid)
// https://tronscan.org/#/transaction/fede1aa9e5c5d7bd179fd62e23bdd11e3c1edd0ca51e41070e34a026d6a42569
if v.Result == nil || !v.Result.Result {
continue
}
rets := v.Transaction.Ret
if len(rets) < 1 || rets[0].ContractRet != core.Transaction_Result_SUCCESS {
continue
}
//fmt.Println(txid)
log.Debugf("process block height %d txid %s", height, txid)
for _, v1 := range v.Transaction.RawData.Contract {
if v1.Type == core.Transaction_Contract_TransferContract { //转账合约
// trx 转账
unObj := &core.TransferContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, Trx, txid, form, to, height, unObj.GetAmount())
} else if v1.Type == core.Transaction_Contract_TriggerSmartContract { //调用智能合约
// trc20 转账
unObj := &core.TriggerSmartContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
// res, _ := json.Marshal(unObj)
// fmt.Println(string(res))
contract := base58.EncodeCheck(unObj.GetContractAddress())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
data := unObj.GetData()
// unObj.Data https://goethereumbook.org/en/transfer-tokens/ 参考eth 操作
to, amount, flag := processTransferData(data)
if flag { // 只有调用了 transfer(address,uint256) 才是转账
processTransaction(node, contract, txid, form, to, height, amount)
}
} else if v1.Type == core.Transaction_Contract_TransferAssetContract { //通证转账合约
// trc10 转账
unObj := &core.TransferAssetContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
contract := base58.EncodeCheck(unObj.GetAssetName())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, contract, txid, form, to, height, unObj.GetAmount())
}
}
}
}
// this map is currently unused; it only records the TRC20 call selectors and their signatures
var mapFunctionTcc20 = map[string]string{
"a9059cbb": "transfer(address,uint256)",
"70a08231": "balanceOf(address)",
}
// a9059cbb 4 8
// 00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98 32 64
// 0000000000000000000000000000000000000000000000000000000000989680 32 64
// parse the smart-contract call parameters
func processTransferData(trc20 []byte) (to string, amount int64, flag bool) {
if len(trc20) >= 68 {
fmt.Println(hexutil.Encode(trc20))
if hexutil.Encode(trc20[:4]) != "a9059cbb" {
return
}
// 多1位41
trc20[15] = 65
to = base58.EncodeCheck(trc20[15:36])
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[36:68])).Int64()
flag = true
}
return
}
// build the transfer call parameters
func processTransferParameter(to string, amount int64) (data []byte) {
methodID, _ := hexutil.Decode("a9059cbb")
addr, _ := base58.DecodeCheck(to)
paddedAddress := common.LeftPadBytes(addr[1:], 32)
amountBig := new(big.Int).SetInt64(amount)
paddedAmount := common.LeftPadBytes(amountBig.Bytes(), 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
data = append(data, paddedAmount...)
return
}
// 处理合约获取余额
func processBalanceOfData(trc20 []byte) (amount int64) {
if len(trc20) >= 32 {
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[0:32])).Int64()
}
return
}
// 处理合约获取余额参数
func processBalanceOfParameter(addr string) (data []byte) {
methodID, _ := hexutil.Decode("70a08231")
add, _ := base58.DecodeCheck(addr)
paddedAddress := common.LeftPadBytes(add[1:], 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
return
}
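// Hypothetical sketch (not part of transcation.go): the balanceOf call data is always the
// 4-byte selector plus a 32-byte left-padded address, i.e. 36 bytes. mainAddr is the
// package-level main wallet address already used elsewhere in this file.
func exampleBalanceOfData() {
	data := processBalanceOfParameter(mainAddr)
	fmt.Println(len(data)) // expected: 36
}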
func processTransaction(node *service.GrpcClient, contract, txid, from, to string, blockheight, amount int64) {
// check whether the contract exists (is supported)
if !IsContract(contract) {
return
}
// fmt.Printf("contract %s txid %s from %s to %s, blockheight %d amount %d \n",
// contract, txid, from, to, blockheight, amount)
var types string
if from == mainAddr { // 提币 or 中转
ac, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect // 手续费划转
} else {
types = Send
}
} else if to == mainAddr { // 归集记录
ac, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect
} else {
types = ReceiveOther
}
} else {
acf, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
act, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if act != nil { // 收币地址
if acf != nil {
types = CollectOwn // 站内转账 暂时不可能触发
} else {
types = Receive
go collectall(to) // 归集检测
}
| random_line_split |
||
transcation.go | .Close()
block, err := node.GetBlockByNum(num)
if err != nil {
return err
}
processBlock(block)
return nil
}
func processBlocks(blocks *api.BlockListExtention) {
for _, v := range blocks.Block {
processBlock(v)
}
}
func processBlock(block *api.BlockExtention) {
height := block.GetBlockHeader().GetRawData().GetNumber()
node := getRandOneNode()
defer node.Conn.Close()
for _, v := range block.Transactions {
// transaction.ret.contractRe
txid := hexutil.Encode(v.Txid)
// https://tronscan.org/#/transaction/fede1aa9e5c5d7bd179fd62e23bdd11e3c1edd0ca51e41070e34a026d6a42569
if v.Result == nil || !v.Result.Result {
continue
}
rets := v.Transaction.Ret
if len(rets) < 1 || rets[0].ContractRet != core.Transaction_Result_SUCCESS {
continue
}
//fmt.Println(txid)
log.Debugf("process block height %d txid %s", height, txid)
for _, v1 := range v.Transaction.RawData.Contract {
if v1.Type == core.Transaction_Contract_TransferContract { //转账合约
// trx 转账
unObj := &core.TransferContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, Trx, txid, form, to, height, unObj.GetAmount())
} else if v1.Type == core.Transaction_Contract_TriggerSmartContract { //调用智能合约
// trc20 转账
unObj := &core.TriggerSmartContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
// res, _ := json.Marshal(unObj)
// fmt.Println(string(res))
contract := base58.EncodeCheck(unObj.GetContractAddress())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
data := unObj.GetData()
// unObj.Data https://goethereumbook.org/en/transfer-tokens/ 参考eth 操作
to, amount, flag := processTransferData(data)
if flag { // 只有调用了 transfer(address,uint256) 才是转账
processTransaction(node, contract, txid, form, to, height, amount)
}
} else if v1.Type == core.Transaction_Contract_TransferAssetContract { //通证转账合约
// trc10 转账
unObj := &core.TransferAssetContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
contract := base58.EncodeCheck(unObj.GetAssetName())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, contract, txid, form, to, height, unObj.GetAmount())
}
}
}
}
// this map is currently unused; it only records the TRC20 call selectors and their signatures
var mapFunctionTcc20 = map[string]string{
"a9059cbb": "transfer(address,uint256)",
"70a08231": "balanceOf(address)",
}
// a9059cbb 4 8
// 00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98 32 64
// 0000000000000000000000000000000000000000000000000000000000989680 32 64
// parse the smart-contract call parameters
func processTransferData(trc20 []byte) (to string, amount int64, flag bool) {
if len(trc20) >= 68 {
fmt.Println(hexutil.Encode(trc20))
if hexutil.Encode(trc20[:4]) != "a9059cbb" {
return
}
// 多1位41
trc20[15] = 65
to = base58.EncodeCheck(trc20[15:36])
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[36:68])).Int64()
flag = true
}
return
}
// build the transfer call parameters
func processTransferParameter(to string, amount int64) (data []byte) {
methodID, _ := hexutil.Decode("a9059cbb")
addr, _ := base58.DecodeCheck(to)
paddedAddress := common.LeftPadBytes(addr[1:], 32)
amountBig := new(big.Int).SetInt64(amount)
paddedAmount := common.LeftPadBytes(amountBig.Bytes(), 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
data = append(data, paddedAmount...)
return
}
// 处理合约获取余额
func processBalanceOfData(trc20 []byte) (amount int64) {
if len(trc20) >= 32 {
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[0:32])).Int64()
}
return
}
// 处理合约获取余额参数
func processBalanceOfParameter(addr string) (data []byte) {
methodID, _ := hexutil.Decode("70a08231")
add, _ := base58.DecodeCheck(addr)
paddedAddress := common.LeftPadBytes(add[1:], 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
return
}
func processTransaction(node *service.GrpcClient, contract, txid, from, to string, blockheight, amount int64) {
// check whether the contract exists (is supported)
if !IsContract(contract) {
return
}
// fmt.Printf("contract %s txid %s from %s to %s, blockheight %d amount %d \n",
// contract, txid, from, to, blockheight, amount)
var types string
if from == mainAddr { // 提币 or 中转
ac, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect // 手续费划转
} else {
types = Send
}
} else if to == mainAddr { // 归集记录
ac, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect
} else {
types = ReceiveOther
}
} else {
acf, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
act, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if act != nil { // 收币地址
if acf != nil {
types = CollectOwn // 站内转账 暂时不可能触发
} else {
types = Receive
go collectall(to) // 归集检测
}
} else {
if acf != nil {
types = CollectSend // 转账到外面地址 异常
} else {
return // 不处理 都不是平台的地址
}
}
}
// 手续费处理
transinfo, err := node.GetTransactionInfoById(txid)
var fee int64
if err != nil {
log.Error(err)
} else {
fee = transinfo.GetFee()
}
_, decimaln | xID: txid,
Contract: contract,
Type: types,
BlockHeight: blockheight,
Amount: decimal.New(amount, -decimalnum).String(),
Fee: decimal.New(fee, -trxdecimal).String(),
Timestamp: time.Now().Unix(),
Address: to,
FromAddress: from,
}
_, | um := chargeContract(contract)
var trans = &Transactions{
T | conditional_block |
transcation.go | .Close()
block, err := node.GetBlockByNum(num)
if err != nil {
return err
}
processBlock(block)
return nil
}
func processBlocks(blocks *api.BlockListExtention) {
for _, v := range blocks.Block {
processBlock(v)
}
}
func processBlo | i.BlockExtention) {
height := block.GetBlockHeader().GetRawData().GetNumber()
node := getRandOneNode()
defer node.Conn.Close()
for _, v := range block.Transactions {
// transaction.ret.contractRe
txid := hexutil.Encode(v.Txid)
// https://tronscan.org/#/transaction/fede1aa9e5c5d7bd179fd62e23bdd11e3c1edd0ca51e41070e34a026d6a42569
if v.Result == nil || !v.Result.Result {
continue
}
rets := v.Transaction.Ret
if len(rets) < 1 || rets[0].ContractRet != core.Transaction_Result_SUCCESS {
continue
}
//fmt.Println(txid)
log.Debugf("process block height %d txid %s", height, txid)
for _, v1 := range v.Transaction.RawData.Contract {
if v1.Type == core.Transaction_Contract_TransferContract { //转账合约
// trx 转账
unObj := &core.TransferContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, Trx, txid, form, to, height, unObj.GetAmount())
} else if v1.Type == core.Transaction_Contract_TriggerSmartContract { //调用智能合约
// trc20 转账
unObj := &core.TriggerSmartContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
// res, _ := json.Marshal(unObj)
// fmt.Println(string(res))
contract := base58.EncodeCheck(unObj.GetContractAddress())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
data := unObj.GetData()
// unObj.Data https://goethereumbook.org/en/transfer-tokens/ 参考eth 操作
to, amount, flag := processTransferData(data)
if flag { // 只有调用了 transfer(address,uint256) 才是转账
processTransaction(node, contract, txid, form, to, height, amount)
}
} else if v1.Type == core.Transaction_Contract_TransferAssetContract { //通证转账合约
// trc10 转账
unObj := &core.TransferAssetContract{}
err := proto.Unmarshal(v1.Parameter.GetValue(), unObj)
if err != nil {
log.Errorf("parse Contract %v err: %v", v1, err)
continue
}
contract := base58.EncodeCheck(unObj.GetAssetName())
form := base58.EncodeCheck(unObj.GetOwnerAddress())
to := base58.EncodeCheck(unObj.GetToAddress())
processTransaction(node, contract, txid, form, to, height, unObj.GetAmount())
}
}
}
}
// this map is currently unused; it only records the TRC20 call selectors and their signatures
var mapFunctionTcc20 = map[string]string{
"a9059cbb": "transfer(address,uint256)",
"70a08231": "balanceOf(address)",
}
// a9059cbb 4 8
// 00000000000000000000004173d5888eedd05efeda5bca710982d9c13b975f98 32 64
// 0000000000000000000000000000000000000000000000000000000000989680 32 64
// parse the smart-contract call parameters
func processTransferData(trc20 []byte) (to string, amount int64, flag bool) {
if len(trc20) >= 68 {
fmt.Println(hexutil.Encode(trc20))
if hexutil.Encode(trc20[:4]) != "a9059cbb" {
return
}
// 多1位41
trc20[15] = 65
to = base58.EncodeCheck(trc20[15:36])
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[36:68])).Int64()
flag = true
}
return
}
// build the transfer call parameters
func processTransferParameter(to string, amount int64) (data []byte) {
methodID, _ := hexutil.Decode("a9059cbb")
addr, _ := base58.DecodeCheck(to)
paddedAddress := common.LeftPadBytes(addr[1:], 32)
amountBig := new(big.Int).SetInt64(amount)
paddedAmount := common.LeftPadBytes(amountBig.Bytes(), 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
data = append(data, paddedAmount...)
return
}
// 处理合约获取余额
func processBalanceOfData(trc20 []byte) (amount int64) {
if len(trc20) >= 32 {
amount = new(big.Int).SetBytes(common.TrimLeftZeroes(trc20[0:32])).Int64()
}
return
}
// 处理合约获取余额参数
func processBalanceOfParameter(addr string) (data []byte) {
methodID, _ := hexutil.Decode("70a08231")
add, _ := base58.DecodeCheck(addr)
paddedAddress := common.LeftPadBytes(add[1:], 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
return
}
func processTransaction(node *service.GrpcClient, contract, txid, from, to string, blockheight, amount int64) {
// check whether the contract exists (is supported)
if !IsContract(contract) {
return
}
// fmt.Printf("contract %s txid %s from %s to %s, blockheight %d amount %d \n",
// contract, txid, from, to, blockheight, amount)
var types string
if from == mainAddr { // 提币 or 中转
ac, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect // 手续费划转
} else {
types = Send
}
} else if to == mainAddr { // 归集记录
ac, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
if ac != nil {
types = Collect
} else {
types = ReceiveOther
}
} else {
acf, err := dbengine.SearchAccount(from)
if err != nil {
log.Error(err)
}
act, err := dbengine.SearchAccount(to)
if err != nil {
log.Error(err)
}
if act != nil { // 收币地址
if acf != nil {
types = CollectOwn // 站内转账 暂时不可能触发
} else {
types = Receive
go collectall(to) // 归集检测
}
} else {
if acf != nil {
types = CollectSend // 转账到外面地址 异常
} else {
return // 不处理 都不是平台的地址
}
}
}
// 手续费处理
transinfo, err := node.GetTransactionInfoById(txid)
var fee int64
if err != nil {
log.Error(err)
} else {
fee = transinfo.GetFee()
}
_, decimalnum := chargeContract(contract)
var trans = &Transactions{
TxID: txid,
Contract: contract,
Type: types,
BlockHeight: blockheight,
Amount: decimal.New(amount, -decimalnum).String(),
Fee: decimal.New(fee, -trxdecimal).String(),
Timestamp: time.Now().Unix(),
Address: to,
FromAddress: from,
}
| ck(block *ap | identifier_name |
fmt.rs | to be formatted
collect_files(&args, &ignore, is_supported_ext_fmt).and_then(|files| {
if files.is_empty() {
Err(generic_error("No target files found."))
} else {
Ok(files)
}
})
};
let operation = |paths: Vec<PathBuf>| {
let config = get_typescript_config();
async move {
if check {
check_source_files(config, paths).await?;
} else {
format_source_files(config, paths).await?;
}
Ok(())
}
.boxed_local()
};
if watch {
file_watcher::watch_func(target_file_resolver, operation, "Fmt").await?;
} else {
operation(target_file_resolver()?).await?;
}
Ok(())
}
/// Formats markdown (using https://github.com/dprint/dprint-plugin-markdown) and its code blocks
/// (ts/tsx, js/jsx).
fn format_markdown(
file_text: &str,
ts_config: dprint_plugin_typescript::configuration::Configuration,
) -> Result<String, String> {
let md_config = get_markdown_config();
dprint_plugin_markdown::format_text(
&file_text,
&md_config,
Box::new(move |tag, text, line_width| {
let tag = tag.to_lowercase();
if matches!(
tag.as_str(),
"ts"
| "tsx"
| "js"
| "jsx"
| "javascript"
| "typescript"
| "json"
| "jsonc"
) {
// It's important to tell dprint the proper file extension, otherwise
// it might parse the file twice.
let extension = match tag.as_str() {
"javascript" => "js",
"typescript" => "ts",
rest => rest,
};
if matches!(extension, "json" | "jsonc") {
let mut json_config = get_json_config();
json_config.line_width = line_width;
dprint_plugin_json::format_text(&text, &json_config)
} else {
let fake_filename =
PathBuf::from(format!("deno_fmt_stdin.{}", extension));
let mut codeblock_config = ts_config.clone();
codeblock_config.line_width = line_width;
dprint_plugin_typescript::format_text(
&fake_filename,
&text,
&codeblock_config,
)
}
} else {
Ok(text.to_string())
}
}),
)
}
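// Hypothetical usage sketch (not part of fmt.rs): a fenced ts code block inside markdown
// is routed through the TypeScript formatter configured by get_typescript_config().
fn example_format_markdown() -> Result<(), String> {
  let md = "# Title\n\n```ts\nconst x    = 1\n```\n";
  let formatted = format_markdown(md, get_typescript_config())?;
  println!("{}", formatted);
  Ok(())
}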
/// Formats JSON and JSONC using the rules provided by .deno()
/// of configuration builder of https://github.com/dprint/dprint-plugin-json.
/// See https://git.io/Jt4ht for configuration.
fn format_json(file_text: &str) -> Result<String, String> {
let json_config = get_json_config();
dprint_plugin_json::format_text(&file_text, &json_config)
}
async fn check_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
// prevent threads outputting at the same time
let output_lock = Arc::new(Mutex::new(0));
run_parallelized(paths, {
let not_formatted_files_count = not_formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_text = read_file_contents(&file_path)?.text;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_text)
} else {
dprint_plugin_typescript::format_text(&file_path, &file_text, &config)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_text {
not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
let diff = diff(&file_text, &formatted_text);
info!("");
info!("{} {}:", colors::bold("from"), file_path.display());
info!("{}", diff);
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error checking: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let not_formatted_files_count =
not_formatted_files_count.load(Ordering::Relaxed);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
let checked_files_str =
format!("{} {}", checked_files_count, files_str(checked_files_count));
if not_formatted_files_count == 0 {
info!("Checked {}", checked_files_str);
Ok(())
} else {
let not_formatted_files_str = files_str(not_formatted_files_count);
Err(generic_error(format!(
"Found {} not formatted {} in {}",
not_formatted_files_count, not_formatted_files_str, checked_files_str,
)))
}
}
async fn format_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
let output_lock = Arc::new(Mutex::new(0)); // prevent threads outputting at the same time
run_parallelized(paths, {
let formatted_files_count = formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_contents = read_file_contents(&file_path)?;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_contents.text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_contents.text)
} else {
dprint_plugin_typescript::format_text(
&file_path,
&file_contents.text,
&config,
)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_contents.text {
write_file_contents(
&file_path,
FileContents {
had_bom: file_contents.had_bom,
text: formatted_text,
},
)?;
formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
info!("{}", file_path.to_string_lossy());
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error formatting: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let formatted_files_count = formatted_files_count.load(Ordering::Relaxed);
debug!(
"Formatted {} {}",
formatted_files_count,
files_str(formatted_files_count),
);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
info!(
"Checked {} {}",
checked_files_count,
files_str(checked_files_count)
);
Ok(())
}
/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
pub fn format_stdin(check: bool, ext: String) -> Result<(), AnyError> {
let mut source = String::new();
if stdin().read_to_string(&mut source).is_err() {
return Err(generic_error("Failed to read from stdin"));
}
let config = get_typescript_config();
let r = if ext.as_str() == "md" {
format_markdown(&source, config)
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&source)
} else {
// dprint will fallback to jsx parsing if parsing this as a .ts file doesn't work
dprint_plugin_typescript::format_text(
&PathBuf::from("_stdin.ts"),
&source,
&config,
)
};
match r {
Ok(formatted_text) => {
if check {
if formatted_text != source {
println!("Not formatted stdin");
}
} else {
stdout().write_all(formatted_text.as_bytes())?;
}
}
Err(e) => {
return Err(generic_error(e));
}
}
Ok(())
}
fn files_str(len: usize) -> &'static str {
if len <= 1 {
"file"
} else {
"files"
}
}
fn get_typescript_config(
) -> dprint_plugin_typescript::configuration::Configuration {
dprint_plugin_typescript::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_markdown_config() -> dprint_plugin_markdown::configuration::Configuration
{
dprint_plugin_markdown::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn | get_json_config | identifier_name |
|
fmt.rs | format(
args: Vec<PathBuf>,
ignore: Vec<PathBuf>,
check: bool,
watch: bool,
) -> Result<(), AnyError> {
let target_file_resolver = || {
// collect the files that are to be formatted
collect_files(&args, &ignore, is_supported_ext_fmt).and_then(|files| {
if files.is_empty() {
Err(generic_error("No target files found."))
} else {
Ok(files)
}
})
};
let operation = |paths: Vec<PathBuf>| {
let config = get_typescript_config();
async move {
if check {
check_source_files(config, paths).await?;
} else {
format_source_files(config, paths).await?;
}
Ok(())
}
.boxed_local()
};
if watch {
file_watcher::watch_func(target_file_resolver, operation, "Fmt").await?;
} else {
operation(target_file_resolver()?).await?;
}
Ok(())
}
/// Formats markdown (using https://github.com/dprint/dprint-plugin-markdown) and its code blocks
/// (ts/tsx, js/jsx).
fn format_markdown(
file_text: &str,
ts_config: dprint_plugin_typescript::configuration::Configuration,
) -> Result<String, String> {
let md_config = get_markdown_config();
dprint_plugin_markdown::format_text(
&file_text,
&md_config,
Box::new(move |tag, text, line_width| {
let tag = tag.to_lowercase();
if matches!(
tag.as_str(),
"ts"
| "tsx"
| "js"
| "jsx"
| "javascript"
| "typescript"
| "json"
| "jsonc"
) {
// It's important to tell dprint the proper file extension, otherwise
// it might parse the file twice.
let extension = match tag.as_str() {
"javascript" => "js",
"typescript" => "ts",
rest => rest,
};
if matches!(extension, "json" | "jsonc") {
let mut json_config = get_json_config();
json_config.line_width = line_width;
dprint_plugin_json::format_text(&text, &json_config)
} else {
let fake_filename =
PathBuf::from(format!("deno_fmt_stdin.{}", extension));
let mut codeblock_config = ts_config.clone();
codeblock_config.line_width = line_width;
dprint_plugin_typescript::format_text(
&fake_filename,
&text,
&codeblock_config,
)
}
} else {
Ok(text.to_string())
}
}),
)
}
/// Formats JSON and JSONC using the rules provided by .deno()
/// of configuration builder of https://github.com/dprint/dprint-plugin-json.
/// See https://git.io/Jt4ht for configuration.
fn format_json(file_text: &str) -> Result<String, String> {
let json_config = get_json_config();
dprint_plugin_json::format_text(&file_text, &json_config)
}
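// Hypothetical usage sketch (not part of fmt.rs): format_json accepts JSONC, so line
// comments are kept while spacing and indentation are normalized by the .deno() config.
fn example_format_json() {
  let input = "{\"a\":1, // keep this comment\n\"b\":[2,3]}";
  match format_json(input) {
    Ok(text) => println!("{}", text),
    Err(e) => eprintln!("{}", e),
  }
}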
async fn check_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
// prevent threads outputting at the same time
let output_lock = Arc::new(Mutex::new(0));
run_parallelized(paths, {
let not_formatted_files_count = not_formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_text = read_file_contents(&file_path)?.text;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_text)
} else {
dprint_plugin_typescript::format_text(&file_path, &file_text, &config)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_text {
not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
let diff = diff(&file_text, &formatted_text);
info!("");
info!("{} {}:", colors::bold("from"), file_path.display());
info!("{}", diff);
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error checking: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let not_formatted_files_count =
not_formatted_files_count.load(Ordering::Relaxed);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
let checked_files_str =
format!("{} {}", checked_files_count, files_str(checked_files_count));
if not_formatted_files_count == 0 {
info!("Checked {}", checked_files_str);
Ok(())
} else {
let not_formatted_files_str = files_str(not_formatted_files_count);
Err(generic_error(format!(
"Found {} not formatted {} in {}",
not_formatted_files_count, not_formatted_files_str, checked_files_str,
)))
}
}
async fn format_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
let output_lock = Arc::new(Mutex::new(0)); // prevent threads outputting at the same time
run_parallelized(paths, {
let formatted_files_count = formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_contents = read_file_contents(&file_path)?;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_contents.text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_contents.text)
} else {
dprint_plugin_typescript::format_text(
&file_path,
&file_contents.text,
&config,
)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_contents.text {
write_file_contents(
&file_path,
FileContents {
had_bom: file_contents.had_bom,
text: formatted_text,
},
)?;
formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
info!("{}", file_path.to_string_lossy());
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error formatting: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let formatted_files_count = formatted_files_count.load(Ordering::Relaxed);
debug!(
"Formatted {} {}",
formatted_files_count,
files_str(formatted_files_count),
);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
info!(
"Checked {} {}",
checked_files_count,
files_str(checked_files_count)
);
Ok(())
}
/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
pub fn format_stdin(check: bool, ext: String) -> Result<(), AnyError> {
let mut source = String::new();
if stdin().read_to_string(&mut source).is_err() {
return Err(generic_error("Failed to read from stdin"));
}
let config = get_typescript_config();
let r = if ext.as_str() == "md" {
format_markdown(&source, config)
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&source)
} else {
// dprint will fallback to jsx parsing if parsing this as a .ts file doesn't work
dprint_plugin_typescript::format_text(
&PathBuf::from("_stdin.ts"),
&source,
&config,
)
};
match r {
Ok(formatted_text) => {
if check {
if formatted_text != source {
println!("Not formatted stdin");
}
} else {
stdout().write_all(formatted_text.as_bytes())?;
}
}
Err(e) => |
}
Ok(())
}
fn files_str(len: usize) -> &'static str {
if len <= 1 {
"file"
} else {
"files"
}
}
fn get_typescript_config(
) -> dprint_plugin_typescript::configuration::Configuration {
dprint_plugin_typescript::configuration::ConfigurationBuilder::new()
.deno()
| {
return Err(generic_error(e));
} | conditional_block |
fmt.rs | format(
args: Vec<PathBuf>,
ignore: Vec<PathBuf>,
check: bool,
watch: bool,
) -> Result<(), AnyError> {
let target_file_resolver = || {
// collect the files that are to be formatted
collect_files(&args, &ignore, is_supported_ext_fmt).and_then(|files| {
if files.is_empty() {
Err(generic_error("No target files found."))
} else {
Ok(files)
}
})
};
let operation = |paths: Vec<PathBuf>| {
let config = get_typescript_config();
async move {
if check {
check_source_files(config, paths).await?;
} else {
format_source_files(config, paths).await?;
}
Ok(())
}
.boxed_local()
};
if watch {
file_watcher::watch_func(target_file_resolver, operation, "Fmt").await?;
} else {
operation(target_file_resolver()?).await?;
} | /// Formats markdown (using https://github.com/dprint/dprint-plugin-markdown) and its code blocks
/// (ts/tsx, js/jsx).
fn format_markdown(
file_text: &str,
ts_config: dprint_plugin_typescript::configuration::Configuration,
) -> Result<String, String> {
let md_config = get_markdown_config();
dprint_plugin_markdown::format_text(
&file_text,
&md_config,
Box::new(move |tag, text, line_width| {
let tag = tag.to_lowercase();
if matches!(
tag.as_str(),
"ts"
| "tsx"
| "js"
| "jsx"
| "javascript"
| "typescript"
| "json"
| "jsonc"
) {
// It's important to tell dprint proper file extension, otherwise
// it might parse the file twice.
let extension = match tag.as_str() {
"javascript" => "js",
"typescript" => "ts",
rest => rest,
};
if matches!(extension, "json" | "jsonc") {
let mut json_config = get_json_config();
json_config.line_width = line_width;
dprint_plugin_json::format_text(&text, &json_config)
} else {
let fake_filename =
PathBuf::from(format!("deno_fmt_stdin.{}", extension));
let mut codeblock_config = ts_config.clone();
codeblock_config.line_width = line_width;
dprint_plugin_typescript::format_text(
&fake_filename,
&text,
&codeblock_config,
)
}
} else {
Ok(text.to_string())
}
}),
)
}
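// Illustrative walk-through of the callback above (tag names assumed): a fenced ```ts block
// inside a markdown file is handed to dprint_plugin_typescript under the fake filename
// `deno_fmt_stdin.ts`, a ```javascript block is first remapped to the `js` extension, and a
// ```json block goes to dprint_plugin_json; in every case the code fence's available
// line_width overrides the base configuration before formatting.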
/// Formats JSON and JSONC using the rules provided by .deno()
/// of configuration builder of https://github.com/dprint/dprint-plugin-json.
/// See https://git.io/Jt4ht for configuration.
fn format_json(file_text: &str) -> Result<String, String> {
let json_config = get_json_config();
dprint_plugin_json::format_text(&file_text, &json_config)
}
async fn check_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
// prevent threads outputting at the same time
let output_lock = Arc::new(Mutex::new(0));
run_parallelized(paths, {
let not_formatted_files_count = not_formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_text = read_file_contents(&file_path)?.text;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_text)
} else {
dprint_plugin_typescript::format_text(&file_path, &file_text, &config)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_text {
not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
let diff = diff(&file_text, &formatted_text);
info!("");
info!("{} {}:", colors::bold("from"), file_path.display());
info!("{}", diff);
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error checking: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let not_formatted_files_count =
not_formatted_files_count.load(Ordering::Relaxed);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
let checked_files_str =
format!("{} {}", checked_files_count, files_str(checked_files_count));
if not_formatted_files_count == 0 {
info!("Checked {}", checked_files_str);
Ok(())
} else {
let not_formatted_files_str = files_str(not_formatted_files_count);
Err(generic_error(format!(
"Found {} not formatted {} in {}",
not_formatted_files_count, not_formatted_files_str, checked_files_str,
)))
}
}
async fn format_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
let output_lock = Arc::new(Mutex::new(0)); // prevent threads outputting at the same time
run_parallelized(paths, {
let formatted_files_count = formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_contents = read_file_contents(&file_path)?;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_contents.text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_contents.text)
} else {
dprint_plugin_typescript::format_text(
&file_path,
&file_contents.text,
&config,
)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_contents.text {
write_file_contents(
&file_path,
FileContents {
had_bom: file_contents.had_bom,
text: formatted_text,
},
)?;
formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
info!("{}", file_path.to_string_lossy());
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error formatting: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let formatted_files_count = formatted_files_count.load(Ordering::Relaxed);
debug!(
"Formatted {} {}",
formatted_files_count,
files_str(formatted_files_count),
);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
info!(
"Checked {} {}",
checked_files_count,
files_str(checked_files_count)
);
Ok(())
}
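// Note that the two counters above feed different messages: checked_files_count covers every
// file visited and produces the user-facing "Checked N files" line, while
// formatted_files_count only counts files whose contents actually changed and is reported at
// debug level.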
/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
pub fn format_stdin(check: bool, ext: String) -> Result<(), AnyError> {
let mut source = String::new();
if stdin().read_to_string(&mut source).is_err() {
return Err(generic_error("Failed to read from stdin"));
}
let config = get_typescript_config();
let r = if ext.as_str() == "md" {
format_markdown(&source, config)
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&source)
} else {
// dprint will fall back to jsx parsing if parsing this as a .ts file doesn't work
dprint_plugin_typescript::format_text(
&PathBuf::from("_stdin.ts"),
&source,
&config,
)
};
match r {
Ok(formatted_text) => {
if check {
if formatted_text != source {
println!("Not formatted stdin");
}
} else {
stdout().write_all(formatted_text.as_bytes())?;
}
}
Err(e) => {
return Err(generic_error(e));
}
}
Ok(())
}
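// As written, `--check` on stdin only prints "Not formatted stdin" and still returns Ok(()),
// so unformatted stdin does not by itself produce a non-zero result from this function.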
fn files_str(len: usize) -> &'static str {
if len <= 1 {
"file"
} else {
"files"
}
}
fn get_typescript_config(
) -> dprint_plugin_typescript::configuration::Configuration {
dprint_plugin_typescript::configuration::ConfigurationBuilder::new()
.deno()
. |
Ok(())
}
| random_line_split |
fmt.rs | blocks
/// (ts/tsx, js/jsx).
fn format_markdown(
file_text: &str,
ts_config: dprint_plugin_typescript::configuration::Configuration,
) -> Result<String, String> {
let md_config = get_markdown_config();
dprint_plugin_markdown::format_text(
&file_text,
&md_config,
Box::new(move |tag, text, line_width| {
let tag = tag.to_lowercase();
if matches!(
tag.as_str(),
"ts"
| "tsx"
| "js"
| "jsx"
| "javascript"
| "typescript"
| "json"
| "jsonc"
) {
// It's important to tell dprint proper file extension, otherwise
// it might parse the file twice.
let extension = match tag.as_str() {
"javascript" => "js",
"typescript" => "ts",
rest => rest,
};
if matches!(extension, "json" | "jsonc") {
let mut json_config = get_json_config();
json_config.line_width = line_width;
dprint_plugin_json::format_text(&text, &json_config)
} else {
let fake_filename =
PathBuf::from(format!("deno_fmt_stdin.{}", extension));
let mut codeblock_config = ts_config.clone();
codeblock_config.line_width = line_width;
dprint_plugin_typescript::format_text(
&fake_filename,
&text,
&codeblock_config,
)
}
} else {
Ok(text.to_string())
}
}),
)
}
/// Formats JSON and JSONC using the rules provided by .deno()
/// of configuration builder of https://github.com/dprint/dprint-plugin-json.
/// See https://git.io/Jt4ht for configuration.
fn format_json(file_text: &str) -> Result<String, String> {
let json_config = get_json_config();
dprint_plugin_json::format_text(&file_text, &json_config)
}
async fn check_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
// prevent threads outputting at the same time
let output_lock = Arc::new(Mutex::new(0));
run_parallelized(paths, {
let not_formatted_files_count = not_formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_text = read_file_contents(&file_path)?.text;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_text)
} else {
dprint_plugin_typescript::format_text(&file_path, &file_text, &config)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_text {
not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
let diff = diff(&file_text, &formatted_text);
info!("");
info!("{} {}:", colors::bold("from"), file_path.display());
info!("{}", diff);
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error checking: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let not_formatted_files_count =
not_formatted_files_count.load(Ordering::Relaxed);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
let checked_files_str =
format!("{} {}", checked_files_count, files_str(checked_files_count));
if not_formatted_files_count == 0 {
info!("Checked {}", checked_files_str);
Ok(())
} else {
let not_formatted_files_str = files_str(not_formatted_files_count);
Err(generic_error(format!(
"Found {} not formatted {} in {}",
not_formatted_files_count, not_formatted_files_str, checked_files_str,
)))
}
}
async fn format_source_files(
config: dprint_plugin_typescript::configuration::Configuration,
paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
let formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
let output_lock = Arc::new(Mutex::new(0)); // prevent threads outputting at the same time
run_parallelized(paths, {
let formatted_files_count = formatted_files_count.clone();
let checked_files_count = checked_files_count.clone();
move |file_path| {
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_contents = read_file_contents(&file_path)?;
let ext = get_extension(&file_path).unwrap_or_else(String::new);
let r = if ext == "md" {
format_markdown(&file_contents.text, config.clone())
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&file_contents.text)
} else {
dprint_plugin_typescript::format_text(
&file_path,
&file_contents.text,
&config,
)
};
match r {
Ok(formatted_text) => {
if formatted_text != file_contents.text {
write_file_contents(
&file_path,
FileContents {
had_bom: file_contents.had_bom,
text: formatted_text,
},
)?;
formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock().unwrap();
info!("{}", file_path.to_string_lossy());
}
}
Err(e) => {
let _g = output_lock.lock().unwrap();
eprintln!("Error formatting: {}", file_path.to_string_lossy());
eprintln!(" {}", e);
}
}
Ok(())
}
})
.await?;
let formatted_files_count = formatted_files_count.load(Ordering::Relaxed);
debug!(
"Formatted {} {}",
formatted_files_count,
files_str(formatted_files_count),
);
let checked_files_count = checked_files_count.load(Ordering::Relaxed);
info!(
"Checked {} {}",
checked_files_count,
files_str(checked_files_count)
);
Ok(())
}
/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
pub fn format_stdin(check: bool, ext: String) -> Result<(), AnyError> {
let mut source = String::new();
if stdin().read_to_string(&mut source).is_err() {
return Err(generic_error("Failed to read from stdin"));
}
let config = get_typescript_config();
let r = if ext.as_str() == "md" {
format_markdown(&source, config)
} else if matches!(ext.as_str(), "json" | "jsonc") {
format_json(&source)
} else {
// dprint will fall back to jsx parsing if parsing this as a .ts file doesn't work
dprint_plugin_typescript::format_text(
&PathBuf::from("_stdin.ts"),
&source,
&config,
)
};
match r {
Ok(formatted_text) => {
if check {
if formatted_text != source {
println!("Not formatted stdin");
}
} else {
stdout().write_all(formatted_text.as_bytes())?;
}
}
Err(e) => {
return Err(generic_error(e));
}
}
Ok(())
}
fn files_str(len: usize) -> &'static str {
if len <= 1 {
"file"
} else {
"files"
}
}
fn get_typescript_config(
) -> dprint_plugin_typescript::configuration::Configuration {
dprint_plugin_typescript::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_markdown_config() -> dprint_plugin_markdown::configuration::Configuration
{
dprint_plugin_markdown::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
fn get_json_config() -> dprint_plugin_json::configuration::Configuration {
dprint_plugin_json::configuration::ConfigurationBuilder::new()
.deno()
.build()
}
struct FileContents {
text: String,
had_bom: bool,
}
fn read_file_contents(file_path: &Path) -> Result<FileContents, AnyError> | {
let file_bytes = fs::read(&file_path)?;
let charset = text_encoding::detect_charset(&file_bytes);
let file_text = text_encoding::convert_to_utf8(&file_bytes, charset)?;
let had_bom = file_text.starts_with(BOM_CHAR);
let text = if had_bom {
// remove the BOM
String::from(&file_text[BOM_CHAR.len_utf8()..])
} else {
String::from(file_text)
};
Ok(FileContents { text, had_bom })
} | identifier_body |
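// Sketch of the intent (assuming BOM_CHAR is the U+FEFF byte-order mark): the raw bytes are
// decoded with the detected charset, a leading BOM is stripped from the text, and had_bom
// records that fact so the matching writer can re-emit the BOM when the formatted file is saved.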
|
publish.go | (ctx context.Context, cmd *cobra.Command, f kcmdutil.Factory) error {
logrus.Infof("Publishing image set from archive %q to registry %q", o.ArchivePath, o.ToMirror)
var currentMeta v1alpha1.Metadata
var incomingMeta v1alpha1.Metadata
a := archive.NewArchiver()
// Create workspace
tmpdir, err := ioutil.TempDir(o.Dir, "imageset")
if err != nil {
return err
}
if !o.SkipCleanup {
defer os.RemoveAll(tmpdir)
}
logrus.Debugf("Using temporary directory %s to unarchive metadata", tmpdir)
// Get file information from the source archives
filesInArchive, err := o.readImageSet(a)
if err != nil {
return err
}
// Extract incoming metadata
archive, ok := filesInArchive[config.MetadataFile]
if !ok {
return errors.New("metadata is not in archive")
}
logrus.Debug("Extracting incoming metadta")
if err := a.Extract(archive, config.MetadataBasePath, tmpdir); err != nil {
return err
}
// Create backend for o.Dir
backend, err := storage.NewLocalBackend(o.Dir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Create a local workspace backend
workspace, err := storage.NewLocalBackend(tmpdir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Check for existing metadata. Metadata will be extracted before
// the extraction of the archive so imageset mismatches can
// be handled before the longer unarchiving process
existingMeta := filepath.Join(o.Dir, config.MetadataBasePath)
if _, err := os.Stat(existingMeta); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return err
}
logrus.Infof("No existing metadata found. Setting up new workspace")
// Find first file and load metadata from that
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
} else {
// Compare metadata UID and sequence number
if err := backend.ReadMetadata(ctx, ¤tMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading current metadata: %v", err)
}
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
logrus.Debug("Checking metadata UID")
if incomingMeta.MetadataSpec.Uid != currentMeta.MetadataSpec.Uid {
return &UuidError{currentMeta.MetadataSpec.Uid, incomingMeta.MetadataSpec.Uid}
}
logrus.Debug("Check metadata sequence number")
currRun := currentMeta.PastMirrors[len(currentMeta.PastMirrors)-1]
incomingRun := incomingMeta.PastMirrors[len(incomingMeta.PastMirrors)-1]
if incomingRun.Sequence != (currRun.Sequence + 1) {
return &SequenceError{incomingRun.Sequence, currRun.Sequence}
}
}
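// Worked example (values illustrative): if the workspace's last recorded run has Sequence 3,
// only an archive whose newest PastMirrors entry carries Sequence 4 is accepted; replaying
// sequence 3 or skipping ahead to 5 returns a SequenceError before any unpacking happens.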
// Unarchive full imageset after metadata checks
if err := o.unpackImageSet(a, o.Dir); err != nil {
return err
}
// Load image associations to find layers not present locally.
assocPath := filepath.Join(o.Dir, config.AssociationsBasePath)
assocs, err := readAssociations(assocPath)
if err != nil {
return fmt.Errorf("error reading associations from %s: %v", o.Dir, err)
}
toMirrorRef, err := imagesource.ParseReference(o.ToMirror)
if err != nil {
return fmt.Errorf("error parsing mirror registry %q: %v", o.ToMirror, err)
}
logrus.Debugf("mirror reference: %#v", toMirrorRef)
if toMirrorRef.Type != imagesource.DestinationRegistry {
return fmt.Errorf("destination %q must be a registry reference", o.ToMirror)
}
var (
errs []error
// Mappings for mirroring image types.
genericMappings []imgmirror.Mapping
releaseMappings []imgmirror.Mapping
catalogMappings []imgmirror.Mapping
// Map of remote layer digest to the set of paths they should be fetched to.
missingLayers = map[string][]string{}
)
for imageName, assoc := range assocs {
assoc := assoc
logrus.Debugf("reading assoc: %s", assoc.Name)
// All manifest layers will be pulled below if associated,
// so just sanity-check that the layers are referenced in some association.
if len(assoc.ManifestDigests) != 0 {
for _, manifestDigest := range assoc.ManifestDigests {
if _, hasManifest := assocs[manifestDigest]; !hasManifest {
errs = append(errs, fmt.Errorf("image %q: expected associations to have manifest %s but was not found", imageName, manifestDigest))
}
}
}
for _, layerDigest := range assoc.LayerDigests {
logrus.Debugf("Found layer %v for image %s", layerDigest, imageName)
// Construct blob path, which is adjacent to the manifests path.
imageBlobPath := filepath.Join(o.Dir, "v2", assoc.Path, "blobs", layerDigest)
blobPath := filepath.Join(o.Dir, "blobs", layerDigest)
switch _, err := os.Stat(blobPath); {
case err == nil:
// If a layer exists in the archive, simply copy it to the blob path
// adjacent to its parent manifest.
if src, err := os.Open(blobPath); err == nil {
err = copyBlobFile(src, imageBlobPath)
if err := src.Close(); err != nil {
logrus.Error(err)
}
} else {
err = fmt.Errorf("error opening existing blob file: %v", err)
}
case errors.Is(err, os.ErrNotExist):
// Image layer must exist in the mirror registry since it wasn't archived,
// so fetch the layer and place it in the blob dir so it can be mirrored by `oc`.
missingLayers[layerDigest] = append(missingLayers[layerDigest], imageBlobPath)
default:
err = fmt.Errorf("accessing image %q blob %q at %s: %v", imageName, layerDigest, blobPath, err)
}
if err != nil {
errs = append(errs, err)
}
}
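// In short: layers that shipped in the archive are copied next to their parent manifest,
// while layers that were omitted (the mirror registry should already hold them from an
// earlier run) are queued in missingLayers and fetched from the registry afterwards.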
m := imgmirror.Mapping{Name: assoc.Name}
if m.Source, err = imagesource.ParseReference("file://" + assoc.Path); err != nil {
errs = append(errs, fmt.Errorf("error parsing source ref %q: %v", assoc.Path, err))
continue
}
// The mirrorer is not a fan of accepting an image ID when a tag symlink is available
// for some reason.
// TODO(estroz): investigate the cause of this behavior.
if assoc.TagSymlink == "" {
m.Source.Ref.ID = assoc.ID
} else {
m.Source.Ref.Tag = assoc.TagSymlink
}
m.Destination = toMirrorRef
m.Destination.Ref.Namespace = m.Source.Ref.Namespace
m.Destination.Ref.Name = m.Source.Ref.Name
m.Destination.Ref.Tag = m.Source.Ref.Tag
m.Destination.Ref.ID = m.Source.Ref.ID
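// Illustrative mapping (names assumed): with assoc.Path "openshift/release-images",
// tag "4.9.0-x86_64" and o.ToMirror "registry.example.com:5000", the source
// "file://openshift/release-images:4.9.0-x86_64" ends up mirrored to
// "registry.example.com:5000/openshift/release-images:4.9.0-x86_64".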
switch assoc.Type {
case image.TypeGeneric:
genericMappings = append(genericMappings, m)
case image.TypeOCPRelease:
m.Destination.Ref.Tag = ""
m.Destination.Ref.ID = ""
// Only add top level release images to
// release mapping
if strings.Contains(assoc.Name, "ocp-release") {
releaseMappings = append(releaseMappings, m)
}
case image.TypeOperatorCatalog:
catalogMappings = append(catalogMappings, m)
case image.TypeOperatorBundle, image.TypeOperatorRelatedImage:
// Let the `catalog mirror` API call mirror all bundle and related images in the catalog.
// TODO(estroz): this may be incorrect if bundle and related images not in a catalog can be archived,
// ex. as an additional image. Can probably get around this by mirroring
// images of this type not mapped by preceding `catalog mirror` calls.
case image.TypeInvalid:
errs = append(errs, fmt.Errorf("image %q: image type is not set", imageName))
default:
errs = append(errs, fmt.Errorf("image %q: invalid image type %v", imageName, assoc.Type))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
if len(missingLayers) != 0 {
// Fetch all layers and mount them at the specified paths.
if err := o.fetchBlobs(ctx, incomingMeta, catalogMappings, missingLayers); err != nil {
return err
}
}
// Now that all layers have been pulled, symlink all tagged manifests to their digest files.
for _, assoc := range assocs {
if assoc.TagSymlink == "" {
continue
}
manifestsPath := filepath.Join(o.Dir, "v2", assoc.Path, "manifests")
srcPath := | Run | identifier_name |
|
publish.go |
func (o *Options) Run(ctx context.Context, cmd *cobra.Command, f kcmdutil.Factory) error {
logrus.Infof("Publishing image set from archive %q to registry %q", o.ArchivePath, o.ToMirror)
var currentMeta v1alpha1.Metadata
var incomingMeta v1alpha1.Metadata
a := archive.NewArchiver()
// Create workspace
tmpdir, err := ioutil.TempDir(o.Dir, "imageset")
if err != nil {
return err
}
if !o.SkipCleanup {
defer os.RemoveAll(tmpdir)
}
logrus.Debugf("Using temporary directory %s to unarchive metadata", tmpdir)
// Get file information from the source archives
filesInArchive, err := o.readImageSet(a)
if err != nil {
return err
}
// Extract incoming metadata
archive, ok := filesInArchive[config.MetadataFile]
if !ok {
return errors.New("metadata is not in archive")
}
logrus.Debug("Extracting incoming metadta")
if err := a.Extract(archive, config.MetadataBasePath, tmpdir); err != nil {
return err
}
// Create backend for o.Dir
backend, err := storage.NewLocalBackend(o.Dir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Create a local workspace backend
workspace, err := storage.NewLocalBackend(tmpdir)
if err != nil {
return fmt.Errorf("error opening local backend: %v", err)
}
// Check for existing metadata. Metadata will be extracted before
// the extraction of the archive so imageset mismatches can
// be handled before the longer unarchiving process
existingMeta := filepath.Join(o.Dir, config.MetadataBasePath)
if _, err := os.Stat(existingMeta); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return err
}
logrus.Infof("No existing metadata found. Setting up new workspace")
// Find first file and load metadata from that
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
} else {
// Compare metadata UID and sequence number
if err := backend.ReadMetadata(ctx, ¤tMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading current metadata: %v", err)
}
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
logrus.Debug("Checking metadata UID")
if incomingMeta.MetadataSpec.Uid != currentMeta.MetadataSpec.Uid {
return &UuidError{currentMeta.MetadataSpec.Uid, incomingMeta.MetadataSpec.Uid}
}
logrus.Debug("Check metadata sequence number")
currRun := currentMeta.PastMirrors[len(currentMeta.PastMirrors)-1]
incomingRun := incomingMeta.PastMirrors[len(incomingMeta.PastMirrors)-1]
if incomingRun.Sequence != (currRun.Sequence + 1) {
return &SequenceError{incomingRun.Sequence, currRun.Sequence}
}
}
// Unarchive full imageset after metadata checks
if err := o.unpackImageSet(a, o.Dir); err != nil {
return err
}
// Load image associations to find layers not present locally.
assocPath := filepath.Join(o.Dir, config.AssociationsBasePath)
assocs, err := readAssociations(assocPath)
if err != nil {
return fmt.Errorf("error reading associations from %s: %v", o.Dir, err)
}
toMirrorRef, err := imagesource.ParseReference(o.ToMirror)
if err != nil {
return fmt.Errorf("error parsing mirror registry %q: %v", o.ToMirror, err)
}
logrus.Debugf("mirror reference: %#v", toMirrorRef)
if toMirrorRef.Type != imagesource.DestinationRegistry {
return fmt.Errorf("destination %q must be a registry reference", o.ToMirror)
}
var (
errs []error
// Mappings for mirroring image types.
genericMappings []imgmirror.Mapping
releaseMappings []imgmirror.Mapping
catalogMappings []imgmirror.Mapping
// Map of remote layer digest to the set of paths they should be fetched to.
missingLayers = map[string][]string{}
)
for imageName, assoc := range assocs {
assoc := assoc
logrus.Debugf("reading assoc: %s", assoc.Name)
// All manifest layers will be pulled below if associated,
// so just sanity-check that the layers are referenced in some association.
if len(assoc.ManifestDigests) != 0 {
for _, manifestDigest := range assoc.ManifestDigests {
if _, hasManifest := assocs[manifestDigest]; !hasManifest {
errs = append(errs, fmt.Errorf("image %q: expected associations to have manifest %s but was not found", imageName, manifestDigest))
}
}
}
for _, layerDigest := range assoc.LayerDigests {
logrus.Debugf("Found layer %v for image %s", layerDigest, imageName)
// Construct blob path, which is adjacent to the manifests path.
imageBlobPath := filepath.Join(o.Dir, "v2", assoc.Path, "blobs", layerDigest)
blobPath := filepath.Join(o.Dir, "blobs", layerDigest)
switch _, err := os.Stat(blobPath); {
case err == nil:
// If a layer exists in the archive, simply copy it to the blob path
// adjacent to its parent manifest.
if src, err := os.Open(blobPath); err == nil {
err = copyBlobFile(src, imageBlobPath)
if err := src.Close(); err != nil {
logrus.Error(err)
}
} else {
err = fmt.Errorf("error opening existing blob file: %v", err)
}
case errors.Is(err, os.ErrNotExist):
// Image layer must exist in the mirror registry since it wasn't archived,
// so fetch the layer and place it in the blob dir so it can be mirrored by `oc`.
missingLayers[layerDigest] = append(missingLayers[layerDigest], imageBlobPath)
default:
err = fmt.Errorf("accessing image %q blob %q at %s: %v", imageName, layerDigest, blobPath, err)
}
if err != nil {
errs = append(errs, err)
}
}
m := imgmirror.Mapping{Name: assoc.Name}
if m.Source, err = imagesource.ParseReference("file://" + assoc.Path); err != nil {
errs = append(errs, fmt.Errorf("error parsing source ref %q: %v", assoc.Path, err))
continue
}
// The mirrorer is not a fan of accepting an image ID when a tag symlink is available
// for some reason.
// TODO(estroz): investigate the cause of this behavior.
if assoc.TagSymlink == "" {
m.Source.Ref.ID = assoc.ID
} else {
m.Source.Ref.Tag = assoc.TagSymlink
}
m.Destination = toMirrorRef
m.Destination.Ref.Namespace = m.Source.Ref.Namespace
m.Destination.Ref.Name = m.Source.Ref.Name
m.Destination.Ref.Tag = m.Source.Ref.Tag
m.Destination.Ref.ID = m.Source.Ref.ID
switch assoc.Type {
case image.TypeGeneric:
genericMappings = append(genericMappings, m)
case image.TypeOCPRelease:
m.Destination.Ref.Tag = ""
m.Destination.Ref.ID = ""
// Only add top level release images to
// release mapping
if strings.Contains(assoc.Name, "ocp-release") {
releaseMappings = append(releaseMappings, m)
}
case image.TypeOperatorCatalog:
catalogMappings = append(catalogMappings, m)
case image.TypeOperatorBundle, image.TypeOperatorRelatedImage:
// Let the `catalog mirror` API call mirror all bundle and related images in the catalog.
// TODO(estroz): this may be incorrect if bundle and related images not in a catalog can be archived,
// ex. as an additional image. Can probably get around this by mirroring
// images of this type not mapped by preceding `catalog mirror` calls.
case image.TypeInvalid:
errs = append(errs, fmt.Errorf("image %q: image type is not set", imageName))
default:
errs = append(errs, fmt.Errorf("image %q: invalid image type %v", imageName, assoc.Type))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
if len(missingLayers) != 0 {
// Fetch all layers and mount them at the specified paths.
if err := o.fetchBlobs(ctx, incomingMeta, catalogMappings, missingLayers); err != nil {
return err
}
}
// Now that all layers have been pulled, symlink all tagged manifests to their digest files.
for _, assoc := range assocs {
if assoc | {
return fmt.Sprintf("Bundle Sequence out of order. Current sequence %v, incoming sequence %v", s.CurrSeq, s.inSeq)
} | identifier_body |
|
publish.go | err != nil {
return fmt.Errorf("error reading current metadata: %v", err)
}
if err := workspace.ReadMetadata(ctx, &incomingMeta, config.MetadataBasePath); err != nil {
return fmt.Errorf("error reading incoming metadata: %v", err)
}
logrus.Debug("Checking metadata UID")
if incomingMeta.MetadataSpec.Uid != currentMeta.MetadataSpec.Uid {
return &UuidError{currentMeta.MetadataSpec.Uid, incomingMeta.MetadataSpec.Uid}
}
logrus.Debug("Check metadata sequence number")
currRun := currentMeta.PastMirrors[len(currentMeta.PastMirrors)-1]
incomingRun := incomingMeta.PastMirrors[len(incomingMeta.PastMirrors)-1]
if incomingRun.Sequence != (currRun.Sequence + 1) {
return &SequenceError{incomingRun.Sequence, currRun.Sequence}
}
}
// Unarchive full imageset after metadata checks
if err := o.unpackImageSet(a, o.Dir); err != nil {
return err
}
// Load image associations to find layers not present locally.
assocPath := filepath.Join(o.Dir, config.AssociationsBasePath)
assocs, err := readAssociations(assocPath)
if err != nil {
return fmt.Errorf("error reading associations from %s: %v", o.Dir, err)
}
toMirrorRef, err := imagesource.ParseReference(o.ToMirror)
if err != nil {
return fmt.Errorf("error parsing mirror registry %q: %v", o.ToMirror, err)
}
logrus.Debugf("mirror reference: %#v", toMirrorRef)
if toMirrorRef.Type != imagesource.DestinationRegistry {
return fmt.Errorf("destination %q must be a registry reference", o.ToMirror)
}
var (
errs []error
// Mappings for mirroring image types.
genericMappings []imgmirror.Mapping
releaseMappings []imgmirror.Mapping
catalogMappings []imgmirror.Mapping
// Map of remote layer digest to the set of paths they should be fetched to.
missingLayers = map[string][]string{}
)
for imageName, assoc := range assocs {
assoc := assoc
logrus.Debugf("reading assoc: %s", assoc.Name)
// All manifest layers will be pulled below if associated,
// so just sanity-check that the layers are referenced in some association.
if len(assoc.ManifestDigests) != 0 {
for _, manifestDigest := range assoc.ManifestDigests {
if _, hasManifest := assocs[manifestDigest]; !hasManifest {
errs = append(errs, fmt.Errorf("image %q: expected associations to have manifest %s but was not found", imageName, manifestDigest))
}
}
}
for _, layerDigest := range assoc.LayerDigests {
logrus.Debugf("Found layer %v for image %s", layerDigest, imageName)
// Construct blob path, which is adjacent to the manifests path.
imageBlobPath := filepath.Join(o.Dir, "v2", assoc.Path, "blobs", layerDigest)
blobPath := filepath.Join(o.Dir, "blobs", layerDigest)
switch _, err := os.Stat(blobPath); {
case err == nil:
// If a layer exists in the archive, simply copy it to the blob path
// adjacent to its parent manifest.
if src, err := os.Open(blobPath); err == nil {
err = copyBlobFile(src, imageBlobPath)
if err := src.Close(); err != nil {
logrus.Error(err)
}
} else {
err = fmt.Errorf("error opening existing blob file: %v", err)
}
case errors.Is(err, os.ErrNotExist):
// Image layer must exist in the mirror registry since it wasn't archived,
// so fetch the layer and place it in the blob dir so it can be mirrored by `oc`.
missingLayers[layerDigest] = append(missingLayers[layerDigest], imageBlobPath)
default:
err = fmt.Errorf("accessing image %q blob %q at %s: %v", imageName, layerDigest, blobPath, err)
}
if err != nil {
errs = append(errs, err)
}
}
m := imgmirror.Mapping{Name: assoc.Name}
if m.Source, err = imagesource.ParseReference("file://" + assoc.Path); err != nil {
errs = append(errs, fmt.Errorf("error parsing source ref %q: %v", assoc.Path, err))
continue
}
// The mirrorer is not a fan of accepting an image ID when a tag symlink is available
// for some reason.
// TODO(estroz): investigate the cause of this behavior.
if assoc.TagSymlink == "" {
m.Source.Ref.ID = assoc.ID
} else {
m.Source.Ref.Tag = assoc.TagSymlink
}
m.Destination = toMirrorRef
m.Destination.Ref.Namespace = m.Source.Ref.Namespace
m.Destination.Ref.Name = m.Source.Ref.Name
m.Destination.Ref.Tag = m.Source.Ref.Tag
m.Destination.Ref.ID = m.Source.Ref.ID
switch assoc.Type {
case image.TypeGeneric:
genericMappings = append(genericMappings, m)
case image.TypeOCPRelease:
m.Destination.Ref.Tag = ""
m.Destination.Ref.ID = ""
// Only add top level release images to
// release mapping
if strings.Contains(assoc.Name, "ocp-release") {
releaseMappings = append(releaseMappings, m)
}
case image.TypeOperatorCatalog:
catalogMappings = append(catalogMappings, m)
case image.TypeOperatorBundle, image.TypeOperatorRelatedImage:
// Let the `catalog mirror` API call mirror all bundle and related images in the catalog.
// TODO(estroz): this may be incorrect if bundle and related images not in a catalog can be archived,
// ex. as an additional image. Can probably get around this by mirroring
// images of this type not mapped by preceding `catalog mirror` calls.
case image.TypeInvalid:
errs = append(errs, fmt.Errorf("image %q: image type is not set", imageName))
default:
errs = append(errs, fmt.Errorf("image %q: invalid image type %v", imageName, assoc.Type))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
if len(missingLayers) != 0 {
// Fetch all layers and mount them at the specified paths.
if err := o.fetchBlobs(ctx, incomingMeta, catalogMappings, missingLayers); err != nil {
return err
}
}
// Now that all layers have been pulled, symlink all tagged manifests to their digest files.
for _, assoc := range assocs {
if assoc.TagSymlink == "" {
continue
}
manifestsPath := filepath.Join(o.Dir, "v2", assoc.Path, "manifests")
srcPath := filepath.Join(manifestsPath, assoc.ID)
dstPath := filepath.Join(manifestsPath, assoc.TagSymlink)
if _, err := os.Stat(dstPath); err == nil || errors.Is(err, os.ErrExist) {
logrus.Debugf("image %s: tag %s symlink for manifest %s already exists", assoc.Name, assoc.TagSymlink, assoc.ID)
continue
}
if err := os.Symlink(srcPath, dstPath); err != nil {
errs = append(errs, fmt.Errorf("error symlinking manifest digest %q to tag %q: %v", assoc.ID, assoc.TagSymlink, err))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
// import imagecontentsourcepolicy
logrus.Info("ICSP importing not implemented")
// import catalogsource
logrus.Info("CatalogSource importing not implemented")
// Mirror all file sources of each available image type to mirror registry.
if len(genericMappings) != 0 {
if logrus.IsLevelEnabled(logrus.DebugLevel) {
var srcs []string
for _, m := range genericMappings {
srcs = append(srcs, m.Source.String())
}
logrus.Debugf("mirroring generic images: %q", srcs)
}
genOpts := imgmirror.NewMirrorImageOptions(o.IOStreams)
genOpts.Mappings = genericMappings
genOpts.DryRun = o.DryRun
genOpts.FromFileDir = o.Dir
genOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
genOpts.SkipMultipleScopes = true
genOpts.KeepManifestList = true
genOpts.SecurityOptions.Insecure = o.SkipTLS
if err := genOpts.Validate(); err != nil |
if err := genOpts.Run(); err != nil {
return fmt.Errorf("error running generic image mirror: %v", err)
}
}
for _, m := range releaseMappings {
logrus.Debugf("mirroring release image: %s", m.Source.String())
relOpts := release.NewMirrorOptions(o.IOStreams)
relOpts.From = m.Source.String()
relOpts.FromDir = o.Dir
| {
return fmt.Errorf("invalid image mirror options: %v", err)
} | conditional_block |
publish.go | )
for imageName, assoc := range assocs {
assoc := assoc
logrus.Debugf("reading assoc: %s", assoc.Name)
// All manifest layers will be pulled below if associated,
// so just sanity-check that the layers are referenced in some association.
if len(assoc.ManifestDigests) != 0 {
for _, manifestDigest := range assoc.ManifestDigests {
if _, hasManifest := assocs[manifestDigest]; !hasManifest {
errs = append(errs, fmt.Errorf("image %q: expected associations to have manifest %s but was not found", imageName, manifestDigest))
}
}
}
for _, layerDigest := range assoc.LayerDigests {
logrus.Debugf("Found layer %v for image %s", layerDigest, imageName)
// Construct blob path, which is adjacent to the manifests path.
imageBlobPath := filepath.Join(o.Dir, "v2", assoc.Path, "blobs", layerDigest)
blobPath := filepath.Join(o.Dir, "blobs", layerDigest)
switch _, err := os.Stat(blobPath); {
case err == nil:
// If a layer exists in the archive, simply copy it to the blob path
// adjacent to its parent manifest.
if src, err := os.Open(blobPath); err == nil {
err = copyBlobFile(src, imageBlobPath)
if err := src.Close(); err != nil {
logrus.Error(err)
}
} else {
err = fmt.Errorf("error opening existing blob file: %v", err)
}
case errors.Is(err, os.ErrNotExist):
// Image layer must exist in the mirror registry since it wasn't archived,
// so fetch the layer and place it in the blob dir so it can be mirrored by `oc`.
missingLayers[layerDigest] = append(missingLayers[layerDigest], imageBlobPath)
default:
err = fmt.Errorf("accessing image %q blob %q at %s: %v", imageName, layerDigest, blobPath, err)
}
if err != nil {
errs = append(errs, err)
}
}
m := imgmirror.Mapping{Name: assoc.Name}
if m.Source, err = imagesource.ParseReference("file://" + assoc.Path); err != nil {
errs = append(errs, fmt.Errorf("error parsing source ref %q: %v", assoc.Path, err))
continue
}
// The mirrorer is not a fan of accepting an image ID when a tag symlink is available
// for some reason.
// TODO(estroz): investigate the cause of this behavior.
if assoc.TagSymlink == "" {
m.Source.Ref.ID = assoc.ID
} else {
m.Source.Ref.Tag = assoc.TagSymlink
}
m.Destination = toMirrorRef
m.Destination.Ref.Namespace = m.Source.Ref.Namespace
m.Destination.Ref.Name = m.Source.Ref.Name
m.Destination.Ref.Tag = m.Source.Ref.Tag
m.Destination.Ref.ID = m.Source.Ref.ID
switch assoc.Type {
case image.TypeGeneric:
genericMappings = append(genericMappings, m)
case image.TypeOCPRelease:
m.Destination.Ref.Tag = ""
m.Destination.Ref.ID = ""
// Only add top level release images to
// release mapping
if strings.Contains(assoc.Name, "ocp-release") {
releaseMappings = append(releaseMappings, m)
}
case image.TypeOperatorCatalog:
catalogMappings = append(catalogMappings, m)
case image.TypeOperatorBundle, image.TypeOperatorRelatedImage:
// Let the `catalog mirror` API call mirror all bundle and related images in the catalog.
// TODO(estroz): this may be incorrect if bundle and related images not in a catalog can be archived,
// ex. as an additional image. Can probably get around this by mirroring
// images of this type not mapped by preceding `catalog mirror` calls.
case image.TypeInvalid:
errs = append(errs, fmt.Errorf("image %q: image type is not set", imageName))
default:
errs = append(errs, fmt.Errorf("image %q: invalid image type %v", imageName, assoc.Type))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
if len(missingLayers) != 0 {
// Fetch all layers and mount them at the specified paths.
if err := o.fetchBlobs(ctx, incomingMeta, catalogMappings, missingLayers); err != nil {
return err
}
}
// Now that all layers have been pulled, symlink all tagged manifests to their digest files.
for _, assoc := range assocs {
if assoc.TagSymlink == "" {
continue
}
manifestsPath := filepath.Join(o.Dir, "v2", assoc.Path, "manifests")
srcPath := filepath.Join(manifestsPath, assoc.ID)
dstPath := filepath.Join(manifestsPath, assoc.TagSymlink)
if _, err := os.Stat(dstPath); err == nil || errors.Is(err, os.ErrExist) {
logrus.Debugf("image %s: tag %s symlink for manifest %s already exists", assoc.Name, assoc.TagSymlink, assoc.ID)
continue
}
if err := os.Symlink(srcPath, dstPath); err != nil {
errs = append(errs, fmt.Errorf("error symlinking manifest digest %q to tag %q: %v", assoc.ID, assoc.TagSymlink, err))
}
}
if len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
// import imagecontentsourcepolicy
logrus.Info("ICSP importing not implemented")
// import catalogsource
logrus.Info("CatalogSource importing not implemented")
// Mirror all file sources of each available image type to mirror registry.
if len(genericMappings) != 0 {
if logrus.IsLevelEnabled(logrus.DebugLevel) {
var srcs []string
for _, m := range genericMappings {
srcs = append(srcs, m.Source.String())
}
logrus.Debugf("mirroring generic images: %q", srcs)
}
genOpts := imgmirror.NewMirrorImageOptions(o.IOStreams)
genOpts.Mappings = genericMappings
genOpts.DryRun = o.DryRun
genOpts.FromFileDir = o.Dir
genOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
genOpts.SkipMultipleScopes = true
genOpts.KeepManifestList = true
genOpts.SecurityOptions.Insecure = o.SkipTLS
if err := genOpts.Validate(); err != nil {
return fmt.Errorf("invalid image mirror options: %v", err)
}
if err := genOpts.Run(); err != nil {
return fmt.Errorf("error running generic image mirror: %v", err)
}
}
for _, m := range releaseMappings {
logrus.Debugf("mirroring release image: %s", m.Source.String())
relOpts := release.NewMirrorOptions(o.IOStreams)
relOpts.From = m.Source.String()
relOpts.FromDir = o.Dir
relOpts.To = m.Destination.String()
relOpts.SecurityOptions.Insecure = o.SkipTLS
relOpts.DryRun = o.DryRun
if err := relOpts.Complete(cmd, f, nil); err != nil {
return fmt.Errorf("error initializing release mirror options: %v", err)
}
if err := relOpts.Validate(); err != nil {
return fmt.Errorf("invalid release mirror options: %v", err)
}
if err := relOpts.Run(); err != nil {
return fmt.Errorf("error running %q release mirror: %v", m, err)
}
}
// Change to the working dir since catalog mirroring does not respect
// FileDir in the "expected" manner (unclear why).
wd, err := os.Getwd()
if err != nil {
return err
}
if err := os.Chdir(o.Dir); err != nil {
return err
}
defer func() {
if err := os.Chdir(wd); err != nil {
logrus.Error(err)
}
}()
for _, m := range catalogMappings {
logrus.Debugf("mirroring catalog image: %s", m.Source)
catOpts := catalog.NewMirrorCatalogOptions(o.IOStreams)
catOpts.DryRun = o.DryRun
catOpts.MaxPathComponents = 2
catOpts.SecurityOptions.Insecure = o.SkipTLS
catOpts.FilterOptions = imagemanifest.FilterOptions{FilterByOS: ".*"}
args := []string{
m.Source.String(),
o.ToMirror,
}
if err := catOpts.Complete(&cobra.Command{}, args); err != nil {
return fmt.Errorf("error constructing catalog options: %v", err)
}
if err := catOpts.Validate(); err != nil {
return fmt.Errorf("invalid catalog mirror options: %v", err)
}
if err := catOpts.Run(); err != nil {
return fmt.Errorf("error mirroring catalog: %v", err)
} | }
if err := os.Chdir(wd); err != nil {
return err
}
| random_line_split |
|
tree_dir.py | size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
self.master.geometry(size)
def show_img_in_canvas(self, pil_image):
self.imscale = 1.0 # scale for the canvas image zoom, public for outer classes
self.__delta = 1.3 # zoom magnitude
self.__filter = Image.ANTIALIAS # could be: NEAREST, BILINEAR, BICUBIC and ANTIALIAS
self.__previous_state = 0 # previous state of the keyboard
self.pil_image = pil_image
with warnings.catch_warnings(): # suppress DecompressionBombWarning
warnings.simplefilter('ignore')
self.__image = self.pil_image # open image, but don't load it
self.imwidth, self.imheight = self.__image.size # public for outer classes
self.__min_side = min(self.imwidth, self.imheight) # get the smaller image side
# Create image pyramid
self.__pyramid = [self.pil_image]
# Set ratio coefficient for image pyramid
self.__ratio = 1.0
self.__curr_img = 0 # current image from the pyramid
self.__scale = self.imscale * self.__ratio # image pyramid scale
self.__reduction = 2 # reduction degree of image pyramid
w, h = self.__pyramid[-1].size
while w > 512 and h > 512: # top pyramid image is around 512 pixels in size
w /= self.__reduction # divide on reduction degree
h /= self.__reduction # divide on reduction degree
self.__pyramid.append(self.__pyramid[-1].resize((int(w), int(h)), self.__filter))
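# Worked example (sizes illustrative): a 4096x3072 source yields pyramid levels
# 4096x3072, 2048x1536, 1024x768 and 512x384; halving stops once either side is <= 512.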
# Put image into container rectangle and use it to set proper coordinates to the image
self.container = self.canvas_image.create_rectangle((0, 0, self.imwidth, self.imheight), width=0)
self.__show_image() # show image on the canvas
self.canvas_image.focus_set() # set focus on the canvas
def __scroll_x(self, *args, **kwargs):
""" Scroll canvas horizontally and redraw the image """
self.canvas_image.xview(*args) # scroll horizontally
self.__show_image() # redraw the image
def __scroll_y(self, *args, **kwargs):
""" Scroll canvas vertically and redraw the image """
self.canvas_image.yview(*args) # scroll vertically
self.__show_image() # redraw the image
def __move_from(self, event):
""" Remember previous coordinates for scrolling with the mouse """
self.canvas_image.scan_mark(event.x, event.y)
self.from_coord = (event.x, event.y)
def __move_to(self, event):
""" Drag (move) canvas to the new position """
self.canvas_image.scan_dragto(event.x, event.y, gain=1)
self.to_coord = (event.x, event.y)
self.__show_image() # zoom tile and show it on the canvas
def get_move_gap(self, event):
""" B1 release时获取移动的距离 """
try:
self.move_gap = list(np.array(self.to_coord) - np.array(self.from_coord) + np.array(self.move_gap))
except:
self.move_gap = [0, 0]
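# e.g. a drag from (100, 100) to (140, 70) adds (40, -30) to the accumulated gap;
# the except branch simply resets the gap when no previous drag coordinates exist.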
def outside(self, x, y):
""" Checks if the point (x,y) is outside the image area """
bbox = self.canvas_image.coords(self.container) # get image area
if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
return False # point (x,y) is inside the image area
else:
return True # point (x,y) is outside the image area
def __wheel(self, event):
""" Zoom with mouse wheel """
x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas
y = self.canvas_image.canvasy(event.y)
if self.outside(x, y): return # zoom only inside image area
scale = 1.0
# Respond to Linux (event.num) or Windows (event.delta) wheel event
if event.num == 5 or event.delta == -120: # scroll down, smaller
if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels
self.imscale /= self.__delta
scale /= self.__delta
if event.num == 4 or event.delta == 120: # scroll up, bigger
i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1
if i < self.imscale: return # 1 pixel is bigger than the visible area
self.imscale *= self.__delta
scale *= self.__delta
# Take appropriate image from the pyramid
k = self.imscale * self.__ratio # temporary coefficient
self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)
self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))
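# e.g. after three zoom-out steps imscale ~= 1/1.3**3 ~= 0.46, so log2(0.46) ~= -1.1 and
# __curr_img becomes 1 (the half-size pyramid level, assuming at least two levels exist);
# __scale ~= 0.46 * 2 ~= 0.91, so the half-size image is drawn at ~91% instead of
# shrinking the full-resolution original.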
#
self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects
# Redraw some figures before showing image on the screen
self.redraw_figures() # method for child classes
self.__show_image()
def __keystroke(self, event):
""" Scrolling with the keyboard.
Independent from the language of the keyboard, CapsLock, <Ctrl>+<key>, etc. """
if event.state - self.__previous_state == 4: # means that the Control key is pressed
pass # do nothing if Control key is pressed
else:
self.__previous_state = event.state # remember the last keystroke state
# Up, Down, Left, Right keystrokes
if event.keycode in [68, 39, 102]: # scroll right, keys 'd' or 'Right'
self.__scroll_x('scroll', 1, 'unit', event=event)
elif event.keycode in [65, 37, 100]: # scroll left, keys 'a' or 'Left'
self.__scroll_x('scroll', -1, 'unit', event=event)
elif event.keycode in [87, 38, 104]: # scroll up, keys 'w' or 'Up'
self.__scroll_y('scroll', -1, 'unit', event=event)
elif event.keycode in [83, 40, 98]: # scroll down, keys 's' or 'Down'
self.__scroll_y('scroll', 1, 'unit', event=event)
def redraw_figures(self):
""" Dummy function to redraw figures in the children classes """
pass
def crop(self, bbox):
""" Crop rectangle from the image and return it """
return self.__pyramid[0].crop(bbox)
def destroy(self):
""" ImageFrame destructor """
del self.move_gap
del self.canvas_image.imagetk
# # print(self.imageid)
self.pil_image.close()
del self.pil_image
self.canvas_image.delete(self.imageid) # remove the image from the canvas
map(lambda i: i.close, self.__pyramid) # close all pyramid images
del self.__pyramid[:] # delete pyramid list
del self.__pyramid # delete pyramid variable
self.canvas_image.delete(tk.ALL)
self.canvas_image.destroy()
# self.img_frame.destroy()
def __show_image(self):
""" Show image on the Canvas. Implements correct image zoom almost like in Google Maps """
box_image = self.canvas_image.coords(self.container) # get image area
box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas
self.canvas_image.canvasy(0),
self.canvas_image.canvasx(self.canvas_image.winfo_width()),
self.canvas_image.canvasy(self.canvas_image.winfo_height()))
self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly
# Get scroll region box
box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),
max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]
# Horizontal part of the image is in the visible area
if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:
box_scroll[0] = self.box_img_int[0]
box_scroll[2] = self.box_img_int[2] | # Vertical part of the image is in the visible area
if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:
box_scroll[1] = self.box_img_int[1] | random_line_split |
|
tree_dir.py | file_list = self.get_path_list()
print(file_list)
if not file_list:
return
# merge image
# Fix a memory leak: previously opened images were never cleared, so the second image opened was still the old one
try:
self.photos.destroy()
except:
pass
self.photos.imgs = file_list
merged_photo = self.photos.merge_photos()
# show image
try:
window.destroy()
except:
import traceback
traceback.print_exc()
window.build_img_canvas()
window.show_img_in_canvas(merged_photo)
def drawtext(self):
textx = self.x+20-1
texty = self.y-4
labeltext = self.item.GetLabelText()
if labeltext:
id = self.canvas.create_text(textx, texty, anchor="nw",
text=labeltext)
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
theme = idleConf.CurrentTheme()
if self.selected:
self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
else:
self.label.configure(idleConf.GetHighlight(theme, 'normal'))
id = self.canvas.create_window(textx, texty,
anchor="nw", window=self.label)
self.label.bind("<1>", self.select_or_edit)
self.label.bind("<Double-1>", self.flip)
self.label.bind("<Control-1>", self.select_more)
self.label.bind("<3>", self.execute_file)
self.text_id = id
class AutoScrollbar(ttk.Scrollbar):
""" A scrollbar that hides itself if it's not needed. Works only for grid geometry manager """
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
self.grid_remove()
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi)
def pack(self, **kw):
raise tk.TclError('Cannot use pack with the widget ' + self.__class__.__name__)
def place(self, **kw):
raise tk.TclError('Cannot use place with the widget ' + self.__class__.__name__)
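# Minimal usage sketch (widget names assumed), mirroring build_img_canvas below; the bar
# calls grid_remove() on itself whenever the whole scrollregion is already visible:
#   vbar = AutoScrollbar(frame, orient=tk.VERTICAL)
#   vbar.grid(row=0, column=1, sticky='ns')
#   canvas = tk.Canvas(frame, yscrollcommand=vbar.set)
#   canvas.grid(row=0, column=0, sticky='nswe')
#   vbar.configure(command=canvas.yview)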
class WholeWindow():
move_gap = [0, 0]
def __init__(self, master):
self.master = master # parent (root) window
self.screen_width, self.screen_height = self.get_screen_size(self.master)
self.center_window(self.screen_width-50, self.screen_height-50)
self.master.resizable(width=False, height=False)
self.build_tree_canvas()
self.build_tree()
self.build_img_canvas()
def build_tree_canvas(self):
# create frame
self.tree_width = self.screen_width // 7
self.tree_height = self.screen_height
frame = tk.Frame(self.master, width=self.tree_width, height=self.tree_height)
frame.grid(row=0, column=0)
# canvas
self.tree_canvas=tk.Canvas(frame, bg='#FFFFFF', scrollregion=(0,0,500,500))
# vbar & hbar
hbar=tk.Scrollbar(frame,orient=tk.HORIZONTAL)
hbar.pack(side=tk.BOTTOM,fill=tk.X)
hbar.config(command=self.tree_canvas.xview)
vbar=tk.Scrollbar(frame,orient=tk.VERTICAL)
vbar.pack(side=tk.RIGHT,fill=tk.Y)
vbar.config(command=self.tree_canvas.yview)
self.tree_canvas.config(width=self.tree_width,height=self.tree_height)
self.tree_canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
self.tree_canvas.pack(side=tk.LEFT,expand=True,fill=tk.BOTH)
def build_tree(self):
home = os.environ.get('HOME')
item = SubFileTreeItem(home)
node = SubTreeNode(self.tree_canvas, None, item)
node.update()
def build_img_canvas(self):
self.box_width = self.screen_width - self.tree_width
self.img_frame = tk.Frame(self.master, width=self.box_width, height=self.screen_height, background='red')
self.img_frame.grid(row=0, column=1)
hbar = AutoScrollbar(self.img_frame, orient=tk.HORIZONTAL)
vbar = AutoScrollbar(self.img_frame, orient=tk.VERTICAL)
hbar.grid(row=1, column=1, sticky='we')
vbar.grid(row=0, column=2, sticky='ns')
# Create canvas and bind it with scrollbars
self.canvas_image = tk.Canvas(self.img_frame, highlightthickness=0, width=self.box_width, height=self.screen_height,
xscrollcommand=hbar.set, yscrollcommand=vbar.set, background='blue')
self.canvas_image.grid(row=0, column=1, sticky='nswe')
self.canvas_image.update() # wait till canvas is created
hbar.configure(command=self.__scroll_x) # bind scrollbars to the canvas
vbar.configure(command=self.__scroll_y)
# Bind events to the Canvas
self.canvas_image.bind('<Configure>', lambda event: self.__show_image()) # canvas is resized
self.canvas_image.bind('<Control-ButtonPress-1>', self.__move_from) # remember canvas position
self.canvas_image.bind('<Control-B1-Motion>', self.__move_to) # move canvas to the new position
self.canvas_image.bind('<Control-B1-ButtonRelease>', self.get_move_gap)
self.canvas_image.bind('<MouseWheel>', self.__wheel) # zoom for Windows and MacOS, but not Linux
self.canvas_image.bind('<Button-5>', self.__wheel) # zoom for Linux, wheel scroll down
self.canvas_image.bind('<Button-4>', self.__wheel) # zoom for Linux, wheel scroll up
# Handle keystrokes in idle mode, because the program slows down on weak computers
# when too many keystroke events arrive at the same time
# self.canvas_image.bind('<Key>', lambda event: self.canvas_image.after_idle(self.__keystroke, event))
def get_screen_size(self, window):
return window.winfo_screenwidth(),window.winfo_screenheight()
def get_window_size(self, window):
return window.winfo_reqwidth(),window.winfo_reqheight()
def center_window(self, width, height):
screenwidth = self.master.winfo_screenwidth()
screenheight = self.master.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
self.master.geometry(size)
def show_img_in_canvas(self, pil_image):
self.imscale = 1.0 # scale for the canvas image zoom, public for outer classes
self.__delta = 1.3 # zoom magnitude
self.__filter = Image.ANTIALIAS # could be: NEAREST, BILINEAR, BICUBIC and ANTIALIAS
self.__previous_state = 0 # previous state of the keyboard
self.pil_image = pil_image
with warnings.catch_warnings(): # suppress DecompressionBombWarning
warnings.simplefilter('ignore')
self.__image = self.pil_image # open image, but don't load it
self.imwidth, self.imheight = self.__image.size # public for outer classes
self.__min_side = min(self.imwidth, self.imheight) # get the smaller image side
# Create image pyramid
self.__pyramid = [self.pil_image]
# Set ratio coefficient for image pyramid
self.__ratio = 1.0
self.__curr_img = 0 # current image from the pyramid
self.__scale = self.imscale * self.__ratio # image pyramid scale
self.__reduction = 2 # reduction degree of image pyramid
w, h = self.__pyramid[-1].size
while w > 512 and h > 512: # top pyramid image is around 512 pixels in size
w /= self.__reduction # divide on reduction degree
h /= self.__reduction # divide on reduction degree
self.__pyramid.append(self.__pyramid[-1].resize((int(w), int(h)), self.__filter))
# Put image into container rectangle and use it to set proper coordinates to the image
self.container = self.canvas_image.create_rectangle((0, 0, self.imwidth, self.imheight), width=0)
self.__show_image() # show image on the canvas
self.canvas_image.focus_set() # set focus on the canvas
def __scroll_x(self, *args, **kwargs):
""" Scroll canvas horizontally and redraw the image """
self.canvas_image.xview(*args) # scroll horizontally
self.__show_image() # redraw the image
def __scroll_y | """
| identifier_name |
|
tree_dir.py | Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
theme = idleConf.CurrentTheme()
if self.selected:
self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
else:
self.label.configure(idleConf.GetHighlight(theme, 'normal'))
id = self.canvas.create_window(textx, texty,
anchor="nw", window | dit)
self.label.bind("<Double-1>", self.flip)
self.label.bind("<Control-1>", self.select_more)
self.label.bind("<3>", self.execute_file)
self.text_id = id
class AutoScrollbar(ttk.Scrollbar):
""" A scrollbar that hides itself if it's not needed. Works only for grid geometry manager """
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
self.grid_remove()
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi)
def pack(self, **kw):
raise tk.TclError('Cannot use pack with the widget ' + self.__class__.__name__)
def place(self, **kw):
raise tk.TclError('Cannot use place with the widget ' + self.__class__.__name__)
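# --- Editor's note: an illustrative, hedged usage sketch for the AutoScrollbar class
# above; it is not part of the original file. It assumes only tkinter (imported as tk
# elsewhere in this module) and the grid geometry manager, since AutoScrollbar hides
# itself via grid_remove() whenever the whole scrollregion is already visible.
def _autoscrollbar_demo():
    root = tk.Tk()
    frame = tk.Frame(root)
    frame.grid(row=0, column=0, sticky='nswe')
    canvas = tk.Canvas(frame, width=300, height=200, scrollregion=(0, 0, 800, 600))
    vbar = AutoScrollbar(frame, orient=tk.VERTICAL, command=canvas.yview)
    canvas.configure(yscrollcommand=vbar.set)  # set() shows or hides the scrollbar
    canvas.grid(row=0, column=1, sticky='nswe')
    vbar.grid(row=0, column=2, sticky='ns')
    return root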
class WholeWindow():
move_gap = [0, 0]
def __init__(self, master):
self.master = master # parent window (root)
self.screen_width, self.screen_height = self.get_screen_size(self.master)
self.center_window(self.screen_width-50, self.screen_height-50)
self.master.resizable(width=False, height=False)
self.build_tree_canvas()
self.build_tree()
self.build_img_canvas()
def build_tree_canvas(self):
# create frame
self.tree_width = self.screen_width // 7
self.tree_height = self.screen_height
frame = tk.Frame(self.master, width=self.tree_width, height=self.tree_height)
frame.grid(row=0, column=0)
# canvas
self.tree_canvas=tk.Canvas(frame, bg='#FFFFFF', scrollregion=(0,0,500,500))
# vbar & hbar
hbar=tk.Scrollbar(frame,orient=tk.HORIZONTAL)
hbar.pack(side=tk.BOTTOM,fill=tk.X)
hbar.config(command=self.tree_canvas.xview)
vbar=tk.Scrollbar(frame,orient=tk.VERTICAL)
vbar.pack(side=tk.RIGHT,fill=tk.Y)
vbar.config(command=self.tree_canvas.yview)
self.tree_canvas.config(width=self.tree_width,height=self.tree_height)
self.tree_canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
self.tree_canvas.pack(side=tk.LEFT,expand=True,fill=tk.BOTH)
def build_tree(self):
home = os.environ.get('HOME')
item = SubFileTreeItem(home)
node = SubTreeNode(self.tree_canvas, None, item)
node.update()
def build_img_canvas(self):
self.box_width = self.screen_width - self.tree_width
self.img_frame = tk.Frame(self.master, width=self.box_width, height=self.screen_height, background='red')
self.img_frame.grid(row=0, column=1)
hbar = AutoScrollbar(self.img_frame, orient=tk.HORIZONTAL)
vbar = AutoScrollbar(self.img_frame, orient=tk.VERTICAL)
hbar.grid(row=1, column=1, sticky='we')
vbar.grid(row=0, column=2, sticky='ns')
# Create canvas and bind it with scrollbars
self.canvas_image = tk.Canvas(self.img_frame, highlightthickness=0, width=self.box_width, height=self.screen_height,
xscrollcommand=hbar.set, yscrollcommand=vbar.set, background='blue')
self.canvas_image.grid(row=0, column=1, sticky='nswe')
self.canvas_image.update() # wait till canvas is created
hbar.configure(command=self.__scroll_x) # bind scrollbars to the canvas
vbar.configure(command=self.__scroll_y)
# Bind events to the Canvas
self.canvas_image.bind('<Configure>', lambda event: self.__show_image()) # canvas is resized
self.canvas_image.bind('<Control-ButtonPress-1>', self.__move_from) # remember canvas position
self.canvas_image.bind('<Control-B1-Motion>', self.__move_to) # move canvas to the new position
self.canvas_image.bind('<Control-B1-ButtonRelease>', self.get_move_gap)
self.canvas_image.bind('<MouseWheel>', self.__wheel) # zoom for Windows and MacOS, but not Linux
self.canvas_image.bind('<Button-5>', self.__wheel) # zoom for Linux, wheel scroll down
self.canvas_image.bind('<Button-4>', self.__wheel) # zoom for Linux, wheel scroll up
# Handle keystrokes in idle mode, because the program slows down on weak computers
# when too many keystroke events arrive at the same time
# self.canvas_image.bind('<Key>', lambda event: self.canvas_image.after_idle(self.__keystroke, event))
def get_screen_size(self, window):
return window.winfo_screenwidth(),window.winfo_screenheight()
def get_window_size(self, window):
return window.winfo_reqwidth(),window.winfo_reqheight()
def center_window(self, width, height):
screenwidth = self.master.winfo_screenwidth()
screenheight = self.master.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
self.master.geometry(size)
def show_img_in_canvas(self, pil_image):
self.imscale = 1.0 # scale for the canvas image zoom, public for outer classes
self.__delta = 1.3 # zoom magnitude
self.__filter = Image.ANTIALIAS # could be: NEAREST, BILINEAR, BICUBIC and ANTIALIAS
self.__previous_state = 0 # previous state of the keyboard
self.pil_image = pil_image
with warnings.catch_warnings(): # suppress DecompressionBombWarning
warnings.simplefilter('ignore')
self.__image = self.pil_image # open image, but don't load it
self.imwidth, self.imheight = self.__image.size # public for outer classes
self.__min_side = min(self.imwidth, self.imheight) # get the smaller image side
# Create image pyramid
self.__pyramid = [self.pil_image]
# Set ratio coefficient for image pyramid
self.__ratio = 1.0
self.__curr_img = 0 # current image from the pyramid
self.__scale = self.imscale * self.__ratio # image pyramid scale
self.__reduction = 2 # reduction degree of image pyramid
w, h = self.__pyramid[-1].size
while w > 512 and h > 512: # top pyramid image is around 512 pixels in size
w /= self.__reduction # divide on reduction degree
h /= self.__reduction # divide on reduction degree
self.__pyramid.append(self.__pyramid[-1].resize((int(w), int(h)), self.__filter))
# Put image into container rectangle and use it to set proper coordinates to the image
self.container = self.canvas_image.create_rectangle((0, 0, self.imwidth, self.imheight), width=0)
self.__show_image() # show image on the canvas
self.canvas_image.focus_set() # set focus on the canvas
def __scroll_x(self, *args, **kwargs):
""" Scroll canvas horizontally and redraw the image """
self.canvas_image.xview(*args) # scroll horizontally
self.__show_image() # redraw the image
def __scroll_y(self, *args, **kwargs):
""" Scroll canvas vertically and redraw the image """
self.canvas_image.yview(*args) # scroll vertically
self.__show_image() # redraw the image
def __move_from(self, event):
""" Remember previous coordinates for scrolling with the mouse """
self.canvas_image.scan_mark(event.x, event.y)
self.from_coord = (event.x, event.y)
def __move_to(self, event):
""" Drag (move) canvas to the new position """
self.canvas_image.scan_dragto(event.x, event.y, gain=1)
self.to_coord = (event.x, event.y)
self.__show_image() # zoom tile and show it on the canvas
def get_move_gap(self, event):
""" B1 release时获取移动的距离 """
try:
self.move_gap = list(np.array(self.to_coord) - np.array(self.from_coord) + np.array(self.move_gap))
except:
self.move_gap = | =self.label)
self.label.bind("<1>", self.select_or_e | conditional_block |
tree_dir.py | Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
theme = idleConf.CurrentTheme()
if self.selected:
self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
else:
self.label.configure(idleConf.GetHighlight(theme, 'normal'))
id = self.canvas.create_window(textx, texty,
anchor="nw", window=self.label)
self.label.bind("<1>", self.select_or_edit)
self.label.bind("<Double-1>", self.flip)
self.label.bind("<Control-1>", self.select_more)
self.label.bind("<3>", self.execute_file)
self.text_id = id
class AutoScrollbar(ttk.Scrollbar):
""" A scrollbar that hides itself if it's not needed. Works only for grid geometry manager """
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
| f.get_screen_size(self.master)
self.center_window(self.screen_width-50, self.screen_height-50)
self.master.resizable(width=False, height=False)
self.build_tree_canvas()
self.build_tree()
self.build_img_canvas()
def build_tree_canvas(self):
# create frame
self.tree_width = self.screen_width // 7
self.tree_height = self.screen_height
frame = tk.Frame(self.master, width=self.tree_width, height=self.tree_height)
frame.grid(row=0, column=0)
# canvas
self.tree_canvas=tk.Canvas(frame, bg='#FFFFFF', scrollregion=(0,0,500,500))
# vbar & hbar
hbar=tk.Scrollbar(frame,orient=tk.HORIZONTAL)
hbar.pack(side=tk.BOTTOM,fill=tk.X)
hbar.config(command=self.tree_canvas.xview)
vbar=tk.Scrollbar(frame,orient=tk.VERTICAL)
vbar.pack(side=tk.RIGHT,fill=tk.Y)
vbar.config(command=self.tree_canvas.yview)
self.tree_canvas.config(width=self.tree_width,height=self.tree_height)
self.tree_canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
self.tree_canvas.pack(side=tk.LEFT,expand=True,fill=tk.BOTH)
def build_tree(self):
home = os.environ.get('HOME')
item = SubFileTreeItem(home)
node = SubTreeNode(self.tree_canvas, None, item)
node.update()
def build_img_canvas(self):
self.box_width = self.screen_width - self.tree_width
self.img_frame = tk.Frame(self.master, width=self.box_width, height=self.screen_height, background='red')
self.img_frame.grid(row=0, column=1)
hbar = AutoScrollbar(self.img_frame, orient=tk.HORIZONTAL)
vbar = AutoScrollbar(self.img_frame, orient=tk.VERTICAL)
hbar.grid(row=1, column=1, sticky='we')
vbar.grid(row=0, column=2, sticky='ns')
# Create canvas and bind it with scrollbars
self.canvas_image = tk.Canvas(self.img_frame, highlightthickness=0, width=self.box_width, height=self.screen_height,
xscrollcommand=hbar.set, yscrollcommand=vbar.set, background='blue')
self.canvas_image.grid(row=0, column=1, sticky='nswe')
self.canvas_image.update() # wait till canvas is created
hbar.configure(command=self.__scroll_x) # bind scrollbars to the canvas
vbar.configure(command=self.__scroll_y)
# Bind events to the Canvas
self.canvas_image.bind('<Configure>', lambda event: self.__show_image()) # canvas is resized
self.canvas_image.bind('<Control-ButtonPress-1>', self.__move_from) # remember canvas position
self.canvas_image.bind('<Control-B1-Motion>', self.__move_to) # move canvas to the new position
self.canvas_image.bind('<Control-B1-ButtonRelease>', self.get_move_gap)
self.canvas_image.bind('<MouseWheel>', self.__wheel) # zoom for Windows and MacOS, but not Linux
self.canvas_image.bind('<Button-5>', self.__wheel) # zoom for Linux, wheel scroll down
self.canvas_image.bind('<Button-4>', self.__wheel) # zoom for Linux, wheel scroll up
# Handle keystrokes in idle mode, because the program slows down on weak computers
# when too many keystroke events arrive at the same time
# self.canvas_image.bind('<Key>', lambda event: self.canvas_image.after_idle(self.__keystroke, event))
def get_screen_size(self, window):
return window.winfo_screenwidth(),window.winfo_screenheight()
def get_window_size(self, window):
return window.winfo_reqwidth(),window.winfo_reqheight()
def center_window(self, width, height):
screenwidth = self.master.winfo_screenwidth()
screenheight = self.master.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
self.master.geometry(size)
def show_img_in_canvas(self, pil_image):
self.imscale = 1.0 # scale for the canvas image zoom, public for outer classes
self.__delta = 1.3 # zoom magnitude
self.__filter = Image.ANTIALIAS # could be: NEAREST, BILINEAR, BICUBIC and ANTIALIAS
self.__previous_state = 0 # previous state of the keyboard
self.pil_image = pil_image
with warnings.catch_warnings(): # suppress DecompressionBombWarning
warnings.simplefilter('ignore')
self.__image = self.pil_image # open image, but don't load it
self.imwidth, self.imheight = self.__image.size # public for outer classes
self.__min_side = min(self.imwidth, self.imheight) # get the smaller image side
# Create image pyramid
self.__pyramid = [self.pil_image]
# Set ratio coefficient for image pyramid
self.__ratio = 1.0
self.__curr_img = 0 # current image from the pyramid
self.__scale = self.imscale * self.__ratio # image pyramid scale
self.__reduction = 2 # reduction degree of image pyramid
w, h = self.__pyramid[-1].size
while w > 512 and h > 512: # top pyramid image is around 512 pixels in size
w /= self.__reduction # divide on reduction degree
h /= self.__reduction # divide on reduction degree
self.__pyramid.append(self.__pyramid[-1].resize((int(w), int(h)), self.__filter))
# Put image into container rectangle and use it to set proper coordinates to the image
self.container = self.canvas_image.create_rectangle((0, 0, self.imwidth, self.imheight), width=0)
self.__show_image() # show image on the canvas
self.canvas_image.focus_set() # set focus on the canvas
def __scroll_x(self, *args, **kwargs):
""" Scroll canvas horizontally and redraw the image """
self.canvas_image.xview(*args) # scroll horizontally
self.__show_image() # redraw the image
def __scroll_y(self, *args, **kwargs):
""" Scroll canvas vertically and redraw the image """
self.canvas_image.yview(*args) # scroll vertically
self.__show_image() # redraw the image
def __move_from(self, event):
""" Remember previous coordinates for scrolling with the mouse """
self.canvas_image.scan_mark(event.x, event.y)
self.from_coord = (event.x, event.y)
def __move_to(self, event):
""" Drag (move) canvas to the new position """
self.canvas_image.scan_dragto(event.x, event.y, gain=1)
self.to_coord = (event.x, event.y)
self.__show_image() # zoom tile and show it on the canvas
def get_move_gap(self, event):
""" B1 release时获取移动的距离 """
try:
self.move_gap = list(np.array(self.to_coord) - np.array(self.from_coord) + np.array(self.move_gap))
except:
self.move_gap = | self.grid_remove()
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi)
def pack(self, **kw):
raise tk.TclError('Cannot use pack with the widget ' + self.__class__.__name__)
def place(self, **kw):
raise tk.TclError('Cannot use place with the widget ' + self.__class__.__name__)
class WholeWindow():
move_gap = [0, 0]
def __init__(self, master):
self.master = master # parent window (root)
self.screen_width, self.screen_height = sel | identifier_body |
global_stats.rs | _string()});
let hs_deps2 = Histogram {
max: hs_deps1.max,
buckets: hs_deps1.buckets.split_off(10),
bucket_labels: hs_deps1.bucket_labels.split_off(10),
};
let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?;
let mut hs_rev_deps = Histogram::new(rev_deps, true,
&[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000],
|n| if n > 2 {format!("≥{n}")} else {n.to_string()});
hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5));
let age_label = |n| match n {
0..=1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
};
let total_crate_num = kitchen_sink.all_crates().count() as u32;
let stats = GlobalStats {
total_crate_num,
total_owners_at_month,
max_total_owners,
max_daily_downloads_rate,
start_week_offset,
weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32,
dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2),
dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2),
max_downloads_per_week,
dl_grid_line_every,
hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}),
hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| {
let mut t = format_bytes(n*1024);
t.insert(0, '≤'); t
}),
hs_deps1, hs_deps2,
hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n {
0 => "one-off".to_string(),
1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
}),
hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_owner_crates,
categories,
rustc_stats_all,
rustc_stats_recent,
rustc_stats_recent_num,
hs_rev_deps,
};
templates::global_stats(out, &Page {
title: "State of the Rust/Cargo crates ecosystem".to_owned(),
description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? Is Rust usage growing?".to_owned()),
noindex: false,
search_meta: true,
critical_css_data: Some(include_str!("../../style/public/home.css")),
critical_css_dev_url: Some("/home.css"),
..Default::default()
}, &dl, &stats, &urler)?;
Ok(())
}
#[derive(Default, Copy, Clone, Debug)]
pub struct Compat {
pub(crate) bad: u32,
pub(crate) maybe_bad: u32,
pub(crate) unknown: u32,
pub(crate) maybe_ok: u32,
pub(crate) ok: u32,
}
impl Compat {
pub fn sum(&self) -> u32 {
self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok
}
}
fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> {
// (ok, maybe, not), [0] is unused
let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize];
for c in compat.values() {
// can't compile at all
if !c.iter().any(|(_, c)| c.has_ever_built()) {
continue;
}
// stats for latest crate version only
let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) {
Some((_, c)) => c,
None => continue,
};
let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) {
Some((_, c)) => c,
None => latest_ver,
};
let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0);
let newest_bad = latest_ver.newest_bad().unwrap_or(0);
let oldest_ok = latest_ver.oldest_ok().unwrap_or(999);
let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999);
for (ver, c) in rustc_versions.iter_mut().enumerate() {
let ver = ver as u16;
if ver >= oldest_ok {
if ver >= oldest_ok_raw {
c.ok += 1;
} else {
c.maybe_ok += 1;
}
} else if ver <= newest_bad {
if ver <= newest_bad_raw {
c.bad += 1;
} else {
c.maybe_bad += 1;
}
} else {
c.unknown += 1;
}
}
}
// resize to width
let width = 330;
for c in &mut rustc_versions {
let sum = c.sum();
c.bad = (c.bad * width + width / 2) / sum;
c.ok = (c.ok * width + width / 2) / sum;
c.maybe_bad = (c.maybe_bad * width + width / 2) / sum;
c.maybe_ok = (c.maybe_ok * width + width / 2) / sum;
c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok;
}
Ok(rustc_versions)
}
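// Editor's note (hedged sketch, not part of the original source): rustc_stats() above
// rescales each Compat row so the five buckets always sum to the 330px SVG bar width,
// folding the rounding remainder into `unknown`. The self-check below restates that
// invariant in isolation; the sample values are arbitrary assumptions.
#[cfg(test)]
mod compat_width_sketch {
    use super::Compat;

    #[test]
    fn buckets_sum_to_bar_width() {
        let c = Compat { bad: 1, maybe_bad: 2, unknown: 3, maybe_ok: 4, ok: 7 };
        let width = 330u32;
        let sum = c.sum();
        let bad = (c.bad * width + width / 2) / sum;
        let ok = (c.ok * width + width / 2) / sum;
        let maybe_bad = (c.maybe_bad * width + width / 2) / sum;
        let maybe_ok = (c.maybe_ok * width + width / 2) / sum;
        let unknown = width - bad - ok - maybe_bad - maybe_ok; // remainder bucket
        assert_eq!(bad + maybe_bad + unknown + maybe_ok + ok, width);
    }
}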
fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> {
let mut out = Vec::with_capacity(sub.len());
for c in sub.values() {
if c.slug == "uncategorized" {
continue;
}
out.push(TreeBox {
cat: c,
label: c.name.clone(),
title: c.name.clone(),
count: 0,
weight: 0.,
bounds: treemap::Rect::new(),
color: String::new(),
font_size: 12.,
sub: cat_slugs(&c.sub),
});
}
out
}
#[derive(Debug, Clone)]
pub struct TreeBox {
pub cat: &'static C | ,
pub title: String,
pub label: String,
pub font_size: f | ategory | identifier_name |
global_stats.rs | , downloads_last_year.1 / 2),
max_downloads_per_week,
dl_grid_line_every,
hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}),
hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| {
let mut t = format_bytes(n*1024);
t.insert(0, '≤'); t
}),
hs_deps1, hs_deps2,
hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n {
0 => "one-off".to_string(),
1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
}),
hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_owner_crates,
categories,
rustc_stats_all,
rustc_stats_recent,
rustc_stats_recent_num,
hs_rev_deps,
};
templates::global_stats(out, &Page {
title: "State of the Rust/Cargo crates ecosystem".to_owned(),
description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? Is Rust usage growing?".to_owned()),
noindex: false,
search_meta: true,
critical_css_data: Some(include_str!("../../style/public/home.css")),
critical_css_dev_url: Some("/home.css"),
..Default::default()
}, &dl, &stats, &urler)?;
Ok(())
}
#[derive(Default, Copy, Clone, Debug)]
pub struct Compat {
pub(crate) bad: u32,
pub(crate) maybe_bad: u32,
pub(crate) unknown: u32,
pub(crate) maybe_ok: u32,
pub(crate) ok: u32,
}
impl Compat {
pub fn sum(&self) -> u32 {
self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok
}
}
fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> {
// (ok, maybe, not), [0] is unused
let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize];
for c in compat.values() {
// can't compile at all
if !c.iter().any(|(_, c)| c.has_ever_built()) {
continue;
}
// stats for latest crate version only
let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) {
Some((_, c)) => c,
None => continue,
};
let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) {
Some((_, c)) => c,
None => latest_ver,
};
let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0);
let newest_bad = latest_ver.newest_bad().unwrap_or(0);
let oldest_ok = latest_ver.oldest_ok().unwrap_or(999);
let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999);
for (ver, c) in rustc_versions.iter_mut().enumerate() {
let ver = ver as u16;
if ver >= oldest_ok {
if ver >= oldest_ok_raw {
c.ok += 1;
} else {
c.maybe_ok += 1;
}
} else if ver <= newest_bad {
if ver <= newest_bad_raw {
c.bad += 1;
} else {
c.maybe_bad += 1;
}
} else {
c.unknown += 1;
}
}
}
// resize to width
let width = 330;
for c in &mut rustc_versions {
let sum = c.sum();
c.bad = (c.bad * width + width / 2) / sum;
c.ok = (c.ok * width + width / 2) / sum;
c.maybe_bad = (c.maybe_bad * width + width / 2) / sum;
c.maybe_ok = (c.maybe_ok * width + width / 2) / sum;
c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok;
}
Ok(rustc_versions)
}
fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> {
let mut out = Vec::with_capacity(sub.len());
for c in sub.values() {
if c.slug == "uncategorized" {
continue;
}
out.push(TreeBox {
cat: c,
label: c.name.clone(),
title: c.name.clone(),
count: 0,
weight: 0.,
bounds: treemap::Rect::new(),
color: String::new(),
font_size: 12.,
sub: cat_slugs(&c.sub),
});
}
out
}
#[derive(Debug, Clone)]
pub struct TreeBox {
pub cat: &'static Category,
pub title: String,
pub label: String,
pub font_size: f64,
/// SVG fill
pub color: String,
pub count: u32,
pub weight: f64,
pub bounds: treemap::Rect,
pub sub: Vec<TreeBox>,
}
impl TreeBox {
pub fn line_y(&self, nth: usize) -> f64 {
self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64
}
pub fn can_fit_count(&self) -> bool {
self.line_y(self.label.lines().count()) + 1. - self.bounds.y < self.bounds.h
}
}
impl treemap::Mappable for TreeBox {
fn size(&self) -> f64 { self.weight }
fn bounds(&self) -> &treemap::Rect { &self.bounds }
fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; }
}
async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> {
use treemap::*;
let mut roots = cat_slugs(&CATEGORIES.root);
#[track_caller]
fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
items.swap_remove(pos)
}
#[track_caller]
fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
&mut items[pos]
}
fn new_cat(sub: Vec<TreeBox>) -> TreeBox {
TreeBox {
c | at: CATEGORIES.root.values().next().unwrap(),
title: String::new(),
label: String::new(),
font_size: 0.,
color: String::new(),
count: 0,
weight: 0.,
bounds: Rect::new(),
sub,
}
}
// names don't fit
get_ | identifier_body |
|
global_stats.rs | _bad_likely().is_some()) {
Some((_, c)) => c,
None => latest_ver,
};
let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0);
let newest_bad = latest_ver.newest_bad().unwrap_or(0);
let oldest_ok = latest_ver.oldest_ok().unwrap_or(999);
let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999);
for (ver, c) in rustc_versions.iter_mut().enumerate() {
let ver = ver as u16;
if ver >= oldest_ok {
if ver >= oldest_ok_raw {
c.ok += 1;
} else {
c.maybe_ok += 1;
}
} else if ver <= newest_bad {
if ver <= newest_bad_raw {
c.bad += 1;
} else {
c.maybe_bad += 1;
}
} else {
c.unknown += 1;
}
}
}
// resize to width
let width = 330;
for c in &mut rustc_versions {
let sum = c.sum();
c.bad = (c.bad * width + width / 2) / sum;
c.ok = (c.ok * width + width / 2) / sum;
c.maybe_bad = (c.maybe_bad * width + width / 2) / sum;
c.maybe_ok = (c.maybe_ok * width + width / 2) / sum;
c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok;
}
Ok(rustc_versions)
}
fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> {
let mut out = Vec::with_capacity(sub.len());
for c in sub.values() {
if c.slug == "uncategorized" {
continue;
}
out.push(TreeBox {
cat: c,
label: c.name.clone(),
title: c.name.clone(),
count: 0,
weight: 0.,
bounds: treemap::Rect::new(),
color: String::new(),
font_size: 12.,
sub: cat_slugs(&c.sub),
});
}
out
}
#[derive(Debug, Clone)]
pub struct TreeBox {
pub cat: &'static Category,
pub title: String,
pub label: String,
pub font_size: f64,
/// SVG fill
pub color: String,
pub count: u32,
pub weight: f64,
pub bounds: treemap::Rect,
pub sub: Vec<TreeBox>,
}
impl TreeBox {
pub fn line_y(&self, nth: usize) -> f64 {
self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64
}
pub fn can_fit_count(&self) -> bool {
self.line_y(self.label.lines().count()) + 1. - self.bounds.y < self.bounds.h
}
}
impl treemap::Mappable for TreeBox {
fn size(&self) -> f64 { self.weight }
fn bounds(&self) -> &treemap::Rect { &self.bounds }
fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; }
}
async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> {
use treemap::*;
let mut roots = cat_slugs(&CATEGORIES.root);
#[track_caller]
fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
items.swap_remove(pos)
}
#[track_caller]
fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox {
let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}"));
&mut items[pos]
}
fn new_cat(sub: Vec<TreeBox>) -> TreeBox {
TreeBox {
cat: CATEGORIES.root.values().next().unwrap(),
title: String::new(),
label: String::new(),
font_size: 0.,
color: String::new(),
count: 0,
weight: 0.,
bounds: Rect::new(),
sub,
}
}
// names don't fit
get_cat("database-implementations", &mut roots).label = "Database".into();
get_cat("simulation", &mut roots).label = "Sim".into();
get_cat("caching", &mut roots).label = "Cache".into();
get_cat("config", &mut roots).label = "Config".into();
get_cat("os", &mut roots).label = "OS".into();
get_cat("internationalization", &mut roots).label = "i18n".into();
get_cat("authentication", &mut roots).label = "Auth".into();
get_cat("visualization", &mut roots).label = "Visualize".into();
get_cat("accessibility", &mut roots).label = "a11y".into();
get_cat("compilers", &mut roots).label = "Lang".into();
get_cat("os::macos-apis", &mut get_cat("os", &mut roots).sub).label = "Apple".into();
get_cat("rendering::engine", &mut get_cat("rendering", &mut roots).sub).label = "Engine".into();
get_cat("rendering::data-formats", &mut get_cat("rendering", &mut roots).sub).label = "Formats".into();
// group them in a more sensible way
let parsers = vec![take_cat("parsing", &mut roots), take_cat("parser-implementations", &mut roots)];
roots.push(new_cat(parsers));
let hw = vec![take_cat("embedded", &mut roots), take_cat("hardware-support", &mut roots), take_cat("no-std", &mut roots)];
roots.push(new_cat(hw));
let db = vec![take_cat("database", &mut roots), take_cat("database-implementations", &mut roots)];
roots.push(new_cat(db));
let gg = vec![take_cat("game-development", &mut roots), take_cat("games", &mut roots)];
roots.push(new_cat(gg));
let int = take_cat("command-line-interface", &mut roots);
let cli = vec![int, take_cat("command-line-utilities", &mut roots)];
roots.push(new_cat(cli));
let mut editors = take_cat("text-editors", &mut roots);
editors.label = "Editors".into();
let txt = vec![
take_cat("text-processing", &mut roots),
editors,
take_cat("template-engine", &mut roots),
take_cat("value-formatting", &mut roots),
];
roots.push(new_cat(txt));
let wasm = take_cat("wasm", &mut roots);
get_cat("web-programming", &mut roots).sub.push(wasm);
let mut asyn = take_cat("asynchronous", &mut roots);
asyn.label = "Async".into();
get_cat("network-programming", &mut roots).sub.push(asyn);
let mut proc = take_cat("development-tools::procedural-macro-helpers", &mut get_cat("development-tools", &mut roots).sub);
proc.label = "Proc macros".into();
get_cat("rust-patterns", &mut roots).sub.push(proc);
let concurrency = take_cat("concurrency", &mut roots);
get_cat("rust-patterns", &mut roots).sub.push(concurrency);
let mut cr = get_cat("cryptography", &mut roots).sub.remove(0);
cr.label = "Crypto Magic Beans".into();
roots.push(cr);
// first layout of top-level boxes (won't be used for anything other than second layout)
for top in roots.iter_mut() {
let (count, weight) = if top.label.is_empty() { (0, 0.) } else { kitchen_sink.category_crate_count(&top.cat.slug).await? };
top.count = count;
top.weight = weight;
let mut top_copy = top.clone();
top_copy.sub = Vec::new();
for i in top.sub.iter_mut() {
let (count, weight) = kitchen_sink.category_crate_count(&i.cat.slug).await?;
i.count = count;
i.weight = weight;
top.count += i.count;
top.weight += i.weight;
assert!(i.sub.is_empty());
}
if top_copy.count > 0 {
top.sub.insert(0, top_copy);
}
}
let mut items_flattened = Vec::new();
let layout = TreemapLayout::new();
layout.layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.));
for parent in roots.iter_mut() {
let layout = TreemapLayout::new();
layout.layout_items(&mut parent.sub, parent.bounds); | items_flattened.append(&mut parent.sub); | random_line_split |
|
global_stats.rs | let hs_deps2 = Histogram {
max: hs_deps1.max,
buckets: hs_deps1.buckets.split_off(10),
bucket_labels: hs_deps1.bucket_labels.split_off(10),
};
let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?;
let mut hs_rev_deps = Histogram::new(rev_deps, true,
&[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000],
|n| if n > 2 {format!("≥{n}")} else {n.to_string()});
hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5));
let age_label = |n| match n {
0..=1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
};
let total_crate_num = kitchen_sink.all_crates().count() as u32;
let stats = GlobalStats {
total_crate_num,
total_owners_at_month,
max_total_owners,
max_daily_downloads_rate,
start_week_offset,
weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32,
dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2),
dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2),
max_downloads_per_week,
dl_grid_line_every,
hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}),
hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| {
let mut t = format_bytes(n*1024);
t.insert(0, '≤'); t
}),
hs_deps1, hs_deps2,
hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n {
0 => "one-off".to_string(),
1 => "≤1 week".to_string(),
2..=4 => format!("≤{n} weeks"),
5 => "≤1 month".to_string(),
6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()),
52 => "≤1 year".to_string(),
_ => format!("≤{} years", (n as f64 / 52.).round()),
}),
hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label),
hs_owner_crates,
categories,
rustc_stats_all,
rustc_stats_recent,
rustc_stats_recent_num,
hs_rev_deps,
};
templates::global_stats(out, &Page {
title: "State of the Rust/Cargo crates ecosystem".to_owned(),
description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? Is Rust usage growing?".to_owned()),
noindex: false,
search_meta: true,
critical_css_data: Some(include_str!("../../style/public/home.css")),
critical_css_dev_url: Some("/home.css"),
..Default::default()
}, &dl, &stats, &urler)?;
Ok(())
}
#[derive(Default, Copy, Clone, Debug)]
pub struct Compat {
pub(crate) bad: u32,
pub(crate) maybe_bad: u32,
pub(crate) unknown: u32,
pub(crate) maybe_ok: u32,
pub(crate) ok: u32,
}
impl Compat {
pub fn sum(&self) -> u32 {
self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok
}
}
fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> {
// (ok, maybe, not), [0] is unused
let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize];
for c in compat.values() {
// can't compile at all
if !c.iter().any(|(_, c)| c.has_ever_built()) {
continue;
}
// stats for latest crate version only
let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) {
Some((_, c)) => c,
None => continue,
};
let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) {
Some((_, c)) => c,
None => latest_ver,
};
let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0);
let newest_bad = latest_ver.newest_bad().unwrap_or(0);
let oldest_ok = latest_ver.oldest_ok().unwrap_or(999);
let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999);
for (ver, c) in rustc_versions.iter_mut().enumerate() {
let ver = ver as u16;
if ver >= oldest_ok {
if ver >= oldest_ok_raw {
c.ok += 1;
} else {
c.maybe_ok += 1;
}
} else if ver <= newest_bad {
if ver <= newest_bad_raw {
c.bad += 1;
} else {
c.maybe_bad += 1;
}
} else {
c.unknown += 1;
}
}
}
// resize to width
let width = 330;
for c in &mut rustc_versions {
let sum = c.sum();
c.bad = (c.bad * width + width / 2) / sum;
c.ok = (c.ok * width + width / 2) / sum;
c.maybe_bad = (c.maybe_bad * width + width / 2) / sum;
c.maybe_ok = (c.maybe_ok * width + width / 2) / sum;
c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok;
}
Ok(rustc_versions)
}
fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> {
let mut out = Vec::with_capacity(sub.len());
for c in sub.values() {
if c.slug == "uncategorized" {
continue;
}
out.push(TreeBox {
cat: c,
label: c.name.clone(),
title: c.name.clone(),
count: 0,
weight: 0.,
bounds: treemap::Rect::new(),
color: String::new(),
font_size: 12.,
sub: cat_slugs(&c.sub),
});
}
out
}
#[derive(Debug, Clone)]
pub struct TreeBox {
pub cat: &'static Category,
pub title: String,
pub label: String,
pub font_size: f | to_string()});
| conditional_block |
|
worker.go | (core.Task, core.TaskErrors)
StopTaskTribe(id string) []serror.SnapError
StartTaskTribe(id string) []serror.SnapError
RemoveTaskTribe(id string) error
}
type getsMembers interface {
GetPluginAgreementMembers() ([]Member, error)
GetTaskAgreementMembers() ([]Member, error)
GetRequestPassword() string
}
type Member interface {
GetAddr() net.IP
GetRestPort() string
GetRestProto() string
GetRestInsecureSkipVerify() bool
GetName() string
}
// newWorker creates a new tribe worker bound to the plugin and task queues.
func newWorker(id int,
pluginQueue chan PluginRequest,
taskQueue chan TaskRequest,
quitChan chan struct{},
wg *sync.WaitGroup,
pm ManagesPlugins,
tm ManagesTasks,
mm getsMembers) worker |
type worker struct {
pluginManager ManagesPlugins
memberManager getsMembers
taskManager ManagesTasks
id int
pluginWork chan PluginRequest
taskWork chan TaskRequest
quitChan chan struct{}
waitGroup *sync.WaitGroup
logger *log.Entry
}
func DispatchWorkers(nworkers int, pluginQueue chan PluginRequest, taskQueue chan TaskRequest, quitChan chan struct{}, workerWaitGroup *sync.WaitGroup, cp ManagesPlugins, tm ManagesTasks, mm getsMembers) {
for i := 0; i < nworkers; i++ {
log.WithFields(log.Fields{
"_module": "worker",
"_block": "dispatch-workers",
}).Infof("dispatching tribe worker-%d", i+1)
worker := newWorker(i+1, pluginQueue, taskQueue, quitChan, workerWaitGroup, cp, tm, mm)
worker.start()
}
}
// Start "starts" the workers
func (w worker) start() {
logger := w.logger.WithFields(log.Fields{"_block": "start"})
// task worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting task worker")
for {
select {
case work := <-w.taskWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"task": work.Task.ID,
"request-type": work.RequestType.String(),
"retries": work.retryCount,
})
logger.Debug("received task work")
if work.RequestType == TaskStartedType {
if err := w.startTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task start request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskStoppedType {
if err := w.stopTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task stop request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskCreatedType {
w.createTask(work.Task.ID, work.Task.StartOnCreate)
}
if work.RequestType == TaskRemovedType {
if err := w.removeTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
case <-w.quitChan:
logger.Infof("stopping tribe worker")
return
}
}
}()
// plugin worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting plugin worker")
for {
select {
case work := <-w.pluginWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"plugin-name": work.Plugin.Name(),
"plugin-version": work.Plugin.Version(),
"plugin-type": work.Plugin.TypeName(),
"request-type": work.RequestType.String(),
})
logger.Debug("received plugin work")
if work.RequestType == PluginLoadedType {
if err := w.loadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
if work.RequestType == PluginUnloadedType {
if err := w.unloadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
case <-w.quitChan:
w.logger.Debug("stop tribe plugin worker")
return
}
}
}()
}
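// Editor's note: an illustrative, hedged restatement of the retry pattern shared by the
// two goroutines above -- it is not part of the original file. On failure a request is
// re-queued until retryCount reaches retryLimit, sleeping retryDelay between attempts;
// TaskRequest, retryLimit and retryDelay come from the surrounding code, everything else
// here is an assumption.
func requeueWithRetry(work TaskRequest, queue chan TaskRequest, do func(string) error) {
	if err := do(work.Task.ID); err != nil && work.retryCount < retryLimit {
		work.retryCount++      // count this failed attempt
		time.Sleep(retryDelay) // back off before re-queueing
		queue <- work          // hand the request back to the worker pool
	}
}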
func (w worker) unloadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "unload-plugin",
})
if !w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
if _, err := w.pluginManager.Unload(plugin); err != nil {
logger.WithField("err", err).Info("failed to unload plugin")
return err
}
return nil
}
func (w worker) loadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "load-plugin",
})
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
members, err := w.memberManager.GetPluginAgreementMembers()
if err != nil {
logger.Error(err)
return err
}
for _, member := range shuffle(members) {
url := fmt.Sprintf("%s://%s:%s/v1/plugins/%s/%s/%d?download=true", member.GetRestProto(), member.GetAddr(), member.GetRestPort(), plugin.TypeName(), plugin.Name(), plugin.Version())
c, err := client.New(url, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.WithFields(log.Fields{
"err": err,
"url": url,
}).Info("unable to create client")
continue
}
f, err := w.downloadPlugin(c, plugin)
// If we can't download from this member, try the next
if err != nil {
logger.Error(err)
continue
}
rp, err := core.NewRequestedPlugin(f.Name(), TempPath, nil)
if err != nil {
logger.Error(err)
return err
}
_, err = w.pluginManager.Load(rp)
if err != nil {
logger.Error(err)
return err
}
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
return errors.New("failed to load plugin")
}
return errors.New("failed to find a member with the plugin")
}
func (w worker) downloadPlugin(c *client.Client, plugin core.Plugin) (*os.File, error) {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"url": c.URL,
"_block": "download-plugin",
})
resp, err := c.TribeRequest()
if err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Info("plugin not found")
return nil, fmt.Errorf("Plugin not found at %s: %s", c.URL, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if resp.Header.Get("Content-Type") != "application/x-gzip" {
logger.WithField("content-type", resp.Header.Get("Content-Type")).Error("Expected application/x-gzip")
}
dir, err := ioutil.TempDir("", "")
if err != nil {
logger.Error(err)
return nil, err
}
fpath := path.Join(dir, fmt.Sprintf("%s-%s-%d", plugin.TypeName(), plugin.Name(), plugin.Version()))
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_CREATE|os.O | {
logger := log.WithFields(log.Fields{
"_module": "worker",
"worker-id": id,
})
worker := worker{
pluginManager: pm,
taskManager: tm,
memberManager: mm,
id: id,
pluginWork: pluginQueue,
taskWork: taskQueue,
waitGroup: wg,
quitChan: quitChan,
logger: logger,
}
return worker
} | identifier_body |
worker.go | (core.Task, core.TaskErrors)
StopTaskTribe(id string) []serror.SnapError
StartTaskTribe(id string) []serror.SnapError
RemoveTaskTribe(id string) error
}
type getsMembers interface {
GetPluginAgreementMembers() ([]Member, error)
GetTaskAgreementMembers() ([]Member, error)
GetRequestPassword() string
}
type Member interface {
GetAddr() net.IP
GetRestPort() string
GetRestProto() string
GetRestInsecureSkipVerify() bool
GetName() string
}
// newWorker creates a new tribe worker bound to the plugin and task queues.
func newWorker(id int,
pluginQueue chan PluginRequest,
taskQueue chan TaskRequest,
quitChan chan struct{},
wg *sync.WaitGroup,
pm ManagesPlugins,
tm ManagesTasks,
mm getsMembers) worker {
logger := log.WithFields(log.Fields{
"_module": "worker",
"worker-id": id,
})
worker := worker{
pluginManager: pm,
taskManager: tm,
memberManager: mm,
id: id,
pluginWork: pluginQueue,
taskWork: taskQueue,
waitGroup: wg,
quitChan: quitChan,
logger: logger,
}
return worker
}
type worker struct {
pluginManager ManagesPlugins
memberManager getsMembers
taskManager ManagesTasks
id int
pluginWork chan PluginRequest
taskWork chan TaskRequest
quitChan chan struct{}
waitGroup *sync.WaitGroup
logger *log.Entry
}
func DispatchWorkers(nworkers int, pluginQueue chan PluginRequest, taskQueue chan TaskRequest, quitChan chan struct{}, workerWaitGroup *sync.WaitGroup, cp ManagesPlugins, tm ManagesTasks, mm getsMembers) {
for i := 0; i < nworkers; i++ {
log.WithFields(log.Fields{
"_module": "worker",
"_block": "dispatch-workers",
}).Infof("dispatching tribe worker-%d", i+1)
worker := newWorker(i+1, pluginQueue, taskQueue, quitChan, workerWaitGroup, cp, tm, mm)
worker.start()
}
}
// Start "starts" the workers
func (w worker) start() {
logger := w.logger.WithFields(log.Fields{"_block": "start"})
// task worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting task worker")
for {
select {
case work := <-w.taskWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"task": work.Task.ID,
"request-type": work.RequestType.String(),
"retries": work.retryCount,
})
logger.Debug("received task work")
if work.RequestType == TaskStartedType {
if err := w.startTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task start request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskStoppedType {
if err := w.stopTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task stop request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskCreatedType {
w.createTask(work.Task.ID, work.Task.StartOnCreate)
}
if work.RequestType == TaskRemovedType {
if err := w.removeTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
case <-w.quitChan:
logger.Infof("stopping tribe worker")
return
}
}
}()
// plugin worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting plugin worker")
for {
select {
case work := <-w.pluginWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"plugin-name": work.Plugin.Name(),
"plugin-version": work.Plugin.Version(),
"plugin-type": work.Plugin.TypeName(),
"request-type": work.RequestType.String(),
})
logger.Debug("received plugin work")
if work.RequestType == PluginLoadedType {
if err := w.loadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
if work.RequestType == PluginUnloadedType {
if err := w.unloadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
case <-w.quitChan:
w.logger.Debug("stop tribe plugin worker")
return
}
}
}()
}
func (w worker) unloadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "unload-plugin",
})
if !w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
if _, err := w.pluginManager.Unload(plugin); err != nil {
logger.WithField("err", err).Info("failed to unload plugin")
return err
}
return nil
}
func (w worker) loadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "load-plugin",
})
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
members, err := w.memberManager.GetPluginAgreementMembers()
if err != nil {
logger.Error(err)
return err
}
for _, member := range shuffle(members) {
url := fmt.Sprintf("%s://%s:%s/v1/plugins/%s/%s/%d?download=true", member.GetRestProto(), member.GetAddr(), member.GetRestPort(), plugin.TypeName(), plugin.Name(), plugin.Version())
c, err := client.New(url, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.WithFields(log.Fields{
"err": err,
"url": url,
}).Info("unable to create client")
continue
}
f, err := w.downloadPlugin(c, plugin)
// If we can't download from this member, try the next
if err != nil {
logger.Error(err)
continue
}
rp, err := core.NewRequestedPlugin(f.Name(), TempPath, nil)
if err != nil |
_, err = w.pluginManager.Load(rp)
if err != nil {
logger.Error(err)
return err
}
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
return errors.New("failed to load plugin")
}
return errors.New("failed to find a member with the plugin")
}
func (w worker) downloadPlugin(c *client.Client, plugin core.Plugin) (*os.File, error) {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"url": c.URL,
"_block": "download-plugin",
})
resp, err := c.TribeRequest()
if err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Info("plugin not found")
return nil, fmt.Errorf("Plugin not found at %s: %s", c.URL, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if resp.Header.Get("Content-Type") != "application/x-gzip" {
logger.WithField("content-type", resp.Header.Get("Content-Type")).Error("Expected application/x-gzip")
}
dir, err := ioutil.TempDir("", "")
if err != nil {
logger.Error(err)
return nil, err
}
fpath := path.Join(dir, fmt.Sprintf("%s-%s-%d", plugin.TypeName(), plugin.Name(), plugin.Version()))
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_CREATE|os.O | {
logger.Error(err)
return err
} | conditional_block |
worker.go | ++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
case <-w.quitChan:
w.logger.Debug("stop tribe plugin worker")
return
}
}
}()
}
func (w worker) unloadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "unload-plugin",
})
if !w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
if _, err := w.pluginManager.Unload(plugin); err != nil {
logger.WithField("err", err).Info("failed to unload plugin")
return err
}
return nil
}
func (w worker) loadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "load-plugin",
})
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
members, err := w.memberManager.GetPluginAgreementMembers()
if err != nil {
logger.Error(err)
return err
}
for _, member := range shuffle(members) {
url := fmt.Sprintf("%s://%s:%s/v1/plugins/%s/%s/%d?download=true", member.GetRestProto(), member.GetAddr(), member.GetRestPort(), plugin.TypeName(), plugin.Name(), plugin.Version())
c, err := client.New(url, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.WithFields(log.Fields{
"err": err,
"url": url,
}).Info("unable to create client")
continue
}
f, err := w.downloadPlugin(c, plugin)
// If we can't download from this member, try the next
if err != nil {
logger.Error(err)
continue
}
rp, err := core.NewRequestedPlugin(f.Name(), TempPath, nil)
if err != nil {
logger.Error(err)
return err
}
_, err = w.pluginManager.Load(rp)
if err != nil {
logger.Error(err)
return err
}
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
return errors.New("failed to load plugin")
}
return errors.New("failed to find a member with the plugin")
}
func (w worker) downloadPlugin(c *client.Client, plugin core.Plugin) (*os.File, error) {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"url": c.URL,
"_block": "download-plugin",
})
resp, err := c.TribeRequest()
if err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Info("plugin not found")
return nil, fmt.Errorf("Plugin not found at %s: %s", c.URL, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if resp.Header.Get("Content-Type") != "application/x-gzip" {
logger.WithField("content-type", resp.Header.Get("Content-Type")).Error("Expected application/x-gzip")
}
dir, err := ioutil.TempDir("", "")
if err != nil {
logger.Error(err)
return nil, err
}
fpath := path.Join(dir, fmt.Sprintf("%s-%s-%d", plugin.TypeName(), plugin.Name(), plugin.Version()))
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700)
if err != nil {
logger.Error(err)
return nil, err
}
io.Copy(f, resp.Body)
f.Close()
return f, nil
}
return nil, fmt.Errorf("Status code not 200 was %v: %s", resp.StatusCode, c.URL)
}
func (w worker) createTask(taskID string, startOnCreate bool) {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "create-task",
})
done := false
_, err := w.taskManager.GetTask(taskID)
if err == nil {
return
}
for {
members, err := w.memberManager.GetTaskAgreementMembers()
if err != nil {
logger.Error(err)
continue
}
for _, member := range shuffle(members) {
uri := fmt.Sprintf("%s://%s:%s", member.GetRestProto(), member.GetAddr(), member.GetRestPort())
logger.Debugf("getting task %v from %v", taskID, uri)
c, err := client.New(uri, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.Error(err)
continue
}
taskResult := c.GetTask(taskID)
if taskResult.Err != nil {
logger.WithField("err", taskResult.Err.Error()).Debug("error getting task")
continue
}
// this block addresses the condition when we are creating and starting
// a task and the task is created but fails to start (deps were not yet met)
if startOnCreate {
if _, err := w.taskManager.GetTask(taskID); err == nil {
logger.Debug("starting task")
if errs := w.taskManager.StartTaskTribe(taskID); errs != nil {
fields := log.Fields{}
for idx, e := range errs {
fields[fmt.Sprintf("err-%d", idx)] = e.Error()
}
logger.WithFields(fields).Error("error starting task")
continue
}
done = true
break
}
}
logger.Debug("creating task")
opt := core.SetTaskID(taskID)
_, errs := w.taskManager.CreateTaskTribe(
getSchedule(taskResult.ScheduledTaskReturned.Schedule),
taskResult.Workflow,
startOnCreate,
opt)
if errs != nil && len(errs.Errors()) > 0 {
fields := log.Fields{}
for idx, e := range errs.Errors() {
fields[fmt.Sprintf("err-%d", idx)] = e
}
logger.WithFields(fields).Debug("error creating task")
continue
}
logger.Debugf("task created")
done = true
break
}
if done {
break
}
time.Sleep(500 * time.Millisecond)
}
}
func (w worker) startTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "start-task",
})
logger.Debug("starting task")
errs := w.taskManager.StartTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyRunning.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
}
logger.WithFields(err.Fields()).Info(err)
}
return errors.New("error starting task")
}
func (w worker) stopTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "stop-task",
})
errs := w.taskManager.StopTaskTribe(taskID)
if errs == nil || len(errs) == 0 {
return nil
}
for _, err := range errs {
if err.Error() == scheduler.ErrTaskAlreadyStopped.Error() {
logger.WithFields(err.Fields()).Info(err)
return nil
}
logger.WithFields(err.Fields()).Info(err)
}
return errors.New("error stopping task")
}
func (w worker) removeTask(taskID string) error {
logger := w.logger.WithFields(log.Fields{
"task-id": taskID,
"_block": "remove-task",
})
err := w.taskManager.RemoveTaskTribe(taskID)
if err == nil {
return nil
}
logger.Info(err)
return err
}
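// shuffle returns a copy of m in a random order produced by rand.Perm, so that requests
// are spread across members instead of always hitting the first one. It assumes math/rand
// is seeded elsewhere; with the default source the permutation sequence is deterministic
// across runs.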
func shuffle(m []Member) []Member {
result := make([]Member, len(m))
perm := rand.Perm(len(m))
for i, v := range perm {
result[v] = m[i]
}
return result
}
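// isPluginLoaded does a linear scan of the local plugin catalog for an exact
// name/type/version match.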
func (w worker) isPluginLoaded(n, t string, v int) bool {
catalog := w.pluginManager.PluginCatalog()
for _, item := range catalog {
if item.TypeName() == t &&
item.Name() == n &&
item.Version() == v {
w.logger.WithFields(log.Fields{
"name": n,
"version": v,
"type": t,
"_block": "is-plugin-loaded",
}).Debugf("plugin already loaded")
return true
}
}
return false
}
func | getSchedule | identifier_name |
|
worker.go | ) (core.Task, core.TaskErrors)
StopTaskTribe(id string) []serror.SnapError
StartTaskTribe(id string) []serror.SnapError
RemoveTaskTribe(id string) error
}
type getsMembers interface {
GetPluginAgreementMembers() ([]Member, error)
GetTaskAgreementMembers() ([]Member, error)
GetRequestPassword() string
}
type Member interface { | GetAddr() net.IP
GetRestPort() string
GetRestProto() string
GetRestInsecureSkipVerify() bool
GetName() string
}
// newWorker creates a tribe worker wired to the given plugin/task queues, wait group, and managers.
func newWorker(id int,
pluginQueue chan PluginRequest,
taskQueue chan TaskRequest,
quitChan chan struct{},
wg *sync.WaitGroup,
pm ManagesPlugins,
tm ManagesTasks,
mm getsMembers) worker {
logger := log.WithFields(log.Fields{
"_module": "worker",
"worker-id": id,
})
worker := worker{
pluginManager: pm,
taskManager: tm,
memberManager: mm,
id: id,
pluginWork: pluginQueue,
taskWork: taskQueue,
waitGroup: wg,
quitChan: quitChan,
logger: logger,
}
return worker
}
type worker struct {
pluginManager ManagesPlugins
memberManager getsMembers
taskManager ManagesTasks
id int
pluginWork chan PluginRequest
taskWork chan TaskRequest
quitChan chan struct{}
waitGroup *sync.WaitGroup
logger *log.Entry
}
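// DispatchWorkers starts nworkers workers that share the same plugin and task queues;
// they run until quitChan is closed and signal completion through workerWaitGroup.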
func DispatchWorkers(nworkers int, pluginQueue chan PluginRequest, taskQueue chan TaskRequest, quitChan chan struct{}, workerWaitGroup *sync.WaitGroup, cp ManagesPlugins, tm ManagesTasks, mm getsMembers) {
for i := 0; i < nworkers; i++ {
log.WithFields(log.Fields{
"_module": "worker",
"_block": "dispatch-workers",
}).Infof("dispatching tribe worker-%d", i+1)
worker := newWorker(i+1, pluginQueue, taskQueue, quitChan, workerWaitGroup, cp, tm, mm)
worker.start()
}
}
// Start "starts" the workers
func (w worker) start() {
logger := w.logger.WithFields(log.Fields{"_block": "start"})
// task worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting task worker")
for {
select {
case work := <-w.taskWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"task": work.Task.ID,
"request-type": work.RequestType.String(),
"retries": work.retryCount,
})
logger.Debug("received task work")
if work.RequestType == TaskStartedType {
if err := w.startTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task start request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskStoppedType {
if err := w.stopTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing task stop request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
if work.RequestType == TaskCreatedType {
w.createTask(work.Task.ID, work.Task.StartOnCreate)
}
if work.RequestType == TaskRemovedType {
if err := w.removeTask(work.Task.ID); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.taskWork <- work
}
}
}
case <-w.quitChan:
logger.Infof("stopping tribe worker")
return
}
}
}()
// plugin worker
w.waitGroup.Add(1)
go func() {
defer w.waitGroup.Done()
logger.Debug("starting plugin worker")
for {
select {
case work := <-w.pluginWork:
// Receive a work request.
logger := w.logger.WithFields(log.Fields{
"plugin-name": work.Plugin.Name(),
"plugin-version": work.Plugin.Version(),
"plugin-type": work.Plugin.TypeName(),
"request-type": work.RequestType.String(),
})
logger.Debug("received plugin work")
if work.RequestType == PluginLoadedType {
if err := w.loadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
if work.RequestType == PluginUnloadedType {
if err := w.unloadPlugin(work.Plugin); err != nil {
if work.retryCount < retryLimit {
logger.WithField("retry-count", work.retryCount).Debug("requeueing request")
work.retryCount++
time.Sleep(retryDelay)
w.pluginWork <- work
}
}
}
case <-w.quitChan:
w.logger.Debug("stop tribe plugin worker")
return
}
}
}()
}
func (w worker) unloadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "unload-plugin",
})
if !w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
if _, err := w.pluginManager.Unload(plugin); err != nil {
logger.WithField("err", err).Info("failed to unload plugin")
return err
}
return nil
}
func (w worker) loadPlugin(plugin core.Plugin) error {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"_block": "load-plugin",
})
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
members, err := w.memberManager.GetPluginAgreementMembers()
if err != nil {
logger.Error(err)
return err
}
for _, member := range shuffle(members) {
url := fmt.Sprintf("%s://%s:%s/v1/plugins/%s/%s/%d?download=true", member.GetRestProto(), member.GetAddr(), member.GetRestPort(), plugin.TypeName(), plugin.Name(), plugin.Version())
c, err := client.New(url, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
if err != nil {
logger.WithFields(log.Fields{
"err": err,
"url": url,
}).Info("unable to create client")
continue
}
f, err := w.downloadPlugin(c, plugin)
// If we can't download from this member, try the next
if err != nil {
logger.Error(err)
continue
}
rp, err := core.NewRequestedPlugin(f.Name(), TempPath, nil)
if err != nil {
logger.Error(err)
return err
}
_, err = w.pluginManager.Load(rp)
if err != nil {
logger.Error(err)
return err
}
if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
return nil
}
return errors.New("failed to load plugin")
}
return errors.New("failed to find a member with the plugin")
}
func (w worker) downloadPlugin(c *client.Client, plugin core.Plugin) (*os.File, error) {
logger := w.logger.WithFields(log.Fields{
"plugin-name": plugin.Name(),
"plugin-version": plugin.Version(),
"plugin-type": plugin.TypeName(),
"url": c.URL,
"_block": "download-plugin",
})
resp, err := c.TribeRequest()
if err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Info("plugin not found")
return nil, fmt.Errorf("Plugin not found at %s: %s", c.URL, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if resp.Header.Get("Content-Type") != "application/x-gzip" {
logger.WithField("content-type", resp.Header.Get("Content-Type")).Error("Expected application/x-gzip")
}
dir, err := ioutil.TempDir("", "")
if err != nil {
logger.Error(err)
return nil, err
}
fpath := path.Join(dir, fmt.Sprintf("%s-%s-%d", plugin.TypeName(), plugin.Name(), plugin.Version()))
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_CREATE|os.O_TRUNC | random_line_split |
|
selection_test.go | pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
// Channel2(Policy(cc1) and Policy(cc2)) = Org5 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected = []api.PeerGroup{
// Org5 and Org2
pg(p11, p1, p3), pg(p11, p1, p4), pg(p11, p2, p3), pg(p11, p2, p4),
pg(p12, p1, p3), pg(p12, p1, p4), pg(p12, p2, p3), pg(p12, p2, p4),
// Org5 and Org3
pg(p11, p1, p5), pg(p11, p1, p6), pg(p11, p1, p7), pg(p11, p2, p5), pg(p11, p2, p6), pg(p11, p2, p7),
pg(p12, p1, p5), pg(p12, p1, p6), pg(p12, p1, p7), pg(p12, p2, p5), pg(p12, p2, p6), pg(p12, p2, p7),
// Org5 and Org4
pg(p11, p1, p8), pg(p11, p1, p9), pg(p11, p1, p10), pg(p11, p2, p8), pg(p11, p2, p9), pg(p11, p2, p10),
pg(p12, p1, p8), pg(p12, p1, p9), pg(p12, p1, p10), pg(p12, p2, p8), pg(p12, p2, p9), pg(p12, p2, p10),
// Org5 and Org3 and Org4
pg(p11, p5, p8), pg(p11, p5, p9), pg(p11, p5, p10), pg(p11, p6, p8), pg(p11, p6, p9), pg(p11, p6, p10), pg(p11, p7, p8), pg(p11, p7, p9), pg(p11, p7, p10),
pg(p12, p5, p8), pg(p12, p5, p9), pg(p12, p5, p10), pg(p12, p6, p8), pg(p12, p6, p9), pg(p12, p6, p10), pg(p12, p7, p8), pg(p12, p7, p9), pg(p12, p7, p10),
}
verify(t, service, expected, channel2, cc1, cc2)
}
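// verify calls GetEndorsersForChaincode once per expected peer group and asserts that
// every returned group is one of the expected ones; with the round-robin load-balance
// policy this should cycle through the candidate groups.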
func verify(t *testing.T, service api.SelectionService, expectedPeerGroups []api.PeerGroup, channelID string, chaincodeIDs ...string) {
// Set the log level to WARNING since the following spits out too much info in DEBUG
module := "pg-resolver"
level := logging.GetLevel(module)
logging.SetLevel(module, apilogging.WARNING)
defer logging.SetLevel(module, level)
for i := 0; i < len(expectedPeerGroups); i++ {
peers, err := service.GetEndorsersForChaincode(channelID, nil, chaincodeIDs...)
if err != nil {
t.Fatalf("error getting endorsers: %s", err)
}
if !containsPeerGroup(expectedPeerGroups, peers) {
t.Fatalf("peer group %s is not one of the expected peer groups: %v", toString(peers), expectedPeerGroups)
}
}
}
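// containsPeerGroup, containsAllPeers, and containsPeer compare peers by URL rather than
// by identity.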
func containsPeerGroup(groups []api.PeerGroup, peers []apifabclient.Peer) bool {
for _, g := range groups {
if containsAllPeers(peers, g) {
return true
}
}
return false
}
func containsAllPeers(peers []apifabclient.Peer, pg api.PeerGroup) bool {
if len(peers) != len(pg.Peers()) {
return false
}
for _, peer := range peers {
if !containsPeer(pg.Peers(), peer) {
return false
}
}
return true
}
func containsPeer(peers []apifabclient.Peer, peer apifabclient.Peer) bool {
for _, p := range peers {
if p.URL() == peer.URL() {
return true
}
}
return false
}
func pg(peers ...api.ChannelPeer) api.PeerGroup {
return pgresolver.NewPeerGroup(peers...)
}
func peer(name string, mspID string) api.ChannelPeer {
peer, err := sdkpeer.New(configImp, sdkpeer.WithURL(name+":7051"))
if err != nil {
panic(fmt.Sprintf("Failed to create peer: %v)", err))
}
peer.SetName(name)
peer.SetMSPID(mspID)
return channelpeer.New(peer, "", 0, nil)
}
func newMockSelectionService(membershipManager api.MembershipManager, ccDataProvider api.CCDataProvider, lbp api.LoadBalancePolicy) api.SelectionService {
return &selectionServiceImpl{
membershipManager: membershipManager,
ccDataProvider: ccDataProvider,
pgLBP: lbp,
pgResolvers: make(map[string]api.PeerGroupResolver),
}
}
type mockMembershipManager struct {
peerConfigs map[string][]api.ChannelPeer
}
func (m *mockMembershipManager) GetPeersOfChannel(channelID string) api.ChannelMembership {
return api.ChannelMembership{Peers: m.peerConfigs[channelID]}
}
func newMockMembershipManager() *mockMembershipManager {
return &mockMembershipManager{peerConfigs: make(map[string][]api.ChannelPeer)}
}
func (m *mockMembershipManager) add(channelID string, peers ...api.ChannelPeer) *mockMembershipManager {
m.peerConfigs[channelID] = []api.ChannelPeer(peers)
return m
}
type mockCCDataProvider struct {
ccData map[string]*ccprovider.ChaincodeData
}
func newMockCCDataProvider() *mockCCDataProvider {
return &mockCCDataProvider{ccData: make(map[string]*ccprovider.ChaincodeData)}
}
func (p *mockCCDataProvider) QueryChaincodeData(channelID string, chaincodeID string) (*ccprovider.ChaincodeData, error) {
return p.ccData[newResolverKey(channelID, chaincodeID).String()], nil
}
func (p *mockCCDataProvider) add(channelID string, chaincodeID string, policy *ccprovider.ChaincodeData) *mockCCDataProvider {
p.ccData[newResolverKey(channelID, chaincodeID).String()] = policy
return p
}
// Policy: Org1
func getPolicy1() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1)
if err != nil {
panic(err)
}
return newCCData(&common.SignaturePolicyEnvelope{
Version: 0,
Rule: signedBy[o1],
Identities: identities,
})
}
// Policy: 1 of [(2 of [Org1, Org2]),(2 of [Org1, Org3, Org4])]
func getPolicy2() *ccprovider.ChaincodeData {
signedBy, identities, err := pgresolver.GetPolicies(org1, org2, org3, org4)
if err != nil | {
panic(err)
} | conditional_block |
|
selection_test.go | ),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
}
func TestGetEndorsersForChaincodeTwoCCsTwoChannels(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8).
add(channel2, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()).
add(channel2, cc1, getPolicy3()).
add(channel2, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP(),
)
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
// Channel2(Policy(cc1) and Policy(cc2)) = Org5 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected = []api.PeerGroup{
// Org5 and Org2
pg(p11, p1, p3), pg(p11, p1, p4), pg(p11, p2, p3), pg(p11, p2, p4),
pg(p12, p1, p3), pg(p12, p1, p4), pg(p12, p2, p3), pg(p12, p2, p4),
// Org5 and Org3
pg(p11, p1, p5), pg(p11, p1, p6), pg(p11, p1, p7), pg(p11, p2, p5), pg(p11, p2, p6), pg(p11, p2, p7),
pg(p12, p1, p5), pg(p12, p1, p6), pg(p12, p1, p7), pg(p12, p2, p5), pg(p12, p2, p6), pg(p12, p2, p7),
// Org5 and Org4
pg(p11, p1, p8), pg(p11, p1, p9), pg(p11, p1, p10), pg(p11, p2, p8), pg(p11, p2, p9), pg(p11, p2, p10),
pg(p12, p1, p8), pg(p12, p1, p9), pg(p12, p1, p10), pg(p12, p2, p8), pg(p12, p2, p9), pg(p12, p2, p10),
// Org5 and Org3 and Org4
pg(p11, p5, p8), pg(p11, p5, p9), pg(p11, p5, p10), pg(p11, p6, p8), pg(p11, p6, p9), pg(p11, p6, p10), pg(p11, p7, p8), pg(p11, p7, p9), pg(p11, p7, p10),
pg(p12, p5, p8), pg(p12, p5, p9), pg(p12, p5, p10), pg(p12, p6, p8), pg(p12, p6, p9), pg(p12, p6, p10), pg(p12, p7, p8), pg(p12, p7, p9), pg(p12, p7, p10),
}
verify(t, service, expected, channel2, cc1, cc2)
}
func verify(t *testing.T, service api.SelectionService, expectedPeerGroups []api.PeerGroup, channelID string, chaincodeIDs ...string) {
// Set the log level to WARNING since the following spits out too much info in DEBUG
module := "pg-resolver"
level := logging.GetLevel(module)
logging.SetLevel(module, apilogging.WARNING)
defer logging.SetLevel(module, level)
for i := 0; i < len(expectedPeerGroups); i++ {
peers, err := service.GetEndorsersForChaincode(channelID, nil, chaincodeIDs...)
if err != nil {
t.Fatalf("error getting endorsers: %s", err)
}
if !containsPeerGroup(expectedPeerGroups, peers) {
t.Fatalf("peer group %s is not one of the expected peer groups: %v", toString(peers), expectedPeerGroups)
}
}
}
func containsPeerGroup(groups []api.PeerGroup, peers []apifabclient.Peer) bool {
for _, g := range groups {
if containsAllPeers(peers, g) {
return true
}
}
return false
}
func containsAllPeers(peers []apifabclient.Peer, pg api.PeerGroup) bool {
if len(peers) != len(pg.Peers()) {
return false
}
for _, peer := range peers {
if !containsPeer(pg.Peers(), peer) {
return false
}
}
return true
}
func containsPeer(peers []apifabclient.Peer, peer apifabclient.Peer) bool {
for _, p := range peers {
if p.URL() == peer.URL() {
return true
}
}
return false
}
func pg(peers ...api.ChannelPeer) api.PeerGroup {
return pgresolver.NewPeerGroup(peers...)
}
func peer(name string, mspID string) api.ChannelPeer {
peer, err := sdkpeer.New(configImp, sdkpeer.WithURL(name+":7051"))
if err != nil {
panic(fmt.Sprintf("Failed to create peer: %v)", err))
}
peer.SetName(name)
peer.SetMSPID(mspID)
return channelpeer.New(peer, "", 0, nil)
}
func newMockSelectionService(membershipManager api.MembershipManager, ccDataProvider api.CCDataProvider, lbp api.LoadBalancePolicy) api.SelectionService {
return &selectionServiceImpl{
membershipManager: membershipManager,
ccDataProvider: ccDataProvider,
pgLBP: lbp,
pgResolvers: make(map[string]api.PeerGroupResolver),
}
}
type mockMembershipManager struct {
peerConfigs map[string][]api.ChannelPeer
}
func (m *mockMembershipManager) GetPeersOfChannel(channelID string) api.ChannelMembership {
return api.ChannelMembership{Peers: m.peerConfigs[channelID]}
}
func newMockMembershipManager() *mockMembershipManager | {
return &mockMembershipManager{peerConfigs: make(map[string][]api.ChannelPeer)}
} | identifier_body |
|
selection_test.go | ",
SwOpts: &bccspFactory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: false,
FileKeystore: &bccspFactory.FileKeystoreOpts{KeyStorePath: "../sampleconfig/msp/keystore/"},
},
}
bccspFactory.InitFactories(opts)
//
configData, err := ioutil.ReadFile("../sampleconfig/config.yaml")
if err != nil {
panic(fmt.Sprintf("File error: %v\n", err))
}
configMsg := &configmanagerApi.ConfigMessage{MspID: mspID,
Peers: []configmanagerApi.PeerConfig{configmanagerApi.PeerConfig{
PeerID: "jdoe", App: []configmanagerApi.AppConfig{
configmanagerApi.AppConfig{AppName: "txnsnap", Config: string(configData)}}}}}
stub := getMockStub()
configBytes, err := json.Marshal(configMsg)
if err != nil {
panic(fmt.Sprintf("Cannot Marshal %s\n", err))
}
//upload valid message to HL
err = uplaodConfigToHL(stub, configBytes)
if err != nil {
panic(fmt.Sprintf("Cannot upload %s\n", err))
}
configmgmtService.Initialize(stub, mspID)
config, err := config.NewConfig("../sampleconfig", channelID)
if err != nil {
panic(fmt.Sprintf("Error initializing config: %s", err))
}
_, err = GetInstance("testChannel", &sampleConfig{config})
if err != nil {
panic(fmt.Sprintf("Client GetInstance return error %v", err))
}
os.Exit(m.Run())
}
func getMockStub() *mockstub.MockStub {
stub := mockstub.NewMockStub("testConfigState", nil)
stub.SetMspID("Org1MSP")
stub.MockTransactionStart("startTxn")
stub.ChannelID = channelID
return stub
}
// uplaodConfigToHL uploads the key & config to the repository.
func uplaodConfigToHL(stub *mockstub.MockStub, config []byte) error {
configManager := mgmt.NewConfigManager(stub)
if configManager == nil {
return fmt.Errorf("Cannot instantiate config manager")
}
err := configManager.Save(config)
return err
}
func TestGetEndorsersForChaincodeOneCC(t *testing.T) {
service := newMockSelectionService( | add(channel1, cc1, getPolicy1()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1)) = Org1
expected := []api.PeerGroup{
// Org1
pg(p1), pg(p2),
}
verify(t, service, expected, channel1, cc1)
}
func TestGetEndorsersForChaincodeTwoCCs(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
}
func TestGetEndorsersForChaincodeTwoCCsTwoChannels(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8).
add(channel2, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()).
add(channel2, cc1, getPolicy3()).
add(channel2, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP(),
)
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
// Channel2(Policy(cc1) and Policy(cc2)) = Org5 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected = []api.PeerGroup{
// Org5 and Org2
pg(p11, p1, p3), pg(p11, p1, p4), pg(p11, p2, p3), pg(p11, p2, p4),
pg(p12, p1, p3), pg(p12, p1, p4), pg(p12, p2, p3), pg(p12, p2, p4),
// Org5 and Org3
pg(p11, p1, p5), pg(p11, p1, p6), pg(p11, p1, p7), pg(p11, p2, p5), pg(p11, p2, p6), pg(p11, p2, p7),
pg(p12, p1, p5), pg(p12, p1, p6), pg(p12, p1, p7), pg(p12, p2, p5), pg(p12, p2, p6 | newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider(). | random_line_split |
selection_test.go | MSP")
stub.MockTransactionStart("startTxn")
stub.ChannelID = channelID
return stub
}
// uplaodConfigToHL uploads the key & config to the repository.
func uplaodConfigToHL(stub *mockstub.MockStub, config []byte) error {
configManager := mgmt.NewConfigManager(stub)
if configManager == nil {
return fmt.Errorf("Cannot instantiate config manager")
}
err := configManager.Save(config)
return err
}
func TestGetEndorsersForChaincodeOneCC(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1)) = Org1
expected := []api.PeerGroup{
// Org1
pg(p1), pg(p2),
}
verify(t, service, expected, channel1, cc1)
}
func TestGetEndorsersForChaincodeTwoCCs(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP())
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
}
func TestGetEndorsersForChaincodeTwoCCsTwoChannels(t *testing.T) {
service := newMockSelectionService(
newMockMembershipManager().
add(channel1, p1, p2, p3, p4, p5, p6, p7, p8).
add(channel2, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12),
newMockCCDataProvider().
add(channel1, cc1, getPolicy1()).
add(channel1, cc2, getPolicy2()).
add(channel2, cc1, getPolicy3()).
add(channel2, cc2, getPolicy2()),
pgresolver.NewRoundRobinLBP(),
)
// Channel1(Policy(cc1) and Policy(cc2)) = Org1 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected := []api.PeerGroup{
// Org1 and Org2
pg(p1, p3), pg(p1, p4), pg(p2, p3), pg(p2, p4),
// Org1 and Org3
pg(p1, p5), pg(p1, p6), pg(p1, p7), pg(p2, p5), pg(p2, p6), pg(p2, p7),
// Org1 and Org4
pg(p1, p8), pg(p1, p9), pg(p1, p10), pg(p2, p8), pg(p2, p9), pg(p2, p10),
// Org1 and Org3 and Org4
pg(p1, p5, p8), pg(p1, p5, p9), pg(p1, p5, p10), pg(p1, p6, p8), pg(p1, p6, p9), pg(p1, p6, p10), pg(p1, p7, p8), pg(p1, p7, p9), pg(p1, p7, p10),
pg(p2, p5, p8), pg(p2, p5, p9), pg(p2, p5, p10), pg(p2, p6, p8), pg(p2, p6, p9), pg(p2, p6, p10), pg(p2, p7, p8), pg(p2, p7, p9), pg(p2, p7, p10),
}
verify(t, service, expected, channel1, cc1, cc2)
// Channel2(Policy(cc1) and Policy(cc2)) = Org5 and (1 of [(2 of [Org1,Org2]),(2 of [Org1,Org3,Org4])])
expected = []api.PeerGroup{
// Org5 and Org2
pg(p11, p1, p3), pg(p11, p1, p4), pg(p11, p2, p3), pg(p11, p2, p4),
pg(p12, p1, p3), pg(p12, p1, p4), pg(p12, p2, p3), pg(p12, p2, p4),
// Org5 and Org3
pg(p11, p1, p5), pg(p11, p1, p6), pg(p11, p1, p7), pg(p11, p2, p5), pg(p11, p2, p6), pg(p11, p2, p7),
pg(p12, p1, p5), pg(p12, p1, p6), pg(p12, p1, p7), pg(p12, p2, p5), pg(p12, p2, p6), pg(p12, p2, p7),
// Org5 and Org4
pg(p11, p1, p8), pg(p11, p1, p9), pg(p11, p1, p10), pg(p11, p2, p8), pg(p11, p2, p9), pg(p11, p2, p10),
pg(p12, p1, p8), pg(p12, p1, p9), pg(p12, p1, p10), pg(p12, p2, p8), pg(p12, p2, p9), pg(p12, p2, p10),
// Org5 and Org3 and Org4
pg(p11, p5, p8), pg(p11, p5, p9), pg(p11, p5, p10), pg(p11, p6, p8), pg(p11, p6, p9), pg(p11, p6, p10), pg(p11, p7, p8), pg(p11, p7, p9), pg(p11, p7, p10),
pg(p12, p5, p8), pg(p12, p5, p9), pg(p12, p5, p10), pg(p12, p6, p8), pg(p12, p6, p9), pg(p12, p6, p10), pg(p12, p7, p8), pg(p12, p7, p9), pg(p12, p7, p10),
}
verify(t, service, expected, channel2, cc1, cc2)
}
func | verify | identifier_name |
|
mod.rs | " },
/// Record { count: 10, animal: "cheetah", description: "fast" },
/// Record { count: 4, animal: "armadillo", description: "armored" },
/// Record { count: 9, animal: "platypus", description: "unique" },
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,description
/// 7,penguin,happy
/// 10,cheetah,fast
/// 4,armadillo,armored
/// 9,platypus,unique
/// ");
/// # }
/// ```
///
/// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html
pub struct Writer<W: Write, E: Encodable> {
csv: csv::Writer<W>,
first_row: bool,
record_type: PhantomData<E>,
}
impl<E: Encodable> Writer<File, E> {
/// Creates a new typed CSV writer that writes to the file path given.
///
/// The file is created if it does not already exist and is truncated
/// otherwise.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> {
Ok(Self::from_csv_writer(csv::Writer::from_file(path)?))
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Creates a new typed CSV writer that writes to the `io::Write` given.
///
/// Note that the writer is buffered for you automatically.
pub fn from_writer(w: W) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_writer(w))
}
/// Creates a new typed CSV writer that writes to the CSV writer given.
///
/// This lets you specify options to the underlying CSV writer (e.g. to use
/// a different delimiter).
pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> {
Writer {
csv: w,
first_row: true,
record_type: PhantomData,
}
}
/// Creates a new typed CSV writer that writes to the buffer given.
///
/// This lets you specify your own buffered writer (e.g., use a different
/// capacity). All other constructors wrap the writer given in a buffer
/// with default capacity.
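///
/// A minimal sketch (the file path and capacity here are arbitrary, and `Record`
/// stands for any `RustcEncodable` struct such as the ones in the other examples):
///
/// ```rust,no_run
/// # extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # #[derive(RustcEncodable)]
/// # struct Record { a: usize }
/// # fn main() {
/// use std::fs::File;
/// use std::io::BufWriter;
///
/// let file = File::create("out.csv").unwrap();
/// let buf = BufWriter::with_capacity(1 << 20, file);
/// let mut wtr: typed_csv::Writer<File, Record> = typed_csv::Writer::from_buffer(buf);
/// wtr.encode(Record { a: 1 }).unwrap();
/// # }
/// ```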
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_buffer(buf))
}
}
impl<E: Encodable> Writer<Vec<u8>, E> {
/// Creates a new CSV writer that writes to an in memory buffer. At any
/// time, `as_string` or `as_bytes` can be called to retrieve the
/// cumulative CSV data.
pub fn from_memory() -> Writer<Vec<u8>, E> {
Self::from_csv_writer(csv::Writer::from_memory())
}
/// Returns the written CSV data as a string.
pub fn as_string(&mut self) -> &str {
self.csv.as_string()
}
/// Returns the encoded CSV data as raw bytes.
pub fn as_bytes(&mut self) -> &[u8] {
self.csv.as_bytes()
}
/// Convert the Writer into a string of written CSV data
pub fn into_string(self) -> String {
self.csv.into_string()
}
/// Convert the Writer into a vector of encoded CSV bytes.
pub fn into_bytes(self) -> Vec<u8> {
self.csv.into_bytes()
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Writes a record by encoding any `Encodable` value.
///
/// When the first record is encoded, the headers (the field names in the
/// struct) are written prior to encoding the record.
///
/// The type that is being encoded into should correspond to *one full CSV
/// record*. This can be a single struct, or arbitrarily nested tuples,
/// arrays, Vecs, and structs, as long as all scalar types (integers,
/// floats, characters, strings, collections containing one scalar, and
/// enums with 0 or 1 scalar arguments) are fields in structs. Enums with
/// zero arguments encode to their name, while enums of one argument encode
/// to their constituent value. `Option` types are also supported. (`None`
/// encodes to an empty field.)
///
/// Note that single-element tuple structs (the newtype pattern) are
/// supported. Unfortunately, to provide this functionality, a heuristic is
/// necessary to differentiate field names in normal structs from those in
/// tuple structs. As a result, field names in normal structs should not be
/// of the form `_field{}` where `{}` is its position in the struct.
///
/// # Example
///
/// This example encodes zoo animals that may not have a description.
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Count(usize);
///
/// #[derive(RustcEncodable)]
/// enum Group {
/// Bird,
/// Mammal,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part1 {
/// count: Count,
/// animal: &'static str,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part2 {
/// group: Group,
/// description: Option<&'static str>,
/// }
///
/// let records = vec![
/// (
/// Part1 { count: Count(7), animal: "penguin" },
/// Part2 { group: Group::Bird, description: Some("happy") },
/// ),
/// (
/// Part1 { count: Count(10), animal: "cheetah" },
/// Part2 { group: Group::Mammal, description: Some("fast") },
/// ),
/// (
/// Part1 { count: Count(4), animal: "armadillo" },
/// Part2 { group: Group::Mammal, description: Some("armored") },
/// ),
/// (
/// Part1 { count: Count(9), animal: "platypus" },
/// Part2 { group: Group::Mammal, description: None },
/// ),
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,group,description
/// 7,penguin,Bird,happy
/// 10,cheetah,Mammal,fast
/// 4,armadillo,Mammal,armored
/// 9,platypus,Mammal,
/// ");
/// # }
/// ```
pub fn encode(&mut self, row: E) -> csv::Result<()> {
// Write headers if this is the first row.
if self.first_row |
// Write row.
let mut erecord = csv::Encoded::new();
row.encode(&mut erecord)?;
self.csv.write(erecord.unwrap().into_iter())
}
/// Flushes the underlying buffer.
pub fn flush(&mut self) -> Result<()> {
self.csv.flush()
}
}
#[cfg(test)]
mod tests {
use super::Writer;
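// These tests check that the header row (taken from the struct field names) is written
// exactly once, and that nested containers of structs flatten into repeated
// header/value columns.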
#[derive(RustcEncodable)]
struct SimpleStruct {
a: usize,
b: usize,
}
#[test]
fn test_struct() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
w.encode(s1).unwrap();
let s2 = SimpleStruct { a: 3, b: 4 };
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n");
}
#[test]
fn test_tuple_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode((s1, s2)).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode((s3, s4)).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_array_of_structs() | {
let mut field_names_encoder = FieldNamesEncoder::new();
row.encode(&mut field_names_encoder)?;
self.csv.write(field_names_encoder.into_field_names().into_iter())?;
self.first_row = false;
} | conditional_block |
mod.rs | happy" },
/// Record { count: 10, animal: "cheetah", description: "fast" },
/// Record { count: 4, animal: "armadillo", description: "armored" },
/// Record { count: 9, animal: "platypus", description: "unique" },
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,description
/// 7,penguin,happy
/// 10,cheetah,fast
/// 4,armadillo,armored
/// 9,platypus,unique
/// ");
/// # }
/// ```
///
/// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html
pub struct Writer<W: Write, E: Encodable> {
csv: csv::Writer<W>,
first_row: bool,
record_type: PhantomData<E>,
}
impl<E: Encodable> Writer<File, E> {
/// Creates a new typed CSV writer that writes to the file path given.
///
/// The file is created if it does not already exist and is truncated
/// otherwise.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> {
Ok(Self::from_csv_writer(csv::Writer::from_file(path)?))
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Creates a new typed CSV writer that writes to the `io::Write` given.
///
/// Note that the writer is buffered for you automatically.
pub fn from_writer(w: W) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_writer(w))
}
/// Creates a new typed CSV writer that writes to the CSV writer given.
///
/// This lets you specify options to the underlying CSV writer (e.g. to use
/// a different delimiter).
pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> {
Writer {
csv: w,
first_row: true,
record_type: PhantomData,
}
}
/// Creates a new typed CSV writer that writes to the buffer given.
///
/// This lets you specify your own buffered writer (e.g., use a different
/// capacity). All other constructors wrap the writer given in a buffer
/// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_buffer(buf))
}
}
impl<E: Encodable> Writer<Vec<u8>, E> {
/// Creates a new CSV writer that writes to an in memory buffer. At any
/// time, `as_string` or `as_bytes` can be called to retrieve the
/// cumulative CSV data.
pub fn from_memory() -> Writer<Vec<u8>, E> {
Self::from_csv_writer(csv::Writer::from_memory())
}
/// Returns the written CSV data as a string.
pub fn as_string(&mut self) -> &str {
self.csv.as_string()
}
/// Returns the encoded CSV data as raw bytes.
pub fn as_bytes(&mut self) -> &[u8] {
self.csv.as_bytes()
}
/// Convert the Writer into a string of written CSV data
pub fn into_string(self) -> String {
self.csv.into_string()
}
/// Convert the Writer into a vector of encoded CSV bytes.
pub fn into_bytes(self) -> Vec<u8> {
self.csv.into_bytes()
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Writes a record by encoding any `Encodable` value.
///
/// When the first record is encoded, the headers (the field names in the
/// struct) are written prior to encoding the record.
///
/// The type that is being encoded into should correspond to *one full CSV
/// record*. This can be a single struct, or arbitrarily nested tuples,
/// arrays, Vecs, and structs, as long as all scalar types (integers,
/// floats, characters, strings, collections containing one scalar, and
/// enums with 0 or 1 scalar arguments) are fields in structs. Enums with
/// zero arguments encode to their name, while enums of one argument encode
/// to their constituent value. `Option` types are also supported. (`None`
/// encodes to an empty field.)
///
/// Note that single-element tuple structs (the newtype pattern) are
/// supported. Unfortunately, to provide this functionality, a heuristic is
/// necessary to differentiate field names in normal structs from those in
/// tuple structs. As a result, field names in normal structs should not be
/// of the form `_field{}` where `{}` is its position in the struct.
///
/// # Example
///
/// This example encodes zoo animals that may not have a description.
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Count(usize);
///
/// #[derive(RustcEncodable)]
/// enum Group {
/// Bird,
/// Mammal,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part1 {
/// count: Count,
/// animal: &'static str,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part2 {
/// group: Group,
/// description: Option<&'static str>,
/// }
///
/// let records = vec![
/// (
/// Part1 { count: Count(7), animal: "penguin" },
/// Part2 { group: Group::Bird, description: Some("happy") },
/// ),
/// (
/// Part1 { count: Count(10), animal: "cheetah" },
/// Part2 { group: Group::Mammal, description: Some("fast") },
/// ),
/// (
/// Part1 { count: Count(4), animal: "armadillo" },
/// Part2 { group: Group::Mammal, description: Some("armored") },
/// ),
/// (
/// Part1 { count: Count(9), animal: "platypus" },
/// Part2 { group: Group::Mammal, description: None },
/// ),
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,group,description
/// 7,penguin,Bird,happy
/// 10,cheetah,Mammal,fast
/// 4,armadillo,Mammal,armored
/// 9,platypus,Mammal,
/// ");
/// # }
/// ```
pub fn encode(&mut self, row: E) -> csv::Result<()> {
// Write headers if this is the first row.
if self.first_row {
let mut field_names_encoder = FieldNamesEncoder::new();
row.encode(&mut field_names_encoder)?;
self.csv.write(field_names_encoder.into_field_names().into_iter())?;
self.first_row = false;
}
// Write row.
let mut erecord = csv::Encoded::new();
row.encode(&mut erecord)?;
self.csv.write(erecord.unwrap().into_iter())
}
/// Flushes the underlying buffer.
pub fn flush(&mut self) -> Result<()> {
self.csv.flush()
}
}
#[cfg(test)]
mod tests {
use super::Writer;
#[derive(RustcEncodable)]
struct SimpleStruct {
a: usize,
b: usize,
}
#[test]
fn | () {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
w.encode(s1).unwrap();
let s2 = SimpleStruct { a: 3, b: 4 };
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n");
}
#[test]
fn test_tuple_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode((s1, s2)).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode((s3, s4)).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_array_of_structs() | test_struct | identifier_name |
mod.rs | happy" },
/// Record { count: 10, animal: "cheetah", description: "fast" },
/// Record { count: 4, animal: "armadillo", description: "armored" },
/// Record { count: 9, animal: "platypus", description: "unique" },
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,description
/// 7,penguin,happy
/// 10,cheetah,fast
/// 4,armadillo,armored
/// 9,platypus,unique
/// ");
/// # }
/// ```
///
/// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html
pub struct Writer<W: Write, E: Encodable> {
csv: csv::Writer<W>,
first_row: bool,
record_type: PhantomData<E>,
}
impl<E: Encodable> Writer<File, E> {
/// Creates a new typed CSV writer that writes to the file path given.
///
/// The file is created if it does not already exist and is truncated
/// otherwise.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> {
Ok(Self::from_csv_writer(csv::Writer::from_file(path)?))
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Creates a new typed CSV writer that writes to the `io::Write` given.
///
/// Note that the writer is buffered for you automatically.
pub fn from_writer(w: W) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_writer(w))
}
/// Creates a new typed CSV writer that writes to the CSV writer given.
///
/// This lets you specify options to the underlying CSV writer (e.g. to use
/// a different delimiter).
pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> {
Writer {
csv: w,
first_row: true,
record_type: PhantomData,
}
}
/// Creates a new typed CSV writer that writes to the buffer given.
///
/// This lets you specify your own buffered writer (e.g., use a different
/// capacity). All other constructors wrap the writer given in a buffer
/// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_buffer(buf))
}
}
impl<E: Encodable> Writer<Vec<u8>, E> {
/// Creates a new CSV writer that writes to an in memory buffer. At any
/// time, `as_string` or `as_bytes` can be called to retrieve the
/// cumulative CSV data.
pub fn from_memory() -> Writer<Vec<u8>, E> {
Self::from_csv_writer(csv::Writer::from_memory())
}
/// Returns the written CSV data as a string.
pub fn as_string(&mut self) -> &str {
self.csv.as_string()
}
/// Returns the encoded CSV data as raw bytes.
pub fn as_bytes(&mut self) -> &[u8] {
self.csv.as_bytes()
}
/// Convert the Writer into a string of written CSV data
pub fn into_string(self) -> String {
self.csv.into_string()
}
/// Convert the Writer into a vector of encoded CSV bytes.
pub fn into_bytes(self) -> Vec<u8> {
self.csv.into_bytes()
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Writes a record by encoding any `Encodable` value.
///
/// When the first record is encoded, the headers (the field names in the
/// struct) are written prior to encoding the record.
///
/// The type that is being encoded into should correspond to *one full CSV
/// record*. This can be a single struct, or arbitrarily nested tuples,
/// arrays, Vecs, and structs, as long as all scalar types (integers,
/// floats, characters, strings, collections containing one scalar, and
/// enums with 0 or 1 scalar arguments) are fields in structs. Enums with
/// zero arguments encode to their name, while enums of one argument encode
/// to their constituent value. `Option` types are also supported. (`None`
/// encodes to an empty field.)
///
/// Note that single-element tuple structs (the newtype pattern) are
/// supported. Unfortunately, to provide this functionality, a heuristic is
/// necessary to differentiate field names in normal structs from those in
/// tuple structs. As a result, field names in normal structs should not be
/// of the form `_field{}` where `{}` is its position in the struct.
///
/// # Example
///
/// This example encodes zoo animals that may not have a description.
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Count(usize);
///
/// #[derive(RustcEncodable)]
/// enum Group {
/// Bird,
/// Mammal,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part1 {
/// count: Count,
/// animal: &'static str,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part2 {
/// group: Group,
/// description: Option<&'static str>,
/// }
///
/// let records = vec![
/// (
/// Part1 { count: Count(7), animal: "penguin" },
/// Part2 { group: Group::Bird, description: Some("happy") },
/// ),
/// (
/// Part1 { count: Count(10), animal: "cheetah" },
/// Part2 { group: Group::Mammal, description: Some("fast") },
/// ),
/// (
/// Part1 { count: Count(4), animal: "armadillo" },
/// Part2 { group: Group::Mammal, description: Some("armored") }, | /// ),
/// (
/// Part1 { count: Count(9), animal: "platypus" },
/// Part2 { group: Group::Mammal, description: None },
/// ),
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,group,description
/// 7,penguin,Bird,happy
/// 10,cheetah,Mammal,fast
/// 4,armadillo,Mammal,armored
/// 9,platypus,Mammal,
/// ");
/// # }
/// ```
pub fn encode(&mut self, row: E) -> csv::Result<()> {
// Write headers if this is the first row.
if self.first_row {
let mut field_names_encoder = FieldNamesEncoder::new();
row.encode(&mut field_names_encoder)?;
self.csv.write(field_names_encoder.into_field_names().into_iter())?;
self.first_row = false;
}
// Write row.
let mut erecord = csv::Encoded::new();
row.encode(&mut erecord)?;
self.csv.write(erecord.unwrap().into_iter())
}
/// Flushes the underlying buffer.
pub fn flush(&mut self) -> Result<()> {
self.csv.flush()
}
}
#[cfg(test)]
mod tests {
use super::Writer;
#[derive(RustcEncodable)]
struct SimpleStruct {
a: usize,
b: usize,
}
#[test]
fn test_struct() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
w.encode(s1).unwrap();
let s2 = SimpleStruct { a: 3, b: 4 };
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n");
}
#[test]
fn test_tuple_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode((s1, s2)).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode((s3, s4)).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_array_of_structs() {
| random_line_split |
|
mod.rs | " },
/// Record { count: 10, animal: "cheetah", description: "fast" },
/// Record { count: 4, animal: "armadillo", description: "armored" },
/// Record { count: 9, animal: "platypus", description: "unique" },
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,description
/// 7,penguin,happy
/// 10,cheetah,fast
/// 4,armadillo,armored
/// 9,platypus,unique
/// ");
/// # }
/// ```
///
/// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html
pub struct Writer<W: Write, E: Encodable> {
csv: csv::Writer<W>,
first_row: bool,
record_type: PhantomData<E>,
}
impl<E: Encodable> Writer<File, E> {
/// Creates a new typed CSV writer that writes to the file path given.
///
/// The file is created if it does not already exist and is truncated
/// otherwise.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> {
Ok(Self::from_csv_writer(csv::Writer::from_file(path)?))
}
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Creates a new typed CSV writer that writes to the `io::Write` given.
///
/// Note that the writer is buffered for you automatically.
pub fn from_writer(w: W) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_writer(w))
}
/// Creates a new typed CSV writer that writes to the CSV writer given.
///
/// This lets you specify options to the underlying CSV writer (e.g. to use
/// a different delimiter).
pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> {
Writer {
csv: w,
first_row: true,
record_type: PhantomData,
}
}
/// Creates a new typed CSV writer that writes to the buffer given.
///
/// This lets you specify your own buffered writer (e.g., use a different
/// capacity). All other constructors wrap the writer given in a buffer
/// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> {
Self::from_csv_writer(csv::Writer::from_buffer(buf))
}
}
impl<E: Encodable> Writer<Vec<u8>, E> {
/// Creates a new CSV writer that writes to an in memory buffer. At any
/// time, `as_string` or `as_bytes` can be called to retrieve the
/// cumulative CSV data.
pub fn from_memory() -> Writer<Vec<u8>, E> {
Self::from_csv_writer(csv::Writer::from_memory())
}
/// Returns the written CSV data as a string.
pub fn as_string(&mut self) -> &str {
self.csv.as_string()
}
/// Returns the encoded CSV data as raw bytes.
pub fn as_bytes(&mut self) -> &[u8] {
self.csv.as_bytes()
}
/// Convert the Writer into a string of written CSV data
pub fn into_string(self) -> String {
self.csv.into_string()
}
/// Convert the Writer into a vector of encoded CSV bytes.
pub fn into_bytes(self) -> Vec<u8> |
}
impl<W: Write, E: Encodable> Writer<W, E> {
/// Writes a record by encoding any `Encodable` value.
///
/// When the first record is encoded, the headers (the field names in the
/// struct) are written prior to encoding the record.
///
/// The type that is being encoded into should correspond to *one full CSV
/// record*. This can be a single struct, or arbitrarily nested tuples,
/// arrays, Vecs, and structs, as long as all scalar types (integers,
/// floats, characters, strings, collections containing one scalar, and
/// enums with 0 or 1 scalar arguments) are fields in structs. Enums with
/// zero arguments encode to their name, while enums of one argument encode
/// to their constituent value. `Option` types are also supported. (`None`
/// encodes to an empty field.)
///
/// Note that single-element tuple structs (the newtype pattern) are
/// supported. Unfortunately, to provide this functionality, a heuristic is
/// necessary to differentiate field names in normal structs from those in
/// tuple structs. As a result, field names in normal structs should not be
/// of the form `_field{}` where `{}` is its position in the struct.
///
/// # Example
///
/// This example encodes zoo animals that may not have a description.
///
/// ```rust
/// extern crate rustc_serialize;
/// # extern crate typed_csv;
/// # fn main() {
///
/// #[derive(RustcEncodable)]
/// struct Count(usize);
///
/// #[derive(RustcEncodable)]
/// enum Group {
/// Bird,
/// Mammal,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part1 {
/// count: Count,
/// animal: &'static str,
/// }
///
/// #[derive(RustcEncodable)]
/// struct Part2 {
/// group: Group,
/// description: Option<&'static str>,
/// }
///
/// let records = vec![
/// (
/// Part1 { count: Count(7), animal: "penguin" },
/// Part2 { group: Group::Bird, description: Some("happy") },
/// ),
/// (
/// Part1 { count: Count(10), animal: "cheetah" },
/// Part2 { group: Group::Mammal, description: Some("fast") },
/// ),
/// (
/// Part1 { count: Count(4), animal: "armadillo" },
/// Part2 { group: Group::Mammal, description: Some("armored") },
/// ),
/// (
/// Part1 { count: Count(9), animal: "platypus" },
/// Part2 { group: Group::Mammal, description: None },
/// ),
/// ];
///
/// let mut wtr = typed_csv::Writer::from_memory();
/// for record in records.into_iter() {
/// wtr.encode(record).unwrap();
/// }
///
/// assert_eq!(wtr.as_string(), "\
/// count,animal,group,description
/// 7,penguin,Bird,happy
/// 10,cheetah,Mammal,fast
/// 4,armadillo,Mammal,armored
/// 9,platypus,Mammal,
/// ");
/// # }
/// ```
pub fn encode(&mut self, row: E) -> csv::Result<()> {
// Write headers if this is the first row.
if self.first_row {
let mut field_names_encoder = FieldNamesEncoder::new();
row.encode(&mut field_names_encoder)?;
self.csv.write(field_names_encoder.into_field_names().into_iter())?;
self.first_row = false;
}
// Write row.
let mut erecord = csv::Encoded::new();
row.encode(&mut erecord)?;
self.csv.write(erecord.unwrap().into_iter())
}
/// Flushes the underlying buffer.
pub fn flush(&mut self) -> Result<()> {
self.csv.flush()
}
}
#[cfg(test)]
mod tests {
use super::Writer;
#[derive(RustcEncodable)]
struct SimpleStruct {
a: usize,
b: usize,
}
#[test]
fn test_struct() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
w.encode(s1).unwrap();
let s2 = SimpleStruct { a: 3, b: 4 };
w.encode(s2).unwrap();
assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n");
}
#[test]
fn test_tuple_of_structs() {
let mut w = Writer::from_memory();
let s1 = SimpleStruct { a: 0, b: 1 };
let s2 = SimpleStruct { a: 2, b: 3 };
w.encode((s1, s2)).unwrap();
let s3 = SimpleStruct { a: 4, b: 5 };
let s4 = SimpleStruct { a: 6, b: 7 };
w.encode((s3, s4)).unwrap();
assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n");
}
#[test]
fn test_array_of_structs() | {
self.csv.into_bytes()
} | identifier_body |
interlock_handler.go | posetPosetDagContext struct {
*evalContext
dbReader *dbreader.DBReader
lockStore *lockstore.MemStore
resolvedLocks []uint64
posetPosetDagReq *fidelpb.PosetDagRequest
keyRanges []*interlock.KeyRange
startTS uint64
}
// handleCoFIDelAGRequest handles an interlock PosetDag request.
func handleCoFIDelAGRequest(dbReader *dbreader.DBReader, lockStore *lockstore.MemStore, req *interlock.Request) *interlock.Response {
startTime := time.Now()
resp := &interlock.Response{}
posetPosetDagCtx, posetPosetDagReq, err := buildPosetDag(dbReader, lockStore, req)
if err != nil {
resp.OtherError = err.Error()
return resp
}
closureExec, err := buildClosureExecutor(posetPosetDagCtx, posetPosetDagReq)
if err != nil {
return buildResp(nil, nil, posetPosetDagReq, err, posetPosetDagCtx.sc.GetWarnings(), time.Since(startTime))
}
chunks, err := closureExec.execute()
return buildResp(chunks, closureExec.counts, posetPosetDagReq, err, posetPosetDagCtx.sc.GetWarnings(), time.Since(startTime))
}
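// buildPosetDag validates the request, unmarshals the PosetDagRequest payload, and builds
// the evaluation context (statement context, time zone, and column info taken from the
// first executor) used by the closure executor.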
func buildPosetDag(reader *dbreader.DBReader, lockStore *lockstore.MemStore, req *interlock.Request) (*posetPosetDagContext, *fidelpb.PosetDagRequest, error) {
if len(req.Ranges) == 0 {
return nil, nil, errors.New("request range is null")
}
if req.GetTp() != solomonkey.ReqTypePosetDag {
return nil, nil, errors.Errorf("unsupported request type %d", req.GetTp())
}
posetPosetDagReq := new(fidelpb.PosetDagRequest)
err := proto.Unmarshal(req.Data, posetPosetDagReq)
if err != nil {
return nil, nil, errors.Trace(err)
}
sc := flagsToStatementContext(posetPosetDagReq.Flags)
sc.TimeZone = time.FixedZone("UTC", int(posetPosetDagReq.TimeZoneOffset))
ctx := &posetPosetDagContext{
evalContext: &evalContext{sc: sc},
dbReader: reader,
lockStore: lockStore,
posetPosetDagReq: posetPosetDagReq,
keyRanges: req.Ranges,
startTS: req.StartTs,
resolvedLocks: req.Context.ResolvedLocks,
}
scanExec := posetPosetDagReq.Executors[0]
if scanExec.Tp == fidelpb.ExecType_TypeTableScan {
ctx.setDeferredCausetInfo(scanExec.TblScan.DeferredCausets)
ctx.primaryDefCauss = scanExec.TblScan.PrimaryDeferredCausetIds
} else {
ctx.setDeferredCausetInfo(scanExec.IdxScan.DeferredCausets)
}
return ctx, posetPosetDagReq, err
}
func getAggInfo(ctx *posetPosetDagContext, pbAgg *fidelpb.Aggregation) ([]aggregation.Aggregation, []expression.Expression, error) {
length := len(pbAgg.AggFunc)
aggs := make([]aggregation.Aggregation, 0, length)
var err error
for _, expr := range pbAgg.AggFunc {
var aggExpr aggregation.Aggregation
aggExpr, err = aggregation.NewDistAggFunc(expr, ctx.fieldTps, ctx.sc)
if err != nil {
return nil, nil, errors.Trace(err)
}
aggs = append(aggs, aggExpr)
}
groupBys, err := convertToExprs(ctx.sc, ctx.fieldTps, pbAgg.GetGroupBy())
if err != nil {
return nil, nil, errors.Trace(err)
}
return aggs, groupBys, nil
}
func getTopNInfo(ctx *evalContext, topN *fidelpb.TopN) (heap *topNHeap, conds []expression.Expression, err error) {
pbConds := make([]*fidelpb.Expr, len(topN.OrderBy))
for i, item := range topN.OrderBy {
pbConds[i] = item.Expr
}
heap = &topNHeap{
totalCount: int(topN.Limit),
topNSorter: topNSorter{
orderByItems: topN.OrderBy,
sc: ctx.sc,
},
}
if conds, err = convertToExprs(ctx.sc, ctx.fieldTps, pbConds); err != nil {
return nil, nil, errors.Trace(err) | return heap, conds, nil
}
type evalContext struct {
colIDs map[int64]int
columnInfos []*fidelpb.DeferredCausetInfo
fieldTps []*types.FieldType
primaryDefCauss []int64
sc *stmtctx.StatementContext
}
func (e *evalContext) setDeferredCausetInfo(defcaus []*fidelpb.DeferredCausetInfo) {
e.columnInfos = make([]*fidelpb.DeferredCausetInfo, len(defcaus))
INTERLOCKy(e.columnInfos, defcaus)
e.colIDs = make(map[int64]int, len(e.columnInfos))
e.fieldTps = make([]*types.FieldType, 0, len(e.columnInfos))
for i, col := range e.columnInfos {
ft := fieldTypeFromPBDeferredCauset(col)
e.fieldTps = append(e.fieldTps, ft)
e.colIDs[col.GetDeferredCausetId()] = i
}
}
func (e *evalContext) newRowDecoder() (*rowcodec.ChunkDecoder, error) {
var (
pkDefCauss []int64
defcaus = make([]rowcodec.DefCausInfo, 0, len(e.columnInfos))
)
for i := range e.columnInfos {
info := e.columnInfos[i]
ft := e.fieldTps[i]
col := rowcodec.DefCausInfo{
ID: info.DeferredCausetId,
Ft: ft,
IsPKHandle: info.PkHandle,
}
defcaus = append(defcaus, col)
if info.PkHandle {
pkDefCauss = append(pkDefCauss, info.DeferredCausetId)
}
}
if len(pkDefCauss) == 0 {
if e.primaryDefCauss != nil {
pkDefCauss = e.primaryDefCauss
} else {
pkDefCauss = []int64{0}
}
}
def := func(i int, chk *chunk.Chunk) error {
info := e.columnInfos[i]
if info.PkHandle || len(info.DefaultVal) == 0 {
chk.AppendNull(i)
return nil
}
decoder := codec.NewDecoder(chk, e.sc.TimeZone)
_, err := decoder.DecodeOne(info.DefaultVal, i, e.fieldTps[i])
if err != nil {
return err
}
return nil
}
return rowcodec.NewChunkDecoder(defcaus, pkDefCauss, def, e.sc.TimeZone), nil
}
// decodeRelatedDeferredCausetVals decodes data to Causet slice according to the event information.
func (e *evalContext) decodeRelatedDeferredCausetVals(relatedDefCausOffsets []int, value [][]byte, event []types.Causet) error {
var err error
for _, offset := range relatedDefCausOffsets {
event[offset], err = blockcodec.DecodeDeferredCausetValue(value[offset], e.fieldTps[offset], e.sc.TimeZone)
if err != nil {
return errors.Trace(err)
}
}
return nil
}
// flagsToStatementContext creates a StatementContext from a `fidelpb.SelectRequest.Flags`.
func flagsToStatementContext(flags uint64) *stmtctx.StatementContext {
sc := new(stmtctx.StatementContext)
sc.IgnoreTruncate = (flags & perceptron.FlagIgnoreTruncate) > 0
sc.TruncateAsWarning = (flags & perceptron.FlagTruncateAsWarning) > 0
sc.InInsertStmt = (flags & perceptron.FlagInInsertStmt) > 0
sc.InSelectStmt = (flags & perceptron.FlagInSelectStmt) > 0
sc.InDeleteStmt = (flags & perceptron.FlagInUFIDelateOrDeleteStmt) > 0
sc.OverflowAsWarning = (flags & perceptron.FlagOverflowAsWarning) > 0
sc.IgnoreZeroInDate = (flags & perceptron.FlagIgnoreZeroInDate) > 0
sc.DividedByZeroAsWarning = (flags & perceptron.FlagDividedByZeroAsWarning) > 0
return sc
}
// ErrLocked is returned when trying to Read/Write on a locked key. Client should
// backoff or cleanup the dagger then retry.
type ErrLocked struct {
Key []byte
Primary []byte
StartTS uint64
TTL uint64
LockType uint8
}
// BuildLockErr generates Err | }
| random_line_split |
widget.js | Stage.Basic.NodeFilter.EMPTY_VALUE, storeValueInContext: true},
{id: 'charts', name: 'Charts table', description: 'Charts configuration table', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 5, columns: [
{name: 'metric', label: 'Metric', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.MetricFilter, description: 'Metric to be presented on the chart', filterContextName: 'nodeFilter'},
{name: 'label', label: 'Label', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'Chart label'}
]},
{id: 'query', name: 'Custom Influx Query', description: 'Please note that below query builder overrides the series defined in \'Charts table\'', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 1, columns: [
{name: 'qSelect', label: 'SELECT', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: ''},
{name: 'qFrom', label: 'FROM', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${deploymentId} token to inject dynamic deployment ID. Example: \'/${deploymentId}\..*\.((memory_MemFree))$/\''},
{name: 'qWhere', label: 'WHERE', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${timeFilter} token to inject dynamic data/time ranges.'}
]},
{id: 'type', name: 'Charts type', items: [
{name:'Line chart', value:Stage.Basic.Graphs.Graph.LINE_CHART_TYPE},
{name:'Bar chart', value:Stage.Basic.Graphs.Graph.BAR_CHART_TYPE},
{name:'Area chart', value:Stage.Basic.Graphs.Graph.AREA_CHART_TYPE}],
default: Stage.Basic.Graphs.Graph.LINE_CHART_TYPE, type: Stage.Basic.GenericField.LIST_TYPE},
{id: 'timeFilter', name: 'Time range and resolution', description: 'Time range and time resolution for all defined charts',
type: Stage.Basic.GenericField.CUSTOM_TYPE, component: Stage.Basic.TimeFilter,
default: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE, defaultValue: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE}
],
UNCONFIGURED_STATE: 'unconfigured',
EMPTY_RESPONSE_STATE: 'emptyResponse',
_prepareData: function(data, xDataKey) {
const TIME_FORMAT = 'YYYY-MM-DD HH:mm:ss';
const MAX_NUMBER_OF_POINTS = 200;
const TIME_INDEX = 0;
const VALUE_INDEX = 1;
const REFERENCE_METRIC_INDEX = 0;
const NUMBER_OF_METRICS = data.length;
const NUMBER_OF_POINTS = data[REFERENCE_METRIC_INDEX].points.length;
let points = [];
// Data conversion to recharts format
// As a reference time points list, metric no. REFERENCE_METRIC_INDEX is taken
for (let i = 0; i < NUMBER_OF_POINTS; i++) {
let point = { [xDataKey]: Stage.Utils.formatTimestamp(data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX], TIME_FORMAT, null) };
for (let j = 0; j < NUMBER_OF_METRICS; j++) {
if (data[j].points[i] &&
data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX] === data[j].points[i][TIME_INDEX])
{
let metricName = data[j].name;
let pointValue = data[j].points[i][VALUE_INDEX];
point[metricName] = pointValue;
}
}
points.push(point);
}
// Data optimization (show no more than MAX_NUMBER_OF_POINTS points on the graph)
if (points.length > MAX_NUMBER_OF_POINTS) {
let optimizedPoints = [];
let delta = parseFloat(points.length / MAX_NUMBER_OF_POINTS);
for (let i = 0; i < points.length; i = i + delta) {
optimizedPoints.push(points[Math.floor(i)]);
}
points = optimizedPoints;
}
return points;
},
_getChartsMetricsList: function(charts) {
return _.chain(charts)
.filter((graph) => !_.isEmpty(graph.metric))
.map((graph) => graph.metric)
.uniq()
.value();
},
_getChartsConfiguration: function(charts, query, data) {
let chartsConfig = [];
if (!_.isEmpty(query)) {
_.forEach(data, (chart) => {
chartsConfig.push({
name: chart.name,
label: chart.name,
axisLabel: ''
});
})
} else {
_.forEach(charts, (chart) => {
let chartName = chart.metric;
if (!_.isEmpty(chartName)) {
chartsConfig.push({
name: chartName,
label: chart.label ? chart.label : chartName,
axisLabel: ''
});
}
})
chartsConfig = _.uniqBy(chartsConfig, 'name');
}
return chartsConfig;
},
| (string){
return string.replace(/;/g, '');
},
_prepareInfluxQuery: function(queries, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup) {
return _.map(queries, (queryParams) => {
let selectWhat = this._sanitizeQuery(queryParams.qSelect);
let selectFrom = this._sanitizeQuery(queryParams.qFrom);
let selectWhere = this._sanitizeQuery(queryParams.qWhere);
if (!_.isEmpty(selectWhat) && !_.isEmpty(selectFrom)) {
if ((_.includes(selectFrom, '${deploymentId}') && _.isEmpty(deploymentId)) ||
(_.includes(selectFrom, '${nodeId}') && _.isEmpty(nodeId)) ||
(_.includes(selectFrom, '${nodeInstanceId}') && _.isEmpty(nodeInstanceId)))
return {};
selectFrom = _.replace(selectFrom, '${deploymentId}', deploymentId);
selectFrom = _.replace(selectFrom, '${nodeId}', nodeId);
selectFrom = _.replace(selectFrom, '${nodeInstanceId}', nodeInstanceId);
selectWhere = _.replace(selectWhere, '${timeFilter}', `time > ${from} and time < ${to} group by time(${timeGroup})`);
if (_.isEmpty(selectWhere))
return {qSelect: selectWhat, qFrom: selectFrom};
else
return {qSelect: selectWhat, qFrom: selectFrom, qWhere: selectWhere};
} else
return {};
});
},
_isEmptyResponse: function(widget, data) {
return data.state === widget.definition.EMPTY_RESPONSE_STATE;
},
_isWidgetNotConfigured: function(widget, data) {
return data.state === widget.definition.UNCONFIGURED_STATE;
},
fetchParams: function(widget, toolbox) {
let deploymentId = toolbox.getContext().getValue('deploymentId');
let nodeId = toolbox.getContext().getValue('nodeId');
let nodeInstanceId = toolbox.getContext().getValue('nodeInstanceId');
let nodeFilterFromWidget = widget.configuration.nodeFilter;
if (nodeFilterFromWidget.deploymentId || nodeFilterFromWidget.nodeId || nodeFilterFromWidget.nodeInstanceId) {
deploymentId = nodeFilterFromWidget.deploymentId;
nodeId = nodeFilterFromWidget.nodeId;
nodeInstanceId = nodeFilterFromWidget.nodeInstanceId;
}
let timeFilterFromWidget = widget.configuration.timeFilter;
let timeFilterFromContext = toolbox.getContext().getValue('timeFilter');
let timeStart = _.get(timeFilterFromContext, 'start', timeFilterFromWidget.start);
timeStart = moment(timeStart).isValid() ? `${moment(timeStart).unix()}s` : timeStart;
let timeEnd = _.get(timeFilterFromContext, 'end', timeFilterFromWidget.end);
timeEnd = moment(timeEnd).isValid() ? `${moment(timeEnd).unix()}s` : timeEnd;
let timeResolution = _.get(timeFilterFromContext, 'resolution', timeFilterFromWidget.resolution);
let timeUnit = _.get(timeFilterFromContext, 'unit', timeFilterFromWidget.unit);
let timeGroup = `${timeResolution}${timeUnit}`;
return { deploymentId, nodeId, nodeInstanceId, timeStart, timeEnd, timeGroup };
},
fetchData: function(widget, toolbox, params) {
const actions = new Stage.Common.InfluxActions(toolbox);
const deploymentId = params.deploymentId;
const nodeId = params.nodeId;
const nodeInstanceId = params.nodeInstanceId;
const metrics = this._getChartsMetricsList(widget.configuration.charts);
const from = params.timeStart;
const to = params.timeEnd;
const timeGroup = params.timeGroup;
const preparedQuery = _.head(this._prepareInfluxQuery(widget.configuration.query, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup));
if (!_.isEmpty(preparedQuery)) {
toolbox.loading(true);
return actions.doRunQuery(preparedQuery.qSelect, preparedQuery.qFrom, preparedQuery.qWhere).then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
}).catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
| _sanitizeQuery | identifier_name |
widget.js | Stage.Basic.NodeFilter.EMPTY_VALUE, storeValueInContext: true},
{id: 'charts', name: 'Charts table', description: 'Charts configuration table', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 5, columns: [
{name: 'metric', label: 'Metric', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.MetricFilter, description: 'Metric to be presented on the chart', filterContextName: 'nodeFilter'},
{name: 'label', label: 'Label', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'Chart label'}
]},
{id: 'query', name: 'Custom Influx Query', description: 'Please note that below query builder overrides the series defined in \'Charts table\'', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 1, columns: [
{name: 'qSelect', label: 'SELECT', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: ''},
{name: 'qFrom', label: 'FROM', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${deploymentId} token to inject dynamic deployment ID. Example: \'/${deploymentId}\..*\.((memory_MemFree))$/\''},
{name: 'qWhere', label: 'WHERE', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${timeFilter} token to inject dynamic data/time ranges.'}
]},
{id: 'type', name: 'Charts type', items: [
{name:'Line chart', value:Stage.Basic.Graphs.Graph.LINE_CHART_TYPE},
{name:'Bar chart', value:Stage.Basic.Graphs.Graph.BAR_CHART_TYPE},
{name:'Area chart', value:Stage.Basic.Graphs.Graph.AREA_CHART_TYPE}],
default: Stage.Basic.Graphs.Graph.LINE_CHART_TYPE, type: Stage.Basic.GenericField.LIST_TYPE},
{id: 'timeFilter', name: 'Time range and resolution', description: 'Time range and time resolution for all defined charts',
type: Stage.Basic.GenericField.CUSTOM_TYPE, component: Stage.Basic.TimeFilter,
default: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE, defaultValue: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE}
],
UNCONFIGURED_STATE: 'unconfigured',
EMPTY_RESPONSE_STATE: 'emptyResponse',
_prepareData: function(data, xDataKey) {
const TIME_FORMAT = 'YYYY-MM-DD HH:mm:ss';
const MAX_NUMBER_OF_POINTS = 200;
const TIME_INDEX = 0;
const VALUE_INDEX = 1;
const REFERENCE_METRIC_INDEX = 0;
const NUMBER_OF_METRICS = data.length;
const NUMBER_OF_POINTS = data[REFERENCE_METRIC_INDEX].points.length;
let points = [];
// Data conversion to recharts format
// As a reference time points list, metric no. REFERENCE_METRIC_INDEX is taken
for (let i = 0; i < NUMBER_OF_POINTS; i++) {
let point = { [xDataKey]: Stage.Utils.formatTimestamp(data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX], TIME_FORMAT, null) };
for (let j = 0; j < NUMBER_OF_METRICS; j++) {
if (data[j].points[i] &&
data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX] === data[j].points[i][TIME_INDEX])
{
let metricName = data[j].name;
let pointValue = data[j].points[i][VALUE_INDEX];
point[metricName] = pointValue;
}
}
points.push(point);
}
// Data optimization (show no more than MAX_NUMBER_OF_POINTS points on the graph)
if (points.length > MAX_NUMBER_OF_POINTS) {
let optimizedPoints = [];
let delta = parseFloat(points.length / MAX_NUMBER_OF_POINTS);
for (let i = 0; i < points.length; i = i + delta) {
optimizedPoints.push(points[Math.floor(i)]);
}
points = optimizedPoints;
}
return points;
},
_getChartsMetricsList: function(charts) {
return _.chain(charts)
.filter((graph) => !_.isEmpty(graph.metric))
.map((graph) => graph.metric)
.uniq()
.value();
},
_getChartsConfiguration: function(charts, query, data) {
let chartsConfig = [];
if (!_.isEmpty(query)) {
_.forEach(data, (chart) => {
chartsConfig.push({
name: chart.name,
label: chart.name,
axisLabel: ''
});
})
} else {
_.forEach(charts, (chart) => {
let chartName = chart.metric;
if (!_.isEmpty(chartName)) {
chartsConfig.push({
name: chartName,
label: chart.label ? chart.label : chartName,
axisLabel: ''
});
}
})
chartsConfig = _.uniqBy(chartsConfig, 'name');
}
return chartsConfig;
},
_sanitizeQuery(string){
return string.replace(/;/g, '');
},
_prepareInfluxQuery: function(queries, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup) {
return _.map(queries, (queryParams) => {
let selectWhat = this._sanitizeQuery(queryParams.qSelect);
let selectFrom = this._sanitizeQuery(queryParams.qFrom);
let selectWhere = this._sanitizeQuery(queryParams.qWhere);
if (!_.isEmpty(selectWhat) && !_.isEmpty(selectFrom)) {
if ((_.includes(selectFrom, '${deploymentId}') && _.isEmpty(deploymentId)) ||
(_.includes(selectFrom, '${nodeId}') && _.isEmpty(nodeId)) ||
(_.includes(selectFrom, '${nodeInstanceId}') && _.isEmpty(nodeInstanceId)))
return {};
selectFrom = _.replace(selectFrom, '${deploymentId}', deploymentId);
selectFrom = _.replace(selectFrom, '${nodeId}', nodeId);
selectFrom = _.replace(selectFrom, '${nodeInstanceId}', nodeInstanceId);
selectWhere = _.replace(selectWhere, '${timeFilter}', `time > ${from} and time < ${to} group by time(${timeGroup})`);
if (_.isEmpty(selectWhere))
return {qSelect: selectWhat, qFrom: selectFrom};
else
return {qSelect: selectWhat, qFrom: selectFrom, qWhere: selectWhere};
} else
return {};
});
},
_isEmptyResponse: function(widget, data) {
return data.state === widget.definition.EMPTY_RESPONSE_STATE;
},
_isWidgetNotConfigured: function(widget, data) {
return data.state === widget.definition.UNCONFIGURED_STATE;
},
fetchParams: function(widget, toolbox) {
let deploymentId = toolbox.getContext().getValue('deploymentId'); | deploymentId = nodeFilterFromWidget.deploymentId;
nodeId = nodeFilterFromWidget.nodeId;
nodeInstanceId = nodeFilterFromWidget.nodeInstanceId;
}
let timeFilterFromWidget = widget.configuration.timeFilter;
let timeFilterFromContext = toolbox.getContext().getValue('timeFilter');
let timeStart = _.get(timeFilterFromContext, 'start', timeFilterFromWidget.start);
timeStart = moment(timeStart).isValid() ? `${moment(timeStart).unix()}s` : timeStart;
let timeEnd = _.get(timeFilterFromContext, 'end', timeFilterFromWidget.end);
timeEnd = moment(timeEnd).isValid() ? `${moment(timeEnd).unix()}s` : timeEnd;
let timeResolution = _.get(timeFilterFromContext, 'resolution', timeFilterFromWidget.resolution);
let timeUnit = _.get(timeFilterFromContext, 'unit', timeFilterFromWidget.unit);
let timeGroup = `${timeResolution}${timeUnit}`;
return { deploymentId, nodeId, nodeInstanceId, timeStart, timeEnd, timeGroup };
},
fetchData: function(widget, toolbox, params) {
const actions = new Stage.Common.InfluxActions(toolbox);
const deploymentId = params.deploymentId;
const nodeId = params.nodeId;
const nodeInstanceId = params.nodeInstanceId;
const metrics = this._getChartsMetricsList(widget.configuration.charts);
const from = params.timeStart;
const to = params.timeEnd;
const timeGroup = params.timeGroup;
const preparedQuery = _.head(this._prepareInfluxQuery(widget.configuration.query, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup));
if (!_.isEmpty(preparedQuery)) {
toolbox.loading(true);
return actions.doRunQuery(preparedQuery.qSelect, preparedQuery.qFrom, preparedQuery.qWhere).then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
}).catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
| let nodeId = toolbox.getContext().getValue('nodeId');
let nodeInstanceId = toolbox.getContext().getValue('nodeInstanceId');
let nodeFilterFromWidget = widget.configuration.nodeFilter;
if (nodeFilterFromWidget.deploymentId || nodeFilterFromWidget.nodeId || nodeFilterFromWidget.nodeInstanceId) { | random_line_split |
widget.js | .Basic.NodeFilter.EMPTY_VALUE, storeValueInContext: true},
{id: 'charts', name: 'Charts table', description: 'Charts configuration table', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 5, columns: [
{name: 'metric', label: 'Metric', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.MetricFilter, description: 'Metric to be presented on the chart', filterContextName: 'nodeFilter'},
{name: 'label', label: 'Label', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'Chart label'}
]},
{id: 'query', name: 'Custom Influx Query', description: 'Please note that below query builder overrides the series defined in \'Charts table\'', default: '', type: Stage.Basic.GenericField.CUSTOM_TYPE,
component: Stage.Basic.Form.Table, rows: 1, columns: [
{name: 'qSelect', label: 'SELECT', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: ''},
{name: 'qFrom', label: 'FROM', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${deploymentId} token to inject dynamic deployment ID. Example: \'/${deploymentId}\..*\.((memory_MemFree))$/\''},
{name: 'qWhere', label: 'WHERE', default: '', type: Stage.Basic.GenericField.STRING_TYPE, description: 'You can use ${timeFilter} token to inject dynamic data/time ranges.'}
]},
{id: 'type', name: 'Charts type', items: [
{name:'Line chart', value:Stage.Basic.Graphs.Graph.LINE_CHART_TYPE},
{name:'Bar chart', value:Stage.Basic.Graphs.Graph.BAR_CHART_TYPE},
{name:'Area chart', value:Stage.Basic.Graphs.Graph.AREA_CHART_TYPE}],
default: Stage.Basic.Graphs.Graph.LINE_CHART_TYPE, type: Stage.Basic.GenericField.LIST_TYPE},
{id: 'timeFilter', name: 'Time range and resolution', description: 'Time range and time resolution for all defined charts',
type: Stage.Basic.GenericField.CUSTOM_TYPE, component: Stage.Basic.TimeFilter,
default: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE, defaultValue: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE}
],
UNCONFIGURED_STATE: 'unconfigured',
EMPTY_RESPONSE_STATE: 'emptyResponse',
_prepareData: function(data, xDataKey) {
const TIME_FORMAT = 'YYYY-MM-DD HH:mm:ss';
const MAX_NUMBER_OF_POINTS = 200;
const TIME_INDEX = 0;
const VALUE_INDEX = 1;
const REFERENCE_METRIC_INDEX = 0;
const NUMBER_OF_METRICS = data.length;
const NUMBER_OF_POINTS = data[REFERENCE_METRIC_INDEX].points.length;
let points = [];
// Data conversion to recharts format
// As a reference time points list, metric no. REFERENCE_METRIC_INDEX is taken
for (let i = 0; i < NUMBER_OF_POINTS; i++) {
let point = { [xDataKey]: Stage.Utils.formatTimestamp(data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX], TIME_FORMAT, null) };
for (let j = 0; j < NUMBER_OF_METRICS; j++) {
if (data[j].points[i] &&
data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX] === data[j].points[i][TIME_INDEX])
{
let metricName = data[j].name;
let pointValue = data[j].points[i][VALUE_INDEX];
point[metricName] = pointValue;
}
}
points.push(point);
}
// Data optimization (show no more than MAX_NUMBER_OF_POINTS points on the graph)
if (points.length > MAX_NUMBER_OF_POINTS) {
let optimizedPoints = [];
let delta = parseFloat(points.length / MAX_NUMBER_OF_POINTS);
for (let i = 0; i < points.length; i = i + delta) {
optimizedPoints.push(points[Math.floor(i)]);
}
points = optimizedPoints;
}
return points;
},
_getChartsMetricsList: function(charts) {
return _.chain(charts)
.filter((graph) => !_.isEmpty(graph.metric))
.map((graph) => graph.metric)
.uniq()
.value();
},
_getChartsConfiguration: function(charts, query, data) {
let chartsConfig = [];
if (!_.isEmpty(query)) {
_.forEach(data, (chart) => {
chartsConfig.push({
name: chart.name,
label: chart.name,
axisLabel: ''
});
})
} else {
_.forEach(charts, (chart) => {
let chartName = chart.metric;
if (!_.isEmpty(chartName)) {
chartsConfig.push({
name: chartName,
label: chart.label ? chart.label : chartName,
axisLabel: ''
});
}
})
chartsConfig = _.uniqBy(chartsConfig, 'name');
}
return chartsConfig;
},
_sanitizeQuery(string) | ,
_prepareInfluxQuery: function(queries, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup) {
return _.map(queries, (queryParams) => {
let selectWhat = this._sanitizeQuery(queryParams.qSelect);
let selectFrom = this._sanitizeQuery(queryParams.qFrom);
let selectWhere = this._sanitizeQuery(queryParams.qWhere);
if (!_.isEmpty(selectWhat) && !_.isEmpty(selectFrom)) {
if ((_.includes(selectFrom, '${deploymentId}') && _.isEmpty(deploymentId)) ||
(_.includes(selectFrom, '${nodeId}') && _.isEmpty(nodeId)) ||
(_.includes(selectFrom, '${nodeInstanceId}') && _.isEmpty(nodeInstanceId)))
return {};
selectFrom = _.replace(selectFrom, '${deploymentId}', deploymentId);
selectFrom = _.replace(selectFrom, '${nodeId}', nodeId);
selectFrom = _.replace(selectFrom, '${nodeInstanceId}', nodeInstanceId);
selectWhere = _.replace(selectWhere, '${timeFilter}', `time > ${from} and time < ${to} group by time(${timeGroup})`);
if (_.isEmpty(selectWhere))
return {qSelect: selectWhat, qFrom: selectFrom};
else
return {qSelect: selectWhat, qFrom: selectFrom, qWhere: selectWhere};
} else
return {};
});
},
_isEmptyResponse: function(widget, data) {
return data.state === widget.definition.EMPTY_RESPONSE_STATE;
},
_isWidgetNotConfigured: function(widget, data) {
return data.state === widget.definition.UNCONFIGURED_STATE;
},
fetchParams: function(widget, toolbox) {
let deploymentId = toolbox.getContext().getValue('deploymentId');
let nodeId = toolbox.getContext().getValue('nodeId');
let nodeInstanceId = toolbox.getContext().getValue('nodeInstanceId');
let nodeFilterFromWidget = widget.configuration.nodeFilter;
if (nodeFilterFromWidget.deploymentId || nodeFilterFromWidget.nodeId || nodeFilterFromWidget.nodeInstanceId) {
deploymentId = nodeFilterFromWidget.deploymentId;
nodeId = nodeFilterFromWidget.nodeId;
nodeInstanceId = nodeFilterFromWidget.nodeInstanceId;
}
let timeFilterFromWidget = widget.configuration.timeFilter;
let timeFilterFromContext = toolbox.getContext().getValue('timeFilter');
let timeStart = _.get(timeFilterFromContext, 'start', timeFilterFromWidget.start);
timeStart = moment(timeStart).isValid() ? `${moment(timeStart).unix()}s` : timeStart;
let timeEnd = _.get(timeFilterFromContext, 'end', timeFilterFromWidget.end);
timeEnd = moment(timeEnd).isValid() ? `${moment(timeEnd).unix()}s` : timeEnd;
let timeResolution = _.get(timeFilterFromContext, 'resolution', timeFilterFromWidget.resolution);
let timeUnit = _.get(timeFilterFromContext, 'unit', timeFilterFromWidget.unit);
let timeGroup = `${timeResolution}${timeUnit}`;
return { deploymentId, nodeId, nodeInstanceId, timeStart, timeEnd, timeGroup };
},
fetchData: function(widget, toolbox, params) {
const actions = new Stage.Common.InfluxActions(toolbox);
const deploymentId = params.deploymentId;
const nodeId = params.nodeId;
const nodeInstanceId = params.nodeInstanceId;
const metrics = this._getChartsMetricsList(widget.configuration.charts);
const from = params.timeStart;
const to = params.timeEnd;
const timeGroup = params.timeGroup;
const preparedQuery = _.head(this._prepareInfluxQuery(widget.configuration.query, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup));
if (!_.isEmpty(preparedQuery)) {
toolbox.loading(true);
return actions.doRunQuery(preparedQuery.qSelect, preparedQuery.qFrom, preparedQuery.qWhere).then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
}).catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
| {
return string.replace(/;/g, '');
} | identifier_body |
widget.js | },
{id: 'type', name: 'Charts type', items: [
{name:'Line chart', value:Stage.Basic.Graphs.Graph.LINE_CHART_TYPE},
{name:'Bar chart', value:Stage.Basic.Graphs.Graph.BAR_CHART_TYPE},
{name:'Area chart', value:Stage.Basic.Graphs.Graph.AREA_CHART_TYPE}],
default: Stage.Basic.Graphs.Graph.LINE_CHART_TYPE, type: Stage.Basic.GenericField.LIST_TYPE},
{id: 'timeFilter', name: 'Time range and resolution', description: 'Time range and time resolution for all defined charts',
type: Stage.Basic.GenericField.CUSTOM_TYPE, component: Stage.Basic.TimeFilter,
default: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE, defaultValue: Stage.Basic.TimeFilter.INFLUX_DEFAULT_VALUE}
],
UNCONFIGURED_STATE: 'unconfigured',
EMPTY_RESPONSE_STATE: 'emptyResponse',
_prepareData: function(data, xDataKey) {
const TIME_FORMAT = 'YYYY-MM-DD HH:mm:ss';
const MAX_NUMBER_OF_POINTS = 200;
const TIME_INDEX = 0;
const VALUE_INDEX = 1;
const REFERENCE_METRIC_INDEX = 0;
const NUMBER_OF_METRICS = data.length;
const NUMBER_OF_POINTS = data[REFERENCE_METRIC_INDEX].points.length;
let points = [];
// Data conversion to recharts format
// As a reference time points list, metric no. REFERENCE_METRIC_INDEX is taken
for (let i = 0; i < NUMBER_OF_POINTS; i++) {
let point = { [xDataKey]: Stage.Utils.formatTimestamp(data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX], TIME_FORMAT, null) };
for (let j = 0; j < NUMBER_OF_METRICS; j++) {
if (data[j].points[i] &&
data[REFERENCE_METRIC_INDEX].points[i][TIME_INDEX] === data[j].points[i][TIME_INDEX])
{
let metricName = data[j].name;
let pointValue = data[j].points[i][VALUE_INDEX];
point[metricName] = pointValue;
}
}
points.push(point);
}
// Data optimization (show no more than MAX_NUMBER_OF_POINTS points on the graph)
if (points.length > MAX_NUMBER_OF_POINTS) {
let optimizedPoints = [];
let delta = parseFloat(points.length / MAX_NUMBER_OF_POINTS);
for (let i = 0; i < points.length; i = i + delta) {
optimizedPoints.push(points[Math.floor(i)]);
}
points = optimizedPoints;
}
return points;
},
_getChartsMetricsList: function(charts) {
return _.chain(charts)
.filter((graph) => !_.isEmpty(graph.metric))
.map((graph) => graph.metric)
.uniq()
.value();
},
_getChartsConfiguration: function(charts, query, data) {
let chartsConfig = [];
if (!_.isEmpty(query)) {
_.forEach(data, (chart) => {
chartsConfig.push({
name: chart.name,
label: chart.name,
axisLabel: ''
});
})
} else {
_.forEach(charts, (chart) => {
let chartName = chart.metric;
if (!_.isEmpty(chartName)) {
chartsConfig.push({
name: chartName,
label: chart.label ? chart.label : chartName,
axisLabel: ''
});
}
})
chartsConfig = _.uniqBy(chartsConfig, 'name');
}
return chartsConfig;
},
_sanitizeQuery(string){
return string.replace(/;/g, '');
},
_prepareInfluxQuery: function(queries, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup) {
return _.map(queries, (queryParams) => {
let selectWhat = this._sanitizeQuery(queryParams.qSelect);
let selectFrom = this._sanitizeQuery(queryParams.qFrom);
let selectWhere = this._sanitizeQuery(queryParams.qWhere);
if (!_.isEmpty(selectWhat) && !_.isEmpty(selectFrom)) {
if ((_.includes(selectFrom, '${deploymentId}') && _.isEmpty(deploymentId)) ||
(_.includes(selectFrom, '${nodeId}') && _.isEmpty(nodeId)) ||
(_.includes(selectFrom, '${nodeInstanceId}') && _.isEmpty(nodeInstanceId)))
return {};
selectFrom = _.replace(selectFrom, '${deploymentId}', deploymentId);
selectFrom = _.replace(selectFrom, '${nodeId}', nodeId);
selectFrom = _.replace(selectFrom, '${nodeInstanceId}', nodeInstanceId);
selectWhere = _.replace(selectWhere, '${timeFilter}', `time > ${from} and time < ${to} group by time(${timeGroup})`);
if (_.isEmpty(selectWhere))
return {qSelect: selectWhat, qFrom: selectFrom};
else
return {qSelect: selectWhat, qFrom: selectFrom, qWhere: selectWhere};
} else
return {};
});
},
_isEmptyResponse: function(widget, data) {
return data.state === widget.definition.EMPTY_RESPONSE_STATE;
},
_isWidgetNotConfigured: function(widget, data) {
return data.state === widget.definition.UNCONFIGURED_STATE;
},
fetchParams: function(widget, toolbox) {
let deploymentId = toolbox.getContext().getValue('deploymentId');
let nodeId = toolbox.getContext().getValue('nodeId');
let nodeInstanceId = toolbox.getContext().getValue('nodeInstanceId');
let nodeFilterFromWidget = widget.configuration.nodeFilter;
if (nodeFilterFromWidget.deploymentId || nodeFilterFromWidget.nodeId || nodeFilterFromWidget.nodeInstanceId) {
deploymentId = nodeFilterFromWidget.deploymentId;
nodeId = nodeFilterFromWidget.nodeId;
nodeInstanceId = nodeFilterFromWidget.nodeInstanceId;
}
let timeFilterFromWidget = widget.configuration.timeFilter;
let timeFilterFromContext = toolbox.getContext().getValue('timeFilter');
let timeStart = _.get(timeFilterFromContext, 'start', timeFilterFromWidget.start);
timeStart = moment(timeStart).isValid() ? `${moment(timeStart).unix()}s` : timeStart;
let timeEnd = _.get(timeFilterFromContext, 'end', timeFilterFromWidget.end);
timeEnd = moment(timeEnd).isValid() ? `${moment(timeEnd).unix()}s` : timeEnd;
let timeResolution = _.get(timeFilterFromContext, 'resolution', timeFilterFromWidget.resolution);
let timeUnit = _.get(timeFilterFromContext, 'unit', timeFilterFromWidget.unit);
let timeGroup = `${timeResolution}${timeUnit}`;
return { deploymentId, nodeId, nodeInstanceId, timeStart, timeEnd, timeGroup };
},
fetchData: function(widget, toolbox, params) {
const actions = new Stage.Common.InfluxActions(toolbox);
const deploymentId = params.deploymentId;
const nodeId = params.nodeId;
const nodeInstanceId = params.nodeInstanceId;
const metrics = this._getChartsMetricsList(widget.configuration.charts);
const from = params.timeStart;
const to = params.timeEnd;
const timeGroup = params.timeGroup;
const preparedQuery = _.head(this._prepareInfluxQuery(widget.configuration.query, deploymentId, nodeId, nodeInstanceId, from, to, timeGroup));
if (!_.isEmpty(preparedQuery)) {
toolbox.loading(true);
return actions.doRunQuery(preparedQuery.qSelect, preparedQuery.qFrom, preparedQuery.qWhere).then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
}).catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check your Influx query syntax and try again. Error: ' +
error.message || error);
});
} else if (!_.isEmpty(deploymentId) && !_.isEmpty(nodeInstanceId) && !_.isEmpty(metrics)) {
toolbox.loading(true);
return actions.doGetMetric(deploymentId, nodeId, nodeInstanceId, metrics, from, to, timeGroup)
.then((data) => {
toolbox.loading(false);
let formattedResponse
= _.map(data, (metric) => ({name: _.last(_.split(metric.name, '.')), points: metric.points}));
return Promise.resolve(_.isEmpty(data) ? {state: widget.definition.EMPTY_RESPONSE_STATE} : formattedResponse);
})
.catch((error) => {
toolbox.loading(false);
return Promise.reject('There was a problem while querying for data. ' +
'Please check Deployment ID, Node ID, Node Instance ID, Metric and time range. Error: ' +
error.message || error);
});
} else {
toolbox.loading(false);
return Promise.resolve({state: widget.definition.UNCONFIGURED_STATE});
}
},
render: function(widget,data,error,toolbox) {
let {charts, query, type} = widget.configuration;
let {Message, Icon} = Stage.Basic;
if (_.isEmpty(data)) {
return (
<Stage.Basic.Loading/>
);
} else if (this._isWidgetNotConfigured(widget, data)) | {
return (
<Message info icon>
<Icon name='info' />
Please select deployment, node instance and metric in widget's configuration to present the data graph.
</Message>
);
} | conditional_block |
|
parse.go | ds = p.closeStruct(state)
}
} else {
p.err = "mismatched delimiter"
return 1
}
p.popState(ds)
}
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS)
return 1
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS)
return 1
}
}
func strEqBuf(str string, buf []byte) bool {
if len(str) != len(buf) {
return false
}
for i, b := range buf {
if str[i] != b {
return false
}
}
return true
}
// XXX This will need to be ported from the original janet.
// XXX avoiding casting to string will be more efficient.
func scanNumber(s string, out *float64) bool {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return false
}
*out = v
return true
}
func tokenchar(p *Parser, state *ParseState, c byte) int {
var ret Value
var numval float64
if isSymbolChar(c) {
p.buf = append(p.buf, c)
if c > 127 {
state.argn = 1 /* Use to indicate non ascii */
}
return 1
}
/* Token finished */
startDig := p.buf[0] >= '0' && p.buf[0] <= '9'
startNum := startDig || p.buf[0] == '-' || p.buf[0] == '+' || p.buf[0] == '.'
if p.buf[0] == ':' {
kwStr := p.buf[1:]
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.Valid(kwStr)
if !valid {
p.err = "invalid utf-8 in keyword"
return 0
}
ret = Keyword(kwStr)
} else if startNum && scanNumber(string(p.buf), &numval) {
ret = Number(numval)
} else if strEqBuf("nil", p.buf) {
ret = nil
} else if strEqBuf("false", p.buf) {
ret = Bool(false)
} else if strEqBuf("true", p.buf) {
ret = Bool(true)
} else {
if startDig {
p.err = "symbol literal cannot start with a digit"
return 0
} else {
symStr := string(p.buf)
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.ValidString(symStr)
if !valid {
p.err = "invalid utf-8 in symbol"
return 0
}
ret = Symbol(symStr)
}
}
p.buf = p.buf[:0]
p.popState(ret)
return 0
}
func escapeh(p *Parser, state *ParseState, c byte) int {
digit := toHex(c)
if digit < 0 {
p.err = "invalid hex digit in hex escape"
return 1
}
state.argn = (state.argn << 4) + digit
state.counter--
if state.counter == 0 {
p.buf = append(p.buf, byte(state.argn&0xFF))
state.argn = 0
state.consumer = stringchar
}
return 1
}
func escape1(p *Parser, state *ParseState, c byte) int {
e := checkEscape(c)
if e < 0 {
p.err = "invalid string escape sequence"
return 1
}
if c == 'x' {
state.counter = 2
state.argn = 0
state.consumer = escapeh
} else {
p.buf = append(p.buf, c)
state.consumer = stringchar
}
return 1
}
func stringend(p *Parser, state *ParseState) int {
var ret Value
buf := p.buf
if (state.flags & PFLAG_LONGSTRING) != 0 {
/* Check for leading newline character so we can remove it */
if buf[0] == '\n' {
buf = buf[1:]
}
if len(buf) > 0 && buf[len(buf)-1] == '\n' {
buf = buf[:len(buf)-1]
}
}
if (state.flags & PFLAG_BUFFER) != 0 {
b := NewBuffer(len(buf))
_, _ = b.Buf.Write(buf)
ret = b
} else {
ret = String(buf)
}
p.buf = []byte{}
p.popState(ret)
return 1
}
func stringchar(p *Parser, state *ParseState, c byte) int {
/* Enter escape */
if c == '\\' {
state.consumer = escape1
return 1
}
/* String end */
if c == '"' {
return stringend(p, state)
}
/* normal char */
if c != '\n' && c != '\r' {
p.buf = append(p.buf, c)
}
return 1
}
const PFLAG_INSTRING = 0x100000
const PFLAG_END_CANDIDATE = 0x200000
func longstring(p *Parser, state *ParseState, c byte) int {
if (state.flags & PFLAG_INSTRING) != 0 {
/* We are inside the long string */
if c == '`' {
state.flags |= PFLAG_END_CANDIDATE
state.flags &= ^PFLAG_INSTRING
state.counter = 1 /* Use counter to keep track of number of '=' seen */
return 1
}
p.buf = append(p.buf, c)
return 1
} else if (state.flags & PFLAG_END_CANDIDATE) != 0 {
/* We are checking a potential end of the string */
if state.counter == state.argn {
stringend(p, state)
return 0
}
if c == '`' && state.counter < state.argn {
state.counter += 1
return 1
}
/* Failed end candidate */
for i := 0; i < state.counter; i++ {
p.buf = append(p.buf, '`')
}
p.buf = append(p.buf, c)
state.counter = 0
state.flags &= ^PFLAG_END_CANDIDATE
state.flags |= PFLAG_INSTRING
return 1
} else {
/* We are at beginning of string */
state.argn += 1
if c != '`' {
state.flags |= PFLAG_INSTRING
p.buf = append(p.buf, c)
}
return 1
}
}
func comment(p *Parser, state *ParseState, c byte) int {
if c == '\n' {
p.states = p.states[:len(p.states)-1]
p.buf = p.buf[:0]
} else {
p.buf = append(p.buf, c)
}
return 1
}
func atsign(p *Parser, state *ParseState, c byte) int {
p.states = p.states[:len(p.states)-1]
switch c {
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS|PFLAG_ATSYM)
return 1
case '"':
p.pushState(stringchar, PFLAG_BUFFER|PFLAG_STRING)
return 1
case '`':
p.pushState(longstring, PFLAG_BUFFER|PFLAG_LONGSTRING)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS|PFLAG_ATSYM)
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS|PFLAG_ATSYM)
return 1
default:
break
}
p.pushState(tokenchar, PFLAG_TOKEN)
p.buf = append(p.buf, '@')
return 0
}
// Public api
func (parser *Parser) Init() {
parser.args = []Value{}
parser.states = []ParseState{}
parser.buf = []byte{}
parser.err = ""
parser.lookback = 0
parser.line = 1
parser.column = 0
parser.pending = 0
parser.flag = 0
parser.pushState(root, PFLAG_CONTAINER)
}
func (parser *Parser) Consume(c byte) {
consumed := 0
parser.checkDead()
if c == '\r' {
parser.line += 1
parser.column = 0
} else if c == '\n' {
parser.column = 0
if parser.lookback != '\r' {
parser.line += 1
}
} else {
parser.column += 1
}
for consumed == 0 && parser.err == "" {
state := &parser.states[len(parser.states)-1]
consumed = state.consumer(parser, state, c)
}
parser.lookback = c
}
func (parser *Parser) | Produce | identifier_name |
|
parse.go | letter */
func toHex(c byte) int {
if c >= '0' && c <= '9' {
return int(c) - '0'
} else if c >= 'A' && c <= 'F' {
return 10 + int(c) - 'A'
} else if c >= 'a' && c <= 'f' {
return 10 + int(c) - 'a'
} else {
return -1
}
}
func (parser *Parser) checkDead() {
if parser.flag != 0 {
JanetPanic("parser is dead, cannot consume")
}
if parser.err != "" {
JanetPanic("parser has unchecked error, cannot consume")
}
}
func (parser *Parser) pushState(consumer Consumer, flags int) {
state := ParseState{
counter: 0,
argn: 0,
flags: flags,
consumer: consumer,
line: parser.line,
column: parser.column,
}
parser.states = append(parser.states, state)
}
func (p *Parser) popState(val Value) {
for {
top := p.states[len(p.states)-1]
p.states = p.states[:len(p.states)-1]
newtop := &p.states[len(p.states)-1]
if (newtop.flags & PFLAG_CONTAINER) != 0 {
switch val := val.(type) {
case *Tuple:
val.Line = top.line
val.Column = top.column
default:
}
newtop.argn += 1
/* Keep track of number of values in the root state */
if len(p.states) == 1 {
p.pending += 1
}
p.args = append(p.args, val)
return
} else if (newtop.flags & PFLAG_READERMAC) != 0 {
which := "<unknown>"
t := NewTuple(2, 2)
c := newtop.flags & 0xFF
switch c {
case '\'':
which = "quote"
case ',':
which = "unquote"
case ';':
which = "splice"
case '|':
which = "short-fn"
case '~':
which = "quasiquote"
default:
}
t.Vals[0] = Symbol(which)
t.Vals[1] = val
/* Quote source mapping info */
t.Line = newtop.line
t.Column = newtop.column
val = t
} else {
return
}
} | for i := state.argn - 1; i >= 0; i = i - 1 {
array.Data[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return array
}
func (p *Parser) closeTuple(state *ParseState, flags int) *Tuple {
tup := NewTuple(state.argn, state.argn)
tup.Flags = flags
for i := state.argn - 1; i >= 0; i = i - 1 {
tup.Vals[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return tup
}
func (p *Parser) closeStruct(state *ParseState) *Struct {
/*
JanetKV *st = janet_struct_begin(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_struct_put(st, key, value);
}
return janet_wrap_struct(janet_struct_end(st));
*/
panic("XXX")
}
func (p *Parser) closeTable(state *ParseState) *Table {
/*
JanetTable *table = janet_table(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_table_put(table, key, value);
}
return janet_wrap_table(table);
*/
panic("XXX")
}
func root(p *Parser, state *ParseState, c byte) int {
switch c {
default:
if isWhitespace(c) {
return 1
}
if !isSymbolChar(c) {
p.err = "unexpected character"
return 1
}
p.pushState(tokenchar, PFLAG_TOKEN)
return 0
case '\'', ',', ';', '~', '|':
p.pushState(root, PFLAG_READERMAC|int(c))
return 1
case '"':
p.pushState(stringchar, PFLAG_STRING)
return 1
case '#':
p.pushState(comment, PFLAG_COMMENT)
return 1
case '@':
p.pushState(atsign, PFLAG_ATSYM)
return 1
case '`':
p.pushState(longstring, PFLAG_LONGSTRING)
return 1
case ')', ']', '}':
{
var ds Value
if len(p.states) == 1 {
p.err = "unexpected delimiter"
return 1
}
if (c == ')' && (state.flags&PFLAG_PARENS) != 0) ||
(c == ']' && (state.flags&PFLAG_SQRBRACKETS) != 0) {
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeArray(state)
} else {
tupFlags := 0
if c == ']' {
tupFlags = JANET_TUPLE_FLAG_BRACKETCTOR
}
ds = p.closeTuple(state, tupFlags)
}
} else if c == '}' && ((state.flags & PFLAG_CURLYBRACKETS) != 0) {
if (state.argn % 2) == 1 {
p.err = "struct and table literals expect even number of arguments"
return 1
}
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeTable(state)
} else {
ds = p.closeStruct(state)
}
} else {
p.err = "mismatched delimiter"
return 1
}
p.popState(ds)
}
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS)
return 1
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS)
return 1
}
}
func strEqBuf(str string, buf []byte) bool {
if len(str) != len(buf) {
return false
}
for i, b := range buf {
if str[i] != b {
return false
}
}
return true
}
// XXX This will need to be ported from the original janet.
// XXX avoiding casting to string will be more efficient.
func scanNumber(s string, out *float64) bool {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return false
}
*out = v
return true
}
func tokenchar(p *Parser, state *ParseState, c byte) int {
var ret Value
var numval float64
if isSymbolChar(c) {
p.buf = append(p.buf, c)
if c > 127 {
state.argn = 1 /* Use to indicate non ascii */
}
return 1
}
/* Token finished */
startDig := p.buf[0] >= '0' && p.buf[0] <= '9'
startNum := startDig || p.buf[0] == '-' || p.buf[0] == '+' || p.buf[0] == '.'
if p.buf[0] == ':' {
kwStr := p.buf[1:]
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.Valid(kwStr)
if !valid {
p.err = "invalid utf-8 in keyword"
return 0
}
ret = Keyword(kwStr)
} else if startNum && scanNumber(string(p.buf), &numval) {
ret = Number(numval)
} else if strEqBuf("nil", p.buf) {
ret = nil
} else if strEqBuf("false", p.buf) {
ret = Bool(false)
} else if strEqBuf("true", p.buf) {
ret = Bool(true)
} else {
if startDig {
p.err = "symbol literal cannot start with a digit"
return 0
} else {
symStr := string(p.buf)
/* Don't do full utf-8 check unless we have seen non ascii characters. | }
func (p *Parser) closeArray(state *ParseState) *Array {
array := NewArray(state.argn, state.argn) | random_line_split |
parse.go | */
func toHex(c byte) int {
if c >= '0' && c <= '9' {
return int(c) - '0'
} else if c >= 'A' && c <= 'F' {
return 10 + int(c) - 'A'
} else if c >= 'a' && c <= 'f' {
return 10 + int(c) - 'a'
} else {
return -1
}
}
func (parser *Parser) checkDead() {
if parser.flag != 0 {
JanetPanic("parser is dead, cannot consume")
}
if parser.err != "" {
JanetPanic("parser has unchecked error, cannot consume")
}
}
func (parser *Parser) pushState(consumer Consumer, flags int) {
state := ParseState{
counter: 0,
argn: 0,
flags: flags,
consumer: consumer,
line: parser.line,
column: parser.column,
}
parser.states = append(parser.states, state)
}
func (p *Parser) popState(val Value) {
for | t := NewTuple(2, 2)
c := newtop.flags & 0xFF
switch c {
case '\'':
which = "quote"
case ',':
which = "unquote"
case ';':
which = "splice"
case '|':
which = "short-fn"
case '~':
which = "quasiquote"
default:
}
t.Vals[0] = Symbol(which)
t.Vals[1] = val
/* Quote source mapping info */
t.Line = newtop.line
t.Column = newtop.column
val = t
} else {
return
}
}
}
func (p *Parser) closeArray(state *ParseState) *Array {
array := NewArray(state.argn, state.argn)
for i := state.argn - 1; i >= 0; i = i - 1 {
array.Data[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return array
}
func (p *Parser) closeTuple(state *ParseState, flags int) *Tuple {
tup := NewTuple(state.argn, state.argn)
tup.Flags = flags
for i := state.argn - 1; i >= 0; i = i - 1 {
tup.Vals[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return tup
}
func (p *Parser) closeStruct(state *ParseState) *Struct {
/*
JanetKV *st = janet_struct_begin(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_struct_put(st, key, value);
}
return janet_wrap_struct(janet_struct_end(st));
*/
panic("XXX")
}
func (p *Parser) closeTable(state *ParseState) *Table {
/*
JanetTable *table = janet_table(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_table_put(table, key, value);
}
return janet_wrap_table(table);
*/
panic("XXX")
}
func root(p *Parser, state *ParseState, c byte) int {
switch c {
default:
if isWhitespace(c) {
return 1
}
if !isSymbolChar(c) {
p.err = "unexpected character"
return 1
}
p.pushState(tokenchar, PFLAG_TOKEN)
return 0
case '\'', ',', ';', '~', '|':
p.pushState(root, PFLAG_READERMAC|int(c))
return 1
case '"':
p.pushState(stringchar, PFLAG_STRING)
return 1
case '#':
p.pushState(comment, PFLAG_COMMENT)
return 1
case '@':
p.pushState(atsign, PFLAG_ATSYM)
return 1
case '`':
p.pushState(longstring, PFLAG_LONGSTRING)
return 1
case ')', ']', '}':
{
var ds Value
if len(p.states) == 1 {
p.err = "unexpected delimiter"
return 1
}
if (c == ')' && (state.flags&PFLAG_PARENS) != 0) ||
(c == ']' && (state.flags&PFLAG_SQRBRACKETS) != 0) {
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeArray(state)
} else {
tupFlags := 0
if c == ']' {
tupFlags = JANET_TUPLE_FLAG_BRACKETCTOR
}
ds = p.closeTuple(state, tupFlags)
}
} else if c == '}' && ((state.flags & PFLAG_CURLYBRACKETS) != 0) {
if (state.argn % 2) == 1 {
p.err = "struct and table literals expect even number of arguments"
return 1
}
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeTable(state)
} else {
ds = p.closeStruct(state)
}
} else {
p.err = "mismatched delimiter"
return 1
}
p.popState(ds)
}
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS)
return 1
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS)
return 1
}
}
func strEqBuf(str string, buf []byte) bool {
if len(str) != len(buf) {
return false
}
for i, b := range buf {
if str[i] != b {
return false
}
}
return true
}
// XXX This will need to be ported from the original janet.
// XXX avoiding casting to string will be more efficient.
func scanNumber(s string, out *float64) bool {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return false
}
*out = v
return true
}
func tokenchar(p *Parser, state *ParseState, c byte) int {
var ret Value
var numval float64
if isSymbolChar(c) {
p.buf = append(p.buf, c)
if c > 127 {
state.argn = 1 /* Use to indicate non ascii */
}
return 1
}
/* Token finished */
startDig := p.buf[0] >= '0' && p.buf[0] <= '9'
startNum := startDig || p.buf[0] == '-' || p.buf[0] == '+' || p.buf[0] == '.'
if p.buf[0] == ':' {
kwStr := p.buf[1:]
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.Valid(kwStr)
if !valid {
p.err = "invalid utf-8 in keyword"
return 0
}
ret = Keyword(kwStr)
} else if startNum && scanNumber(string(p.buf), &numval) {
ret = Number(numval)
} else if strEqBuf("nil", p.buf) {
ret = nil
} else if strEqBuf("false", p.buf) {
ret = Bool(false)
} else if strEqBuf("true", p.buf) {
ret = Bool(true)
} else {
if startDig {
p.err = "symbol literal cannot start with a digit"
return 0
} else {
symStr := string(p.buf)
/* Don't do full utf-8 check unless we have seen non ascii characters. | {
top := p.states[len(p.states)-1]
p.states = p.states[:len(p.states)-1]
newtop := &p.states[len(p.states)-1]
if (newtop.flags & PFLAG_CONTAINER) != 0 {
switch val := val.(type) {
case *Tuple:
val.Line = top.line
val.Column = top.column
default:
}
newtop.argn += 1
/* Keep track of number of values in the root state */
if len(p.states) == 1 {
p.pending += 1
}
p.args = append(p.args, val)
return
} else if (newtop.flags & PFLAG_READERMAC) != 0 {
which := "<unknown>" | conditional_block |
parse.go |
func checkEscape(c byte) int {
switch c {
default:
return -1
case 'x':
return 1
case 'n':
return '\n'
case 't':
return '\t'
case 'r':
return '\r'
case '0':
return 0
case 'z':
return 0
case 'f':
return '\f'
case 'v':
return '\v'
case 'e':
return 27
case '"':
return '"'
case '\\':
return '\\'
}
}
/* Get hex digit from a letter */
func toHex(c byte) int {
if c >= '0' && c <= '9' {
return int(c) - '0'
} else if c >= 'A' && c <= 'F' {
return 10 + int(c) - 'A'
} else if c >= 'a' && c <= 'f' {
return 10 + int(c) - 'a'
} else {
return -1
}
}
func (parser *Parser) checkDead() {
if parser.flag != 0 {
JanetPanic("parser is dead, cannot consume")
}
if parser.err != "" {
JanetPanic("parser has unchecked error, cannot consume")
}
}
func (parser *Parser) pushState(consumer Consumer, flags int) {
state := ParseState{
counter: 0,
argn: 0,
flags: flags,
consumer: consumer,
line: parser.line,
column: parser.column,
}
parser.states = append(parser.states, state)
}
func (p *Parser) popState(val Value) {
for {
top := p.states[len(p.states)-1]
p.states = p.states[:len(p.states)-1]
newtop := &p.states[len(p.states)-1]
if (newtop.flags & PFLAG_CONTAINER) != 0 {
switch val := val.(type) {
case *Tuple:
val.Line = top.line
val.Column = top.column
default:
}
newtop.argn += 1
/* Keep track of number of values in the root state */
if len(p.states) == 1 {
p.pending += 1
}
p.args = append(p.args, val)
return
} else if (newtop.flags & PFLAG_READERMAC) != 0 {
which := "<unknown>"
t := NewTuple(2, 2)
c := newtop.flags & 0xFF
switch c {
case '\'':
which = "quote"
case ',':
which = "unquote"
case ';':
which = "splice"
case '|':
which = "short-fn"
case '~':
which = "quasiquote"
default:
}
t.Vals[0] = Symbol(which)
t.Vals[1] = val
/* Quote source mapping info */
t.Line = newtop.line
t.Column = newtop.column
val = t
} else {
return
}
}
}
func (p *Parser) closeArray(state *ParseState) *Array {
array := NewArray(state.argn, state.argn)
for i := state.argn - 1; i >= 0; i = i - 1 {
array.Data[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return array
}
func (p *Parser) closeTuple(state *ParseState, flags int) *Tuple {
tup := NewTuple(state.argn, state.argn)
tup.Flags = flags
for i := state.argn - 1; i >= 0; i = i - 1 {
tup.Vals[i] = p.args[len(p.args)-1]
p.args = p.args[:len(p.args)-1]
}
return tup
}
func (p *Parser) closeStruct(state *ParseState) *Struct {
/*
JanetKV *st = janet_struct_begin(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_struct_put(st, key, value);
}
return janet_wrap_struct(janet_struct_end(st));
*/
panic("XXX")
}
func (p *Parser) closeTable(state *ParseState) *Table {
/*
JanetTable *table = janet_table(state->argn >> 1);
for (int32_t i = state->argn; i > 0; i -= 2) {
Janet value = p->args[--p->argcount];
Janet key = p->args[--p->argcount];
janet_table_put(table, key, value);
}
return janet_wrap_table(table);
*/
panic("XXX")
}
func root(p *Parser, state *ParseState, c byte) int {
switch c {
default:
if isWhitespace(c) {
return 1
}
if !isSymbolChar(c) {
p.err = "unexpected character"
return 1
}
p.pushState(tokenchar, PFLAG_TOKEN)
return 0
case '\'', ',', ';', '~', '|':
p.pushState(root, PFLAG_READERMAC|int(c))
return 1
case '"':
p.pushState(stringchar, PFLAG_STRING)
return 1
case '#':
p.pushState(comment, PFLAG_COMMENT)
return 1
case '@':
p.pushState(atsign, PFLAG_ATSYM)
return 1
case '`':
p.pushState(longstring, PFLAG_LONGSTRING)
return 1
case ')', ']', '}':
{
var ds Value
if len(p.states) == 1 {
p.err = "unexpected delimiter"
return 1
}
if (c == ')' && (state.flags&PFLAG_PARENS) != 0) ||
(c == ']' && (state.flags&PFLAG_SQRBRACKETS) != 0) {
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeArray(state)
} else {
tupFlags := 0
if c == ']' {
tupFlags = JANET_TUPLE_FLAG_BRACKETCTOR
}
ds = p.closeTuple(state, tupFlags)
}
} else if c == '}' && ((state.flags & PFLAG_CURLYBRACKETS) != 0) {
if (state.argn % 2) == 1 {
p.err = "struct and table literals expect even number of arguments"
return 1
}
if (state.flags & PFLAG_ATSYM) != 0 {
ds = p.closeTable(state)
} else {
ds = p.closeStruct(state)
}
} else {
p.err = "mismatched delimiter"
return 1
}
p.popState(ds)
}
return 1
case '(':
p.pushState(root, PFLAG_CONTAINER|PFLAG_PARENS)
return 1
case '[':
p.pushState(root, PFLAG_CONTAINER|PFLAG_SQRBRACKETS)
return 1
case '{':
p.pushState(root, PFLAG_CONTAINER|PFLAG_CURLYBRACKETS)
return 1
}
}
func strEqBuf(str string, buf []byte) bool {
if len(str) != len(buf) {
return false
}
for i, b := range buf {
if str[i] != b {
return false
}
}
return true
}
// XXX This will need to be ported from the original janet.
// XXX avoiding casting to string will be more efficient.
func scanNumber(s string, out *float64) bool {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return false
}
*out = v
return true
}
func tokenchar(p *Parser, state *ParseState, c byte) int {
var ret Value
var numval float64
if isSymbolChar(c) {
p.buf = append(p.buf, c)
if c > 127 {
state.argn = 1 /* Use to indicate non ascii */
}
return 1
}
/* Token finished */
startDig := p.buf[0] >= '0' && p.buf[0] <= '9'
startNum := startDig || p.buf[0] == '-' || p.buf[0] == '+' || p.buf[0] == '.'
if p.buf[0] == ':' {
kwStr := p.buf[1:]
/* Don't do full utf-8 check unless we have seen non ascii characters. */
valid := (state.argn == 0) || utf8.Valid(kwStr)
if !valid {
p.err = "invalid utf-8 in keyword"
return 0 | {
return (symchars[c>>5] & (uint32(1) << (c & 0x1F))) != 0
} | identifier_body |
|
flux_calc.py |
in_dir = out_dir0 + 'gridded_polygons/'
out_dir = out_dir0 + 'fluxes/'
Lfun.make_dir(out_dir, clean=True)
# load polygon results
gpoly_dict = pickle.load(open(in_dir + 'gpoly_dict.p', 'rb'))
shared_faces_dict = pickle.load(open(in_dir + 'shared_faces.p', 'rb'))
# and specify z levels
z_dict = {0:5, 1:-5, 2:-25, 3:-50, 4:-100, 5:-150, 6:-350}
NLAY = len(z_dict) - 1
#%% find fluxes
dt0 = datetime(whichyear,1,1)
if Ldir['lo_env'] == 'pm_mac':
# have 2006.07.01-31 = days 181 to 211
# big convergence errors for 7/29, 7/30 = 209, 210
day_list = [208, 209] #range(181,211+1)
elif Ldir['lo_env'] == 'pm_fjord':
# in /data1/parker/roms/output/salish_2006_4_lp
# we have f2006.01.04 through 2006.12.29
# = days 3 to 362
day_list = range(3, 363)
counter = 0
for nday in day_list:
tt0 = time.time()
# specify ROMS file to work on
dt0 = datetime(whichyear,1,1)
dt = dt0 + timedelta(days=nday)
f_string = 'f' + dt.strftime('%Y.%m.%d')
print('\nWorking on day %s (nday = %3d)' % (f_string, nday))
R_in_dir = R_in_dir0 + f_string + '/'
R_fn = R_in_dir + 'low_passed.nc'
ds = nc.Dataset(R_fn)
u = ds['u'][:].squeeze()
v = ds['v'][:].squeeze()
w0 = ds['w'][0,-1,:,:].squeeze()
G = zrfun.get_basic_info(R_fn, only_G=True)
S = zrfun.get_basic_info(R_fn, only_S=True)
zeta = ds['zeta'][0,:,:]
z_rho, z_w = zrfun.get_z(G['h'], zeta, S)
DA = G['DX'] * G['DY']
DAm = np.ma.masked_where(zeta.mask, DA)
DZ = np.diff(z_w, axis=0)
# make versions of DA masked for the z layers
zDAm = DAm * np.ones((NLAY, 1, 1))
for nlay in range(NLAY):
z_lower = z_dict[nlay + 1] # lower z
zmask = (-G['h'] >= z_lower)
draft_DA = DA.copy()
draft_DA[zmask] = 0.
this_DAm = np.ma.masked_where(zeta.mask, draft_DA)
zDAm[nlay,:,:] = this_DAm
# Z on u and v grids
Zu = z_rho[:, :, :-1] + np.diff(z_rho, axis=2)/2
Zv = z_rho[:, :-1, :] + np.diff(z_rho, axis=1)/2
zmu_dict = dict()
zmv_dict = dict()
for nlay in range(NLAY):
z_lower = z_dict[nlay + 1] # lower z
z_upper = z_dict[nlay] # upper z
zmu_dict[nlay] = (Zu > z_lower) & (Zu <= z_upper)
zmv_dict[nlay] = (Zv > z_lower) & (Zv <= z_upper)
layu_dict = dict()
layv_dict = dict()
# DZ on u and v grids
DZu = DZ[:, :, :-1] + np.diff(DZ, axis=2)/2
DZv = DZ[:, :-1, :] + np.diff(DZ, axis=1)/2
# DX and DY on u and v grids
DYu = G['DY'][:, :-1]
DXv = G['DX'][:-1, :] + np.diff(G['DX'], axis=0)/2
# cell areas for u and v grid box faces
DAHu = DZu * DYu
DAHv = DZv * DXv
# Initialize arrays to store transport data.
face_trans_dict = dict()
poly_conv_dict = dict()
face_ztrans_dict = dict()
poly_zconv_dict = dict()
# calculate convergence in each polygon
counter = 0
NPOLY = len(gpoly_dict)
for npoly in range(NPOLY):
#print(' npoly = ' + str(npoly))
# we have two objects associated with a given polygon,
# * per_dict[iseg] has arrays of boundary information, and
# * ji_rho_in is an array of indices of interior points
per_dict = gpoly_dict[npoly]['per_dict']
ji_rho_in = gpoly_dict[npoly]['ji_rho_in']
j_in = ji_rho_in[:,0]
i_in = ji_rho_in[:,1]
# find fluxes through the faces
NFACE = len(per_dict)
face_trans_arr = np.zeros(NFACE)
face_area_arr = np.zeros(NFACE)
face_ztrans_arr = np.zeros((NFACE, NLAY))
face_zarea_arr = np.zeros((NFACE, NLAY))
for nface in range(NFACE):
per = per_dict[nface]
JJ = per[:,0]
II = per[:,1]
UV = per[:,2]
PM = per[:,3]
# vectors of integers
JJu = JJ[UV==0]
IIu = II[UV==0]
PMu = PM[UV==0]
JJv = JJ[UV==1]
IIv = II[UV==1]
PMv = PM[UV==1]
# shorter vectors of integers, specific to the u- and v-grids
PMu = PMu.reshape((1, len(PMu)))
PMv = PMv.reshape((1, len(PMv)))
this_u = u[:, JJu, IIu]
this_v = v[:, JJv, IIv]
draft_DAHu = DAHu[:, JJu, IIu]
draft_DAHv = DAHv[:, JJv, IIv]
this_DAHu = np.ma.masked_where(this_u.mask, draft_DAHu)
this_DAHv = np.ma.masked_where(this_v.mask, draft_DAHv)
# check lengths
l1u = this_u.compressed().size
l2u = this_DAHu.compressed().size
if l1u != l2u:
print('Warning: U Result vectors are different lengths')
l1v = this_v.compressed().size
l2v = this_DAHv.compressed().size
if l1v != l2v:
print('Warning: V Result vectors are different lengths')
# do the integrals
if l1u>0:
area_u = this_DAHu.sum()
trans_u = (this_u * this_DAHu * PMu).sum()
else:
area_u = 0.
trans_u = 0.
if l1v>0:
area_v = this_DAHv.sum()
trans_v = (this_v * this_DAHv * PMv).sum()
else:
area_v = 0.
trans_v = 0.
face_trans = trans_u + trans_v
face_trans_arr[nface] = face_trans
face_area = area_u + area_v
face_area_arr[nface] = face_area
# store results for later
face_trans_dict[(npoly, nface)] = (face_trans, face_area)
# START z level code #############################################
# now do the same thing but divvying up into z levels
for nlay in range(NLAY):
this_zmu = zmu_dict[nlay][:, JJu, IIu]
this_zmv = zmv_dict[nlay][:, JJv, IIv]
this_zu = this_u[this_zmu]
this_zv = this_v[this_zmv]
draft_zDAHu = this_DAHu[this_zmu]
draft_zDAHv = this_DAHv[this_zmv]
this_zDAHu = np.ma.masked_where(this_zu.mask, draft_zDAHu)
this_zDAHv = np.ma.masked_where(this_zv.mask, draft_zDAHv)
# check lengths
l1zu = this_zu.com | out_dir0 = Ldir['parent'] + 'ptools_output/atlantis_fjord_2005/' | random_line_split |
|
flux_calc.py | results of z level code
# RESULT it works perfectly (to within roundoff error I think)
fzt = 0.
fza = 0.
for nlay in range(NLAY):
fzt += face_ztrans_dict[(npoly, nface, nlay)][0]
fza += face_ztrans_dict[(npoly, nface, nlay)][1]
if np.abs(fzt - face_trans_dict[(npoly, nface)][0]) > .001:
print('npoly=%d nface=%d transport error' % (npoly, nface))
print('fzt=%0.5f ft=%0.5f' % (fzt, face_trans_dict[(npoly, nface)][0]))
if np.abs(fza - face_trans_dict[(npoly, nface)][1]) > .001:
print('npoly=%d nface=%d area error' % (npoly, nface))
print('fza=%0.5f fa=%0.5f' % (fza, face_trans_dict[(npoly, nface)][1]))
poly_zarea = np.zeros(NLAY)
for nlay in range(NLAY):
this_zarea = zDAm[nlay,j_in,i_in].sum()
try:
if this_zarea.mask == True:
this_zarea = 0.
except AttributeError:
pass
poly_zarea[nlay] = this_zarea
poly_area = DAm[j_in,i_in].sum()
try:
if poly_area.mask == True:
poly_area = 0.
except AttributeError:
pass
net_conv = face_trans_arr.sum()
if (poly_area > 0):
poly_mean_w = net_conv/poly_area
else:
poly_mean_w = 0.0
# store results for later
net_face_area = face_area_arr.sum()
poly_conv_dict[npoly] = (net_conv, poly_area, poly_zarea, net_face_area, NFACE)
counter += 1
ds.close()
# save originals
orig_poly_conv_dict = poly_conv_dict.copy()
orig_face_trans_dict = face_trans_dict.copy()
orig_face_ztrans_dict = face_ztrans_dict.copy()
# Next try to adjust all polygons to have conv = 0
NITER = 400
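# Outline of the adjustment loop below (a summary of the code, not extra logic):
# (1) set every polygon's target net convergence to zero, (2) spread the needed
# change over that polygon's faces in proportion to face area, (3) replace each
# shared face and its twin on the neighboring polygon by half their difference,
# so the two sides carry equal-and-opposite transport, then recompute the
# convergences and repeat for NITER passes.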
for iii in range(NITER):
new_poly_conv_dict = poly_conv_dict.copy()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = new_poly_conv_dict[npoly]
new_poly_conv_dict[npoly] = (0.0, poly_area, poly_zarea, net_face_area, NFACE)
new_face_trans_dict = face_trans_dict.copy()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
new_net_conv, poly_area, poly_zarea, net_face_area, NFACE = new_poly_conv_dict[npoly]
dconv = new_net_conv - net_conv
if net_face_area != 0.0:
dconv_a = dconv / net_face_area
else:
dconv_a = 0.0
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
if face_trans != 0.0 and face_area != 0.0:
new_face_trans_dict[(npoly, nface)] = (face_trans + dconv_a*face_area, face_area)
else:
pass # keep original values
new_face_trans_dict_copy = new_face_trans_dict.copy()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
try:
new_face_trans, face_area = new_face_trans_dict[(npoly, nface)]
ipoly, iface = shared_faces_dict[(npoly, nface)]
new_facing_trans, facing_area = new_face_trans_dict_copy[(ipoly, iface)]
fact = (new_face_trans + new_facing_trans)/2
new_face_trans_dict[(npoly, nface)] = (new_face_trans - fact, face_area)
except KeyError:
# presumably this face does not have a match on another polygon
pass
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
shelf = []
for nface in range(NFACE):
new_face_trans, face_area = new_face_trans_dict[(npoly, nface)]
shelf.append(new_face_trans)
new_conv = np.array(shelf).sum()
new_poly_conv_dict[npoly] = (new_conv, poly_area, poly_zarea, net_face_area, NFACE)
face_trans_dict = new_face_trans_dict.copy()
poly_conv_dict = new_poly_conv_dict.copy()
# finally add the adjustments to the transports in z levels:
face_ztrans_dict = dict()
for npoly in range(NPOLY):
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
orig_face_trans, face_area = orig_face_trans_dict[(npoly, nface)]
DQ = face_trans - orig_face_trans
for nlay in range(NLAY):
(face_ztrans, face_zarea) = orig_face_ztrans_dict[(npoly, nface, nlay)]
if face_area > 0:
adj = DQ * face_zarea / face_area
else:
adj = 0.
face_ztrans_dict[(npoly, nface, nlay)] = (face_ztrans + adj, face_zarea)
# calculate w at interfaces and check that they are reasonable
# RESULT: they seem good
wz_dict = dict()
# each entry in wz_dict is a tuple of (w, area) at the TOP of a layer
print_info = False
for npoly in range(NPOLY):
if print_info:
print('\nnpoly = %d' % (npoly))
cz = np.zeros(NLAY)
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
for nlay in range(NLAY):
(face_ztrans, face_zarea) = face_ztrans_dict[(npoly, nface, nlay)]
cz[nlay] += face_ztrans
czr = cz[::-1] # packed bottom to top
cczr = np.cumsum(czr)
ccz = cczr[::-1]
# ccz is the vertical transport through the UPPER boundary of each layer
# packed top to bottom like all other z-layer variables
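# Worked example with made-up numbers: if cz = [1.0, -2.0, 0.5] (top to bottom),
# then czr = [0.5, -2.0, 1.0], cczr = [0.5, -1.5, -0.5], and ccz = [-0.5, -1.5, 0.5];
# e.g. ccz[1] = cz[1] + cz[2], the transport through the upper boundary of layer 1
# needed to balance everything below it.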
for nlay in range(NLAY):
if nlay == 0: # get the associated horizontal area
this_zarea = poly_area
else:
this_zarea = poly_zarea[nlay-1]
try: # account for the few cases where an area might be a masked constant
if this_zarea.mask == True:
this_zarea = 0.
except AttributeError:
pass
# calculate the vertical velocity through the layer
# note that the transport through the deepest level (-350 m) is ZERO
# and the uppermost w is also ~zero because of our iterative correction
if (this_zarea == 0.):
wz_dict[(npoly, nlay)] = (0., 0.)
else:
wz_dict[(npoly, nlay)] = (ccz[nlay] / this_zarea, this_zarea)
if print_info:
print(' z = %4d w = %10.1f (mm/hour)' %
(z_dict[nlay], 3600*1000*wz_dict[(npoly, nlay)][0]))
# save the results for this day
pickle.dump(face_trans_dict, open(out_dir+f_string+'_face_trans.p', 'wb'))
pickle.dump(face_ztrans_dict, open(out_dir+f_string+'_face_ztrans.p', 'wb'))
pickle.dump(poly_conv_dict, open(out_dir+f_string+'_poly_conv.p', 'wb'))
pickle.dump(wz_dict, open(out_dir+f_string+'_wz.p', 'wb'))
# check that the net convergence is still small
# RESULT: it is very, very small
zconv = []
pconv = []
for npoly in range(NPOLY):
cc = 0.
net_conv, poly_area, poly_zarea, net_face_area, NFACE = poly_conv_dict[npoly]
for nface in range(NFACE):
face_trans, face_area = face_trans_dict[(npoly, nface)]
for nlay in range(NLAY):
| (face_ztrans, face_zarea) = face_ztrans_dict[(npoly, nface, nlay)]
cc += face_ztrans | conditional_block |
|
mpcs_app.py | password'),
success_redirect=request.POST.get('redirect_url'),
fail_redirect='/login')
@route('/logout', method='GET', name="logout")
def logout():
log.info(request.url)
auth.logout(success_redirect='/login')
'''
*******************************************************************************
*
CORE APPLICATION CODE IS BELOW...
*
*******************************************************************************
'''
'''
*******************************************************************************
Subscription management handlers
*******************************************************************************
'''
import stripe
# Display form to get subscriber credit card info
@route('/subscribe', method='GET', name="subscribe")
def subscribe():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
return template(request.app.config['mpcs.env.templates'] + 'subscribe', auth=auth, alert=False)
# Process the subscription request
@route('/subscribe', method='POST', name="subscribe_submit")
def subscribe_submit():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
try:
# Extract the Stripe token from the submitted form -- stripe_token
stripe.api_key = request.app.config['mpcs.stripe.secret_key']
token = request.POST['stripe_token']
# Create a premium customer subscribing to premium plan
print 'Welcome to Stripe'
customer = stripe.Customer.create(description=auth.current_user.username, source=token, email=auth.current_user.email_addr)
stripe.Subscription.create(customer=customer.id, plan="premium_plan",)
# Update the user's profile in our user database
auth.current_user.update(role="premium_user")
except stripe.error.CardError, e:
print 'This credit card has been declined'
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
items = res['Items']
client = boto3.client('glacier', region_name = region_name)
# Check if we have any job within our DynamoDB
if len(items) > 0:
for item in items:
# Update the user role to premium in DynamoDB
updateData = ann_table.update_item(
Key={
'job_id': item['job_id']
},
UpdateExpression="set user_role=:a",
ExpressionAttributeValues={
':a': "premium_user"
},
ReturnValues="UPDATED_NEW"
)
# Check if we should initiate archive request
if item['results_file_archive_id'] != 'Not available':
re = client.initiate_job(vaultName='ucmpcs', jobParameters={"Type": "archive-retrieval", "ArchiveId": item['results_file_archive_id'], "SNSTopic": request.app.config['mpcs.aws.sns.glacier_topic'], "Tier": "Expedited"})
return template(request.app.config['mpcs.env.templates'] + 'subscribe_confirm', auth=auth, stripe_id=customer.id, alert=False)
'''
*******************************************************************************
Display the user's profile with subscription link for Free users
*******************************************************************************
'''
@route('/profile', method='GET', name="profile")
def user_profile():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Upgrade link to become a premium user
temp = str(request.url).split('/profile')
upgrade_link = temp[0] + '/subscribe'
return template(request.app.config['mpcs.env.templates'] + 'profile', auth=auth, upgrade_link=upgrade_link, alert=False)
'''
*******************************************************************************
Creates the necessary AWS S3 policy document and renders a form for
uploading an input file using the policy document
*******************************************************************************
'''
@route('/annotate', method='GET', name="annotate")
def upload_input_file():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Generate unique ID to be used as S3 key (name)
key_name = auth.current_user.username + '/' + str(uuid.uuid4())
# Redirect to a route that will call the annotator
redirect_url = str(request.url) + "/job"
# Get the current time
current = datetime.now(pytz.timezone('US/Central'))
expiration = current + timedelta(hours=24)
expiration = expiration.isoformat()
time = expiration[:23]
time = time + "Z"
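# Note (added): assigning to the name "time" here shadows the time module inside
# this function only; it is harmless as written, but renaming it (e.g. to a
# hypothetical expiration_str) would avoid confusion with time.time() used elsewhere.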
# Define the S3 policy doc to allow upload via form POST
policy_document = str({
"expiration": time,
"conditions": [
{"bucket": bucket_name},
["starts-with","$key", request.app.config['mpcs.aws.s3.key_prefix']],
["starts-with", "$success_action_redirect", redirect_url],
{"x-amz-server-side-encryption": encryption},
{"x-amz-security-token": aws_session_token},
{"acl": acl}]})
# Encode the policy document - ensure no whitespace before encoding
policy = base64.b64encode(policy_document.translate(None, string.whitespace))
# Sign the policy document using the AWS secret key
signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, hashlib.sha1).digest())
# Render the upload form
return template(request.app.config['mpcs.env.templates'] + 'upload',
auth=auth, bucket_name=bucket_name, s3_key_name=key_name,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token, redirect_url=redirect_url,
encryption=encryption, acl=acl, policy=policy, signature=signature)
'''
*******************************************************************************
Accepts the S3 redirect GET request, parses it to extract
required info, saves a job item to the database, and then
publishes a notification for the annotator service.
*******************************************************************************
'''
@route('/annotate/job', method='GET')
def create_annotation_job_request():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get bucket name, key, and job ID from the S3 redirect URL
bucket_name = request.query['bucket']
s3key = request.query['key']
# Get the file name
filename = s3key.split("~")[1]
index = s3key.split("~")[0].rindex('/')
job_id = s3key.split("~")[0][index + 1:]
first = s3key.find('/')
second = s3key.rindex('/')
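# Illustration only (key layout inferred from the parsing above, not from the
# uploader): a key of the form "<prefix>/<username>/<job_id>~<filename>", e.g.
# "mpcs/alice/0f1d3c2a-...~input.txt", gives filename "input.txt" and job_id
# "0f1d3c2a-...".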
# Create a job item and persist it to the annotations database
data = {
"job_id": job_id,
"username": auth.current_user.username,
"input_file_name": filename,
"s3_inputs_bucket": bucket_name,
"s3_key_input_file": s3key,
"submit_time": int(time.time()),
"job_status": "PENDING",
"user_email_addr": auth.current_user.email_addr,
"user_role": auth.current_user.role
}
# Insert the new data into data table
ann_table.put_item(Item=data)
# Publish a notification message to the SNS topic
client = boto3.client('sns', region_name = region_name)
response_notification = client.publish(
TopicArn = job_request_topic,
Message = json.dumps(data)
)
# Render upload_confirm template
return template(request.app.config['mpcs.env.templates'] + 'upload_confirm', auth=auth, job_id=job_id, alert=False)
'''
*******************************************************************************
List all annotations for the user
*******************************************************************************
'''
@route('/annotations', method='GET', name="annotations_list")
def get_annotations_list():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
# Get all the relevant detail about current user
items = res['Items']
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Render myannotations template
return template(request.app.config['mpcs.env.templates'] + 'myannotations', auth=auth, items=result_data, alert=False)
'''
*******************************************************************************
Display details of a specific annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>', method='GET', name="annotation_details")
def get_annotation_details(job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current user name
username = auth.current_user.username
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
download_url = ''
# Construct a signed download URL for user to download result file from s3 bucket
if items[0]['job_status'] != 'RUNNING':
resultfile = items[0]['s3_key_result_file'].split('~')
client = boto3.client('s3')
download_url = client.generate_presigned_url(
ClientMethod='get_object',
Params = {
'Bucket': request.app.config['mpcs.aws.s3.results_bucket'], | 'Key': resultfile[0] + '/' + resultfile[1]
}
) | random_line_split |
|
mpcs_app.py |
*******************************************************************************
'''
import stripe
# Display form to get subscriber credit card info
@route('/subscribe', method='GET', name="subscribe")
def subscribe():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
return template(request.app.config['mpcs.env.templates'] + 'subscribe', auth=auth, alert=False)
# Process the subscription request
@route('/subscribe', method='POST', name="subscribe_submit")
def subscribe_submit():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
try:
# Extract the Stripe token from submited form -- stripe_token
stripe.api_key = request.app.config['mpcs.stripe.secret_key']
token = request.POST['stripe_token']
# Create a premium customer subscribing to premium plan
print 'Welcome to Stripe'
customer = stripe.Customer.create(description=auth.current_user.username, source=token, email=auth.current_user.email_addr)
stripe.Subscription.create(customer=customer.id, plan="premium_plan",)
# Update the user's profile in our user database
auth.current_user.update(role="premium_user")
except stripe.error.CardError, e:
print 'This credit card has been declined'
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
items = res['Items']
client = boto3.client('glacier', region_name = region_name)
# Check if we have any job within our DynamoDB
if len(items) > 0:
for item in items:
# Update the user role to premium in DynamoDB
updateData = ann_table.update_item(
Key={
'job_id': item['job_id']
},
UpdateExpression="set user_role=:a",
ExpressionAttributeValues={
':a': "premium_user"
},
ReturnValues="UPDATED_NEW"
)
# Check if we should initiate archive request
if item['results_file_archive_id'] != 'Not available':
re = client.initiate_job(vaultName='ucmpcs', jobParameters={"Type": "archive-retrieval", "ArchiveId": item['results_file_archive_id'], "SNSTopic": request.app.config['mpcs.aws.sns.glacier_topic'], "Tier": "Expedited"})
return template(request.app.config['mpcs.env.templates'] + 'subscribe_confirm', auth=auth, stripe_id=customer.id, alert=False)
'''
*******************************************************************************
Display the user's profile with subscription link for Free users
*******************************************************************************
'''
@route('/profile', method='GET', name="profile")
def user_profile():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Upgrade link to become a premium user
temp = str(request.url).split('/profile')
upgrade_link = temp[0] + '/subscribe'
return template(request.app.config['mpcs.env.templates'] + 'profile', auth=auth, upgrade_link=upgrade_link, alert=False)
'''
*******************************************************************************
Creates the necessary AWS S3 policy document and renders a form for
uploading an input file using the policy document
*******************************************************************************
'''
@route('/annotate', method='GET', name="annotate")
def upload_input_file():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Generate unique ID to be used as S3 key (name)
key_name = auth.current_user.username + '/' + str(uuid.uuid4())
# Redirect to a route that will call the annotator
redirect_url = str(request.url) + "/job"
# Get the current time
current = datetime.now(pytz.timezone('US/Central'))
expiration = current + timedelta(hours=24)
expiration = expiration.isoformat()
time = expiration[:23]
time = time + "Z"
# Define the S3 policy doc to allow upload via form POST
policy_document = str({
"expiration": time,
"conditions": [
{"bucket": bucket_name},
["starts-with","$key", request.app.config['mpcs.aws.s3.key_prefix']],
["starts-with", "$success_action_redirect", redirect_url],
{"x-amz-server-side-encryption": encryption},
{"x-amz-security-token": aws_session_token},
{"acl": acl}]})
# Encode the policy document - ensure no whitespace before encoding
policy = base64.b64encode(policy_document.translate(None, string.whitespace))
# Sign the policy document using the AWS secret key
signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, hashlib.sha1).digest())
# Render the upload form
return template(request.app.config['mpcs.env.templates'] + 'upload',
auth=auth, bucket_name=bucket_name, s3_key_name=key_name,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token, redirect_url=redirect_url,
encryption=encryption, acl=acl, policy=policy, signature=signature)
'''
*******************************************************************************
Accepts the S3 redirect GET request, parses it to extract
required info, saves a job item to the database, and then
publishes a notification for the annotator service.
*******************************************************************************
'''
@route('/annotate/job', method='GET')
def create_annotation_job_request():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get bucket name, key, and job ID from the S3 redirect URL
bucket_name = request.query['bucket']
s3key = request.query['key']
# Get the file name
filename = s3key.split("~")[1]
index = s3key.split("~")[0].rindex('/')
job_id = s3key.split("~")[0][index + 1:]
first = s3key.find('/')
second = s3key.rindex('/')
# Create a job item and persist it to the annotations database
data = {
"job_id": job_id,
"username": auth.current_user.username,
"input_file_name": filename,
"s3_inputs_bucket": bucket_name,
"s3_key_input_file": s3key,
"submit_time": int(time.time()),
"job_status": "PENDING",
"user_email_addr": auth.current_user.email_addr,
"user_role": auth.current_user.role
}
# Insert the new data into data table
ann_table.put_item(Item=data)
# Publish a notification message to the SNS topic
client = boto3.client('sns', region_name = region_name)
response_notification = client.publish(
TopicArn = job_request_topic,
Message = json.dumps(data)
)
# Render upload_confirm template
return template(request.app.config['mpcs.env.templates'] + 'upload_confirm', auth=auth, job_id=job_id, alert=False)
'''
*******************************************************************************
List all annotations for the user
*******************************************************************************
'''
@route('/annotations', method='GET', name="annotations_list")
def get_annotations_list():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
# Get all the relevant detail about current user
items = res['Items']
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Render myannotations template
return template(request.app.config['mpcs.env.templates'] + 'myannotations', auth=auth, items=result_data, alert=False)
'''
*******************************************************************************
Display details of a specific annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>', method='GET', name="annotation_details")
def get_annotation_details(job_id):
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current user name
username = auth.current_user.username
res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
items = res['Items']
download_url = ''
# Construct a signed download URL for user to download result file from s3 bucket
if items[0]['job_status'] != 'RUNNING':
resultfile = items[0]['s3_key_result_file'].split('~')
client = boto3.client('s3')
download_url = client.generate_presigned_url(
ClientMethod='get_object',
Params = {
'Bucket': request.app.config['mpcs.aws.s3.results_bucket'],
'Key': resultfile[0] + '/' + resultfile[1]
}
)
# Display annotation detail for specified job
current_time = 0
# Check if the job is still running
if items[0]['job_status'] == 'RUNNING':
new_link = 2
# Check if the given username matches the username within the database
if username == items[0]['username']: | conditional_block |
||
mpcs_app.py | mpcs.env.templates'] + 'register_confirm',
auth=auth, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=True, error_message=None)
'''
*******************************************************************************
Login, logout, and password reset forms
*******************************************************************************
'''
@route('/login', method='GET', name="login")
def login():
log.info(request.url)
redirect_url = "/"
# If the user is trying to access a protected URL, go there after authenticating
if request.query.redirect_url.strip() != "":
redirect_url = request.query.redirect_url
return template(request.app.config['mpcs.env.templates'] + 'login',
auth=auth, redirect_url=redirect_url, alert=False)
@route('/login', method='POST', name="login_submit")
def login_submit():
auth.login(request.POST.get('username'),
request.POST.get('password'),
success_redirect=request.POST.get('redirect_url'),
fail_redirect='/login')
@route('/logout', method='GET', name="logout")
def logout():
log.info(request.url)
auth.logout(success_redirect='/login')
'''
*******************************************************************************
*
CORE APPLICATION CODE IS BELOW...
*
*******************************************************************************
'''
'''
*******************************************************************************
Subscription management handlers
*******************************************************************************
'''
import stripe
# Display form to get subscriber credit card info
@route('/subscribe', method='GET', name="subscribe")
def subscribe():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
return template(request.app.config['mpcs.env.templates'] + 'subscribe', auth=auth, alert=False)
# Process the subscription request
@route('/subscribe', method='POST', name="subscribe_submit")
def subscribe_submit():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
try:
# Extract the Stripe token from the submitted form -- stripe_token
stripe.api_key = request.app.config['mpcs.stripe.secret_key']
token = request.POST['stripe_token']
# Create a premium customer subscribing to premium plan
print 'Welcome to Stripe'
customer = stripe.Customer.create(description=auth.current_user.username, source=token, email=auth.current_user.email_addr)
stripe.Subscription.create(customer=customer.id, plan="premium_plan",)
# Update the user's profile in our user database
auth.current_user.update(role="premium_user")
except stripe.error.CardError, e:
print 'This credit card has been declined'
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
items = res['Items']
client = boto3.client('glacier', region_name = region_name)
# Check if we have any job within our DynamoDB
if len(items) > 0:
for item in items:
# Update the user role to premium in DynamoDB
updateData = ann_table.update_item(
Key={
'job_id': item['job_id']
},
UpdateExpression="set user_role=:a",
ExpressionAttributeValues={
':a': "premium_user"
},
ReturnValues="UPDATED_NEW"
)
# Check if we should initiate archive request
if item['results_file_archive_id'] != 'Not available':
re = client.initiate_job(vaultName='ucmpcs', jobParameters={"Type": "archive-retrieval", "ArchiveId": item['results_file_archive_id'], "SNSTopic": request.app.config['mpcs.aws.sns.glacier_topic'], "Tier": "Expedited"})
return template(request.app.config['mpcs.env.templates'] + 'subscribe_confirm', auth=auth, stripe_id=customer.id, alert=False)
'''
*******************************************************************************
Display the user's profile with subscription link for Free users
*******************************************************************************
'''
@route('/profile', method='GET', name="profile")
def user_profile():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Upgrade link to become a premium user
temp = str(request.url).split('/profile')
upgrade_link = temp[0] + '/subscribe'
return template(request.app.config['mpcs.env.templates'] + 'profile', auth=auth, upgrade_link=upgrade_link, alert=False)
'''
*******************************************************************************
Creates the necessary AWS S3 policy document and renders a form for
uploading an input file using the policy document
*******************************************************************************
'''
@route('/annotate', method='GET', name="annotate")
def upload_input_file():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Generate unique ID to be used as S3 key (name)
key_name = auth.current_user.username + '/' + str(uuid.uuid4())
# Redirect to a route that will call the annotator
redirect_url = str(request.url) + "/job"
# Get the current time
current = datetime.now(pytz.timezone('US/Central'))
expiration = current + timedelta(hours=24)
expiration = expiration.isoformat()
time = expiration[:23]
time = time + "Z"
# Define the S3 policy doc to allow upload via form POST
policy_document = str({
"expiration": time,
"conditions": [
{"bucket": bucket_name},
["starts-with","$key", request.app.config['mpcs.aws.s3.key_prefix']],
["starts-with", "$success_action_redirect", redirect_url],
{"x-amz-server-side-encryption": encryption},
{"x-amz-security-token": aws_session_token},
{"acl": acl}]})
# Encode the policy document - ensure no whitespace before encoding
policy = base64.b64encode(policy_document.translate(None, string.whitespace))
# Sign the policy document using the AWS secret key
signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, hashlib.sha1).digest())
# Render the upload form
return template(request.app.config['mpcs.env.templates'] + 'upload',
auth=auth, bucket_name=bucket_name, s3_key_name=key_name,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token, redirect_url=redirect_url,
encryption=encryption, acl=acl, policy=policy, signature=signature)
'''
*******************************************************************************
Accepts the S3 redirect GET request, parses it to extract
required info, saves a job item to the database, and then
publishes a notification for the annotator service.
*******************************************************************************
'''
@route('/annotate/job', method='GET')
def create_annotation_job_request():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get bucket name, key, and job ID from the S3 redirect URL
bucket_name = request.query['bucket']
s3key = request.query['key']
# Get the file name
filename = s3key.split("~")[1]
index = s3key.split("~")[0].rindex('/')
job_id = s3key.split("~")[0][index + 1:]
first = s3key.find('/')
second = s3key.rindex('/')
# Create a job item and persist it to the annotations database
data = {
"job_id": job_id,
"username": auth.current_user.username,
"input_file_name": filename,
"s3_inputs_bucket": bucket_name,
"s3_key_input_file": s3key,
"submit_time": int(time.time()),
"job_status": "PENDING",
"user_email_addr": auth.current_user.email_addr,
"user_role": auth.current_user.role
}
# Insert the new data into data table
ann_table.put_item(Item=data)
# Publish a notification message to the SNS topic
client = boto3.client('sns', region_name = region_name)
response_notification = client.publish(
TopicArn = job_request_topic,
Message = json.dumps(data)
)
# Render upload_confirm template
return template(request.app.config['mpcs.env.templates'] + 'upload_confirm', auth=auth, job_id=job_id, alert=False)
'''
*******************************************************************************
List all annotations for the user
*******************************************************************************
'''
@route('/annotations', method='GET', name="annotations_list")
def get_annotations_list():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
# Get all the relevant detail about current user
items = res['Items']
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Render myannotations template
return template(request.app.config['mpcs.env.templates'] + 'myannotations', auth=auth, items=result_data, alert=False)
'''
*******************************************************************************
Display details of a specific annotation job
*******************************************************************************
'''
@route('/annotations/<job_id>', method='GET', name="annotation_details")
def | get_annotation_details | identifier_name |
|
mpcs_app.py | def register_confirm(reg_code):
log.info(request.url)
try:
auth.validate_registration(reg_code)
except Exception, error:
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=False, error_message=error)
return template(request.app.config['mpcs.env.templates'] + 'register_confirm',
auth=auth, success=True, error_message=None)
'''
*******************************************************************************
Login, logout, and password reset forms
*******************************************************************************
'''
@route('/login', method='GET', name="login")
def login():
log.info(request.url)
redirect_url = "/"
# If the user is trying to access a protected URL, go there after authenticating
if request.query.redirect_url.strip() != "":
redirect_url = request.query.redirect_url
return template(request.app.config['mpcs.env.templates'] + 'login',
auth=auth, redirect_url=redirect_url, alert=False)
@route('/login', method='POST', name="login_submit")
def login_submit():
auth.login(request.POST.get('username'),
request.POST.get('password'),
success_redirect=request.POST.get('redirect_url'),
fail_redirect='/login')
@route('/logout', method='GET', name="logout")
def logout():
log.info(request.url)
auth.logout(success_redirect='/login')
'''
*******************************************************************************
*
CORE APPLICATION CODE IS BELOW...
*
*******************************************************************************
'''
'''
*******************************************************************************
Subscription management handlers
*******************************************************************************
'''
import stripe
# Display form to get subscriber credit card info
@route('/subscribe', method='GET', name="subscribe")
def subscribe():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
return template(request.app.config['mpcs.env.templates'] + 'subscribe', auth=auth, alert=False)
# Process the subscription request
@route('/subscribe', method='POST', name="subscribe_submit")
def subscribe_submit():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
try:
# Extract the Stripe token from the submitted form -- stripe_token
stripe.api_key = request.app.config['mpcs.stripe.secret_key']
token = request.POST['stripe_token']
# Create a premium customer subscribing to premium plan
print 'Welcome to Stripe'
customer = stripe.Customer.create(description=auth.current_user.username, source=token, email=auth.current_user.email_addr)
stripe.Subscription.create(customer=customer.id, plan="premium_plan",)
# Update the user's profile in our user database
auth.current_user.update(role="premium_user")
except stripe.error.CardError, e:
print 'This credit card has been declined'
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
items = res['Items']
client = boto3.client('glacier', region_name = region_name)
# Check if we have any job within our DynamoDB
if len(items) > 0:
for item in items:
# Update the user role to premium in DynamoDB
updateData = ann_table.update_item(
Key={
'job_id': item['job_id']
},
UpdateExpression="set user_role=:a",
ExpressionAttributeValues={
':a': "premium_user"
},
ReturnValues="UPDATED_NEW"
)
# Check if we should initiate archive request
if item['results_file_archive_id'] != 'Not available':
re = client.initiate_job(vaultName='ucmpcs', jobParameters={"Type": "archive-retrieval", "ArchiveId": item['results_file_archive_id'], "SNSTopic": request.app.config['mpcs.aws.sns.glacier_topic'], "Tier": "Expedited"})
return template(request.app.config['mpcs.env.templates'] + 'subscribe_confirm', auth=auth, stripe_id=customer.id, alert=False)
'''
*******************************************************************************
Display the user's profile with subscription link for Free users
*******************************************************************************
'''
@route('/profile', method='GET', name="profile")
def user_profile():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Upgrade link to become a premium user
temp = str(request.url).split('/profile')
upgrade_link = temp[0] + '/subscribe'
return template(request.app.config['mpcs.env.templates'] + 'profile', auth=auth, upgrade_link=upgrade_link, alert=False)
'''
*******************************************************************************
Creates the necessary AWS S3 policy document and renders a form for
uploading an input file using the policy document
*******************************************************************************
'''
@route('/annotate', method='GET', name="annotate")
def upload_input_file():
log.info(request.url)
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Generate unique ID to be used as S3 key (name)
key_name = auth.current_user.username + '/' + str(uuid.uuid4())
# Redirect to a route that will call the annotator
redirect_url = str(request.url) + "/job"
# Get the current time
current = datetime.now(pytz.timezone('US/Central'))
expiration = current + timedelta(hours=24)
expiration = expiration.isoformat()
time = expiration[:23]
time = time + "Z"
# Define the S3 policy doc to allow upload via form POST
policy_document = str({
"expiration": time,
"conditions": [
{"bucket": bucket_name},
["starts-with","$key", request.app.config['mpcs.aws.s3.key_prefix']],
["starts-with", "$success_action_redirect", redirect_url],
{"x-amz-server-side-encryption": encryption},
{"x-amz-security-token": aws_session_token},
{"acl": acl}]})
# Encode the policy document - ensure no whitespace before encoding
policy = base64.b64encode(policy_document.translate(None, string.whitespace))
# Sign the policy document using the AWS secret key
signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, hashlib.sha1).digest())
# Render the upload form
return template(request.app.config['mpcs.env.templates'] + 'upload',
auth=auth, bucket_name=bucket_name, s3_key_name=key_name,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token, redirect_url=redirect_url,
encryption=encryption, acl=acl, policy=policy, signature=signature)
'''
*******************************************************************************
Accepts the S3 redirect GET request, parses it to extract
required info, saves a job item to the database, and then
publishes a notification for the annotator service.
*******************************************************************************
'''
@route('/annotate/job', method='GET')
def create_annotation_job_request():
# Check that user is authenticated
auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get bucket name, key, and job ID from the S3 redirect URL
bucket_name = request.query['bucket']
s3key = request.query['key']
# Get the file name
filename = s3key.split("~")[1]
index = s3key.split("~")[0].rindex('/')
job_id = s3key.split("~")[0][index + 1:]
first = s3key.find('/')
second = s3key.rindex('/')
# Create a job item and persist it to the annotations database
data = {
"job_id": job_id,
"username": auth.current_user.username,
"input_file_name": filename,
"s3_inputs_bucket": bucket_name,
"s3_key_input_file": s3key,
"submit_time": int(time.time()),
"job_status": "PENDING",
"user_email_addr": auth.current_user.email_addr,
"user_role": auth.current_user.role
}
# Insert the new data into data table
ann_table.put_item(Item=data)
# Publish a notification message to the SNS topic
client = boto3.client('sns', region_name = region_name)
response_notification = client.publish(
TopicArn = job_request_topic,
Message = json.dumps(data)
)
# Render upload_confirm template
return template(request.app.config['mpcs.env.templates'] + 'upload_confirm', auth=auth, job_id=job_id, alert=False)
'''
*******************************************************************************
List all annotations for the user
*******************************************************************************
'''
@route('/annotations', method='GET', name="annotations_list")
def get_annotations_list():
# Check that user is authenticated
| auth.require(fail_redirect='/login?redirect_url=' + request.url)
# Get the current username
username = auth.current_user.username
res = ann_table.query(
IndexName='username_index',
KeyConditionExpression=Key('username').eq(username))
# Get all the relevant detail about current user
items = res['Items']
# Modify the date and time format that is rendered into template file
result_data = list()
for item in items:
item['submit_time'] = datetime.fromtimestamp(int(item['submit_time'])).strftime('%Y-%m-%d %H:%M')
result_data.append(item)
# Render myannotations template
return template(request.app.config['mpcs.env.templates'] + 'myannotations', auth=auth, items=result_data, alert=False) | identifier_body |
|
blockchain.go | (address string) *BlockChain {
//return &BlockClain{
// []*Block{genesisBlock},
//}
var lastHash []byte
db, err := bolt.Open(BlockChainDB, 0600, nil)
//defer db.Close()
if err != nil {
log.Fatal("create database failed")
}
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
bucket,err = tx.CreateBucket([]byte(BlockBucket))
if err != nil{
log.Fatal("create bucket failed")
}
//Create genesis block
genesisBlock := GenesisBlock(address)
//Write message into database
bucket.Put(genesisBlock.Hash,genesisBlock.Serialize())
bucket.Put([]byte("LastHashKey"),genesisBlock.Hash)
lastHash = genesisBlock.Hash
}else{
lastHash = bucket.Get([]byte("LastHashKey"))
}
return nil
})
return &BlockChain{db,lastHash}
}
// GenesisBlock creates the genesis block
func GenesisBlock(address string) *Block {
coinBase := NewCoinbaseTX(address, "Genesis block")
coinBases := []*Transaction{coinBase}
return NewBlock(coinBases, []byte{})
}
// AddBlock 6.add a new block
func (bc *BlockChain)AddBlock(txs []*Transaction) {
for _, tx := range txs{
if !bc.VerifyTransaction(tx) {
fmt.Println("校验交易失败")
return
}
}
//found the last block's hash
lastHash := bc.tail
db := bc.db
//create a new block
//send the new block into the blockchain
db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
log.Fatal("no bucket")
}else{
//Write message into database
block := NewBlock(txs, lastHash)
bucket.Put(block.Hash,block.Serialize())
bucket.Put([]byte("LastHashKey"),block.Hash)
//update the last hash
bc.tail = block.Hash
}
return nil
})
}
// Printchain prints the blockchain in forward order
func (bc *BlockChain) Printchain() {
bcI := bc.NewIterator()
var blockHeight int
var blocks []*Block
for {
block := bcI.Next()
blocks = append(blocks, block)
if block.PrevHash == nil {
break
}
}
for i := len(blocks) - 1; i > -1; i--{
timeFormat := time.Unix(int64(blocks[i].TimeStamp), 0).Format("2006-01-02 15:04:05")
fmt.Printf("=============== 区块高度: %d ==============\n", blockHeight)
fmt.Printf("版本号: %d\n", blocks[i].Version)
fmt.Printf("前区块哈希值: %x\n", blocks[i].PrevHash)
fmt.Printf("梅克尔根: %x\n", blocks[i].Merkel)
fmt.Printf("时间戳: %s\n", timeFormat)
fmt.Printf("难度值: %d\n", blocks[i].Difficulty)
fmt.Printf("随机数 : %d\n", blocks[i].Nonce)
fmt.Printf("当前区块哈希值: %x\n", blocks[i].Hash)
fmt.Printf("区块数据 :%s\n", blocks[i].Transaction[0].TXInputs[0].PubKey)
blockHeight++
}
}
// FindUTXOs finds all UTXOs (unspent outputs) for the given public key hash
func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
var UTXO []TXOuput
// map of spent outputs: the key is the transaction id of the spent output, the value is the list of spent output indexes in that transaction
spentOutput := make(map[string][]int64)
// walk the inputs to collect the outputs this address has already spent
// create an iterator
it := bc.NewIterator()
// iterate over the blocks
for {
block := it.Next()
// iterate over every transaction in the block
for _, transaction := range block.Transaction{
// iterate over the outputs and add the ones related to this address to the returned UTXO set
// here i is the index into the outputs
OUTPUT:
for i, output := range transaction.TXOuputs{
// filter: skip outputs that have already been spent
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
/*
//debugging note: continue only affects the innermost for loop
fmt.Println(j)
fmt.Println(i)
var a bool
a = int64(i) == j
fmt.Println(a)
*/
// compare the recorded index with the loop index to filter out spent outputs
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(output.PubKeyHash,pubKeyHash){
//fmt.Println(output)
UTXO = append(UTXO, output)
}
}
// coinbase (mining) transactions have no inputs
if !transaction.IsCoinbase(){
// walk the inputs to find the UTXOs that have been spent
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),pubKeyHash){
// the key is the transaction the input signs (spends from)
//indexArray := spentOutput[string(input.TXID)]
// //the list holds the indexes of the outputs already spent in that transaction
//indexArray = append(indexArray, input.Index)
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
//fmt.Println("===========")
//fmt.Printf("%x\n", input.TXID)
//fmt.Println(spentOutput[string(input.TXID)])
//fmt.Println("===========")
}
}
}
}
if len(block.PrevHash) == 0 {
break
}
}
return UTXO
}
// Find all UTXOs (unspent outputs) for the given address; an optimized version of the function above
//func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
// var UTXO []TXOuput
// txs := bc.FindUTXOsBased(pubKeyHash)
// for _, tx := range txs{
// for _, output := range tx.TXOuputs{
// if bytes.Equal(pubKeyHash,output.PubKeyHash){
// UTXO = append(UTXO, output)
// }
// }
// }
// return UTXO
//}
// FindNeedUTXOs finds a suitable set of UTXOs for the requested amount and returns it as map[string][]int64, i.e. map[Transaction.TXID] -> indexes of the usable outputs
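// Shape of the return values, with made-up numbers for illustration:
//   utxos = {"tx1": [0, 2], "tx5": [1]}, cacl = 12.5
// i.e. outputs 0 and 2 of tx1 plus output 1 of tx5 together hold 12.5 coins.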
func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
// the selected UTXO set
utxos := make(map[string][]int64)
// running total of the money found
var cacl float64
//=================================
// map of spent outputs: the key is the transaction id of the spent output, the value is the list of spent output indexes in that transaction
spentOutput := make(map[string][]int64)
// walk the inputs to collect the outputs this address has already spent
// create an iterator
it := bc.NewIterator()
// iterate over the blocks
for {
block := it.Next()
// iterate over every transaction in the block
for _, transaction := range block.Transaction{
// iterate over the outputs and add the ones related to this address to the returned UTXO set
// here i is the index into the outputs
OUTPUT:
for i, output := range transaction.TXOuputs{
// filter: skip outputs that have already been spent
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
// compare the recorded index with the loop index to filter out spent outputs
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// collect the utxo, add up the total, and check whether it covers the transfer amount
// if it does, stop and return
//fmt.Println(output)
if cacl < amount {
// add to the total
cacl += output.Value
// record the transaction id and the output index in the map
//array := utxos[string(transaction.TXID)]
//array = append(array, int64(i))
utxos[string(transaction.TXID)] = append(utxos[string(transaction.TXID)], int64(i))
if cacl >= amount | NewBlockChain | identifier_name |
|
blockchain.go | xs, lastHash)
bucket.Put(block.Hash,block.Serialize())
bucket.Put([]byte("LastHashKey"),block.Hash)
//update the last hash
bc.tail = block.Hash
}
return nil
})
}
// Printchain prints the blockchain in forward order
func (bc *BlockChain) Printchain() {
bcI := bc.NewIterator()
var blockHeight int
var blocks []*Block
for {
block := bcI.Next()
blocks = append(blocks, block)
if block.PrevHash == nil {
break
}
}
for i := len(blocks) - 1; i > -1; i--{
timeFormat := time.Unix(int64(blocks[i].TimeStamp), 0).Format("2006-01-02 15:04:05")
fmt.Printf("=============== 区块高度: %d ==============\n", blockHeight)
fmt.Printf("版本号: %d\n", blocks[i].Version)
fmt.Printf("前区块哈希值: %x\n", blocks[i].PrevHash)
fmt.Printf("梅克尔根: %x\n", blocks[i].Merkel)
fmt.Printf("时间戳: %s\n", timeFormat)
fmt.Printf("难度值: %d\n", blocks[i].Difficulty)
fmt.Printf("随机数 : %d\n", blocks[i].Nonce)
fmt.Printf("当前区块哈希值: %x\n", blocks[i].Hash)
fmt.Printf("区块数据 :%s\n", blocks[i].Transaction[0].TXInputs[0].PubKey)
blockHeight++
}
}
// FindUTXOs finds all UTXOs (unspent outputs) for the given public key hash
func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
var UTXO []TXOuput
// map of spent outputs: the key is the transaction id of the spent output, the value is the list of spent output indexes in that transaction
spentOutput := make(map[string][]int64)
// walk the inputs to collect the outputs this address has already spent
// create an iterator
it := bc.NewIterator()
// iterate over the blocks
for {
block := it.Next()
// iterate over every transaction in the block
for _, transaction := range block.Transaction{
// iterate over the outputs and add the ones related to this address to the returned UTXO set
// here i is the index into the outputs
OUTPUT:
for i, output := range transaction.TXOuputs{
// filter: skip outputs that have already been spent
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
/*
| if !transaction.IsCoinbase(){
// walk the inputs to find the UTXOs that have been spent
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),pubKeyHash){
// the key is the transaction the input signs (spends from)
//indexArray := spentOutput[string(input.TXID)]
// //the list holds the indexes of the outputs already spent in that transaction
//indexArray = append(indexArray, input.Index)
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
//fmt.Println("===========")
//fmt.Printf("%x\n", input.TXID)
//fmt.Println(spentOutput[string(input.TXID)])
//fmt.Println("===========")
}
}
}
}
if len(block.PrevHash) == 0 {
break
}
}
return UTXO
}
// Find all UTXOs (unspent outputs) for the given address; an optimized version of the function above
//func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
// var UTXO []TXOuput
// txs := bc.FindUTXOsBased(pubKeyHash)
// for _, tx := range txs{
// for _, output := range tx.TXOuputs{
// if bytes.Equal(pubKeyHash,output.PubKeyHash){
// UTXO = append(UTXO, output)
// }
// }
// }
// return UTXO
//}
//FindNeedUTXOs 根据需求找到合理的utxo集合返回,格式 map[string][]int64 即map[Transaction.TXID] {合适的output的index}
func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
//合理utxo集合
utxos := make(map[string][]int64)
//找到钱的总数
var cacl float64
//=================================
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
//将utxo加进来,统计总额,比较是否满足转账需求
// 满足则退出并返回
//fmt.Println(output)
if cacl < amount {
//统计金额
cacl += output.Value
//将对应交易号及output的index添加进map
//array := utxos[string(transaction.TXID)]
//array = append(array, int64(i))
utxos[string(transaction.TXID)] = append(utxos[string(transaction.TXID)], int64(i))
if cacl >= amount{
fmt.Printf("找到满足的金额%f\n", cacl)
return utxos, cacl
}
}
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
/*
//key为签名的那个交易
indexArray := spentOutput[string(input.TXID)]
//这个数组为签名的那个交易中 已经消费过的output的index值
indexArray = append(indexArray, input.Index)
*/
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
}
}
}
}
if len(block.PrevHash) == 0 {
//fmt.Println("遍历结束")
break
}
}
//=================================
return utxos, cacl
}
//func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
// //合理utxo集合
// utxos := make(map[string][]int64)
// //找到钱的总数
// var cacl float64
//
// txs := bc.FindUTXOsBased(senderPubKeyHash)
// for _, tx := range txs{
// for i, output := range tx.TXOuputs{
// //if from == output.PubKeyHash{
// //两个byte数组相比
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //将utxo加进来,统计总额,比较是否满足转账需求
// // 满足则退出并返回
// if cacl < amount {
// //统计金额
// | //找错误, continue只能跳出最近的for循环
fmt.Println(j)
fmt.Println(i)
var a bool
a = int64(i) == j
fmt.Println(a)
*/
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(output.PubKeyHash,pubKeyHash){
//fmt.Println(output)
UTXO = append(UTXO, output)
}
}
//挖矿交易没有input | conditional_block |
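The loop above relies on Go's labeled `continue OUTPUT` to skip outputs that a later input has already consumed. Python, used elsewhere in this dump, has no labeled loops, so the same filter is normally factored into a small predicate. The sketch below is illustrative only and assumes a plain dict-of-sets bookkeeping structure rather than the repository's real types.

# Minimal illustration of the spent-output filter used above; `spent` maps a
# transaction id to the set of output indexes that later inputs consumed.
def is_spent(spent, txid, index):
    return index in spent.get(txid, set())

spent = {"tx1": {0}}                       # output 0 of tx1 was already spent
outputs = [(0, 5.0), (1, 3.0)]             # (index, value) pairs, assumed shape
usable = [(i, v) for i, v in outputs if not is_spent(spent, "tx1", i)]
print(usable)                              # [(1, 3.0)]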
blockchain.go | ddBlock 6.add a new block
func (bc *BlockChain)AddBlock(txs []*Transaction) {
for _, tx := range txs{
if !bc.VerifyTransaction(tx) {
fmt.Println("校验交易失败")
return
}
}
//found the last block's hash
lastHash := bc.tail
db := bc.db
//create a new block
//send the new block into the blockchain
db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(BlockBucket))
if bucket == nil{
log.Fatal("no bucket")
}else{
//Write message into database
block := NewBlock(txs, lastHash)
bucket.Put(block.Hash,block.Serialize())
bucket.Put([]byte("LastHashKey"),block.Hash)
//update the last hash
bc.tail = block.Hash
}
return nil
})
}
//正向打印区块链
func (bc *BlockChain) Printchain() {
bcI := bc.NewIterator()
var blockHeight int
var blocks []*Block
for {
block := bcI.Next()
blocks = append(blocks, block)
if block.PrevHash == nil {
break
}
}
for i := len(blocks) - 1; i > -1; i--{
timeFormat := time.Unix(int64(blocks[i].TimeStamp), 0).Format("2006-01-02 15:04:05")
fmt.Printf("=============== 区块高度: %d ==============\n", blockHeight)
fmt.Printf("版本号: %d\n", blocks[i].Version)
fmt.Printf("前区块哈希值: %x\n", blocks[i].PrevHash)
fmt.Printf("梅克尔根: %x\n", blocks[i].Merkel)
fmt.Printf("时间戳: %s\n", timeFormat)
fmt.Printf("难度值: %d\n", blocks[i].Difficulty)
fmt.Printf("随机数 : %d\n", blocks[i].Nonce)
fmt.Printf("当前区块哈希值: %x\n", blocks[i].Hash)
fmt.Printf("区块数据 :%s\n", blocks[i].Transaction[0].TXInputs[0].PubKey)
blockHeight++
}
}
//找到指定地址所有的UTXO,即未消费的
func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
var UTXO []TXOuput
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
/*
//找错误, continue只能跳出最近的for循环
fmt.Println(j)
fmt.Println(i)
var a bool
a = int64(i) == j
fmt.Println(a)
*/
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(output.PubKeyHash,pubKeyHash){
//fmt.Println(output)
UTXO = append(UTXO, output)
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),pubKeyHash){
//key为签名的那个交易
//indexArray := spentOutput[string(input.TXID)]
// //这个数组为签名的那个交易中 已经消费过的output的index值
//indexArray = append(indexArray, input.Index)
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
//fmt.Println("===========")
//fmt.Printf("%x\n", input.TXID)
//fmt.Println(spentOutput[string(input.TXID)])
//fmt.Println("===========")
}
}
}
}
if len(block.PrevHash) == 0 {
break
}
}
return UTXO
}
//找到指定地址所有的UTXO,即未消费的,优化上面函数
//func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
// var UTXO []TXOuput
// txs := bc.FindUTXOsBased(pubKeyHash)
// for _, tx := range txs{
// for _, output := range tx.TXOuputs{
// if bytes.Equal(pubKeyHash,output.PubKeyHash){
// UTXO = append(UTXO, output)
// }
// }
// }
// return UTXO
//}
//FindNeedUTXOs 根据需求找到合理的utxo集合返回,格式 map[string][]int64 即map[Transaction.TXID] {合适的output的index}
func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
//合理utxo集合
utxos := make(map[string][]int64)
//找到钱的总数
var cacl float64
//=================================
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
//将utxo加进来,统计总额,比较是否满足转账需求
// 满足则退出并返回
//fmt.Println(output)
if cacl < amount {
//统计金额
cacl += output.Value
//将对应交易号及output的index添加进map
//array := utxos[string(transaction.TXID)]
//array = append(array, int64(i))
utxos[string(transaction.TXID)] = append(utxos[string(transaction.TXID)], int64(i))
if cacl >= amount{
fmt.Printf("找到满足的金额%f\n", cacl)
return utxos, cacl
}
}
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
/*
//key为签名的那个交易
indexArray := spentOutput[string(input.TXID)]
//这个数组为签名的那个交易中 已经消费过的output的index值
indexArray = append(indexArray, input.Index)
*/
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
}
}
}
}
if len(block.PrevHash) == 0 {
//fmt.Println("遍历结束")
break
}
}
//=================================
return utxos, cacl
}
//func (bc *BlockChain)FindNeedUTXOs(senderPubKey | {
coinBase := NewCoinbaseTX(address, "创世块")
coinBases := []*Transaction{coinBase}
return NewBlock(coinBases, []byte{})
}
// A | identifier_body |
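FindUTXOs and FindNeedUTXOs in the blockchain.go sample above share one pattern: walk the chain from the newest block, record which outputs later inputs have spent, and collect the still-unspent outputs that pay the given public-key hash, stopping once the requested amount is covered. The Python sketch below restates that pattern for illustration; the block and transaction dictionaries are assumed shapes, not the repository's actual structs.

def find_needed_utxos(blocks, pub_key_hash, amount):
    """Greedy UTXO selection: returns ({txid: [output indexes]}, total collected)."""
    spent = {}                                   # txid -> set of spent output indexes
    chosen, total = {}, 0.0
    for block in blocks:                         # assumed newest-to-oldest order
        for tx in block["transactions"]:
            for i, (value, owner) in enumerate(tx["outputs"]):
                if i in spent.get(tx["txid"], set()):
                    continue                     # consumed by a later input
                if owner != pub_key_hash:
                    continue
                chosen.setdefault(tx["txid"], []).append(i)
                total += value
                if total >= amount:              # enough collected, stop early
                    return chosen, total
            for ref_txid, ref_index in tx.get("inputs", []):   # coinbase txs have no inputs
                spent.setdefault(ref_txid, set()).add(ref_index)
    return chosen, total

demo = [{"transactions": [{"txid": "t1",
                           "outputs": [(5.0, "alice"), (3.0, "bob")],
                           "inputs": []}]}]
print(find_needed_utxos(demo, "alice", 4.0))     # ({'t1': [0]}, 5.0)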
|
blockchain.go | (txs, lastHash)
bucket.Put(block.Hash,block.Serialize())
bucket.Put([]byte("LastHashKey"),block.Hash)
//update the last hash
bc.tail = block.Hash
}
return nil
})
}
//正向打印区块链
func (bc *BlockChain) Printchain() {
bcI := bc.NewIterator()
var blockHeight int
var blocks []*Block
for {
block := bcI.Next()
blocks = append(blocks, block)
if block.PrevHash == nil {
break
}
}
for i := len(blocks) - 1; i > -1; i--{
timeFormat := time.Unix(int64(blocks[i].TimeStamp), 0).Format("2006-01-02 15:04:05")
fmt.Printf("=============== 区块高度: %d ==============\n", blockHeight)
fmt.Printf("版本号: %d\n", blocks[i].Version)
fmt.Printf("前区块哈希值: %x\n", blocks[i].PrevHash)
fmt.Printf("梅克尔根: %x\n", blocks[i].Merkel)
fmt.Printf("时间戳: %s\n", timeFormat)
fmt.Printf("难度值: %d\n", blocks[i].Difficulty)
fmt.Printf("随机数 : %d\n", blocks[i].Nonce)
fmt.Printf("当前区块哈希值: %x\n", blocks[i].Hash)
fmt.Printf("区块数据 :%s\n", blocks[i].Transaction[0].TXInputs[0].PubKey)
blockHeight++
}
}
//找到指定地址所有的UTXO,即未消费的
func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
var UTXO []TXOuput
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
/*
//找错误, continue只能跳出最近的for循环
fmt.Println(j)
fmt.Println(i)
var a bool
a = int64(i) == j
fmt.Println(a)
*/
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(output.PubKeyHash,pubKeyHash){
//fmt.Println(output)
UTXO = append(UTXO, output)
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),pubKeyHash){
//key为签名的那个交易
//indexArray := spentOutput[string(input.TXID)]
// //这个数组为签名的那个交易中 已经消费过的output的index值
//indexArray = append(indexArray, input.Index)
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
//fmt.Println("===========")
//fmt.Printf("%x\n", input.TXID)
//fmt.Println(spentOutput[string(input.TXID)])
//fmt.Println("===========")
}
}
}
}
if len(block.PrevHash) == 0 {
break | return UTXO
}
//找到指定地址所有的UTXO,即未消费的,优化上面函数
//func (bc *BlockChain)FindUTXOs(pubKeyHash []byte) []TXOuput {
// var UTXO []TXOuput
// txs := bc.FindUTXOsBased(pubKeyHash)
// for _, tx := range txs{
// for _, output := range tx.TXOuputs{
// if bytes.Equal(pubKeyHash,output.PubKeyHash){
// UTXO = append(UTXO, output)
// }
// }
// }
// return UTXO
//}
//FindNeedUTXOs 根据需求找到合理的utxo集合返回,格式 map[string][]int64 即map[Transaction.TXID] {合适的output的index}
func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
//合理utxo集合
utxos := make(map[string][]int64)
//找到钱的总数
var cacl float64
//=================================
//定义一个map来保存消费过的output, key为这个消费过的output的交易id,value值为这个交易中索引的数组
spentOutput := make(map[string][]int64)
// 遍历input,找到自己花费过的utxo的集合
//创建迭代器
it := bc.NewIterator()
//遍历区块
for {
block := it.Next()
//遍历区块中的每笔交易
for _, transaction := range block.Transaction{
//遍历output,添加该地址有关的到返回的utxo中
//这里的i为outputs的下标
OUTPUT:
for i, output := range transaction.TXOuputs{
//过滤,已经消费过的output不用添加进去
if spentOutput[string(transaction.TXID)] != nil{
for _, j := range spentOutput[string(transaction.TXID)]{
//标识过下标和循环中的下标对比, 过滤到已经消费的output
if int64(i) == j{
continue OUTPUT
}
}
}
if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
//将utxo加进来,统计总额,比较是否满足转账需求
// 满足则退出并返回
//fmt.Println(output)
if cacl < amount {
//统计金额
cacl += output.Value
//将对应交易号及output的index添加进map
//array := utxos[string(transaction.TXID)]
//array = append(array, int64(i))
utxos[string(transaction.TXID)] = append(utxos[string(transaction.TXID)], int64(i))
if cacl >= amount{
fmt.Printf("找到满足的金额%f\n", cacl)
return utxos, cacl
}
}
}
}
//挖矿交易没有input
if !transaction.IsCoinbase(){
//遍历input,找到花费过的utxo的集合
for _, input := range transaction.TXInputs{
if bytes.Equal(HashPubKey(input.PubKey),senderPubKeyHash){
/*
//key为签名的那个交易
indexArray := spentOutput[string(input.TXID)]
//这个数组为签名的那个交易中 已经消费过的output的index值
indexArray = append(indexArray, input.Index)
*/
spentOutput[string(input.TXID)] = append(spentOutput[string(input.TXID)], input.Index)
}
}
}
}
if len(block.PrevHash) == 0 {
//fmt.Println("遍历结束")
break
}
}
//=================================
return utxos, cacl
}
//func (bc *BlockChain)FindNeedUTXOs(senderPubKeyHash []byte, amount float64) (map[string][]int64, float64){
// //合理utxo集合
// utxos := make(map[string][]int64)
// //找到钱的总数
// var cacl float64
//
// txs := bc.FindUTXOsBased(senderPubKeyHash)
// for _, tx := range txs{
// for i, output := range tx.TXOuputs{
// //if from == output.PubKeyHash{
// //两个byte数组相比
// if bytes.Equal(senderPubKeyHash, output.PubKeyHash){
// //将utxo加进来,统计总额,比较是否满足转账需求
// // 满足则退出并返回
// if cacl < amount {
// //统计金额
// cacl | }
} | random_line_split |
app.py | ) #Create instance of controller to perform some operations
n_controller = news_controller.NewsController(db) # Create instance of news controller to perform some operations
b_controller = blogger_controller.BloggerController(db) #Create instance of blogger controller to perform some operations
CORS(app)
# Sets up the function for checking if a user is logged in
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'csrf_access_token' not in request.cookies:
return {'Error': 'You have to login first'}
return f(*args, **kwargs)
return decorated_function
# Register a user - either a student, lecturer or mentor
@app.route('/users/register', methods = ['POST'])
def register ():
try:
# Gets all input data from the user
user_name = request.form.get('user_name')
surname = request.form.get('surname')
first_name = request.form.get('first_name')
email = request.form.get('email')
password = bcrypt.generate_password_hash(request.form.get('password')).decode('utf-8')
user_type = request.form.get('user_type')
getemail = controller.get_email(email) # Checks to see if the entry email is in the database. If not, it returns None
except:
return {'Error': 'Unable to retrieve user details'}
if str(user_type) == 'Student':
try:
# Gets additional input for a student
program_id = request.form.get('program_id')
matric_no = request.form.get('matric_no')
level_id = request.form.get('level_id')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(student_model.Student(
user_name, surname, first_name, email, password, user_type, program_id, matric_no, level_id))
class_id = controller.get_class_id(program_id, level_id)
user_id = controller.get_user_id(email)
controller.add_data(class_mem.ClassMem(class_id, user_id))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
|
except: # To notify if a student hasn't conformed to an acceptable input format.
return {'Error': 'Unable to retrieve student details. Ensure the inputs are valid'}
elif str(user_type) == 'Lecturer':
try:
# Get additional inputs for lecturers
department_id = request.form.get('department_id')
title = request.form.get('title')
position = request.form.get('position')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(lecturer_model.Lecturer(
user_name, surname, first_name, email, password, user_type, department_id, title, position))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error':'This email has already been used to register'}
except: # To notify if a lecturer hasn't conformed to an acceptable input format.
return {'Error': "Unable to save lecturer details. Ensure that the inputs are correct"}
elif str(user_type) == 'Mentor':
try:
# Gets additional input data for a mentor
profession = request.form.get('profession')
company = request.form.get('company')
title = request.form.get('title')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(mentor_model.Mentor(
user_name, surname, first_name, email, password, user_type, profession, company, title))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a mentor hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get mentor details. Ensure that the inputs are correct'}
else: # To notify if a user hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get user details. Ensure that the inputs are correct'}
# Function to retrieve a user details based on id
@app.route('/users/<id>', methods=['GET'])
@login_required
def get_user(id):
try:
resp = controller.get_user(id) #Gets the details of a user given the user id.
return resp.to_dict()
except:
return {'Error': 'User not found'}
# Function to login
@app.route('/token/auth', methods=['POST'])
def login():
# Gets email and password inputed by the user
email = request.form.get('email')
pass_word = request.form.get('password')
try:
password = controller.get_password(email) # Checks if email has been registered. If this line fails, it runs the except block
if bcrypt.check_password_hash(password[0], pass_word): # Checks if password is correct
user_name = controller.get_user_name(email)
user_id = controller.get_user_id(email)
print(user_name)
access_token = create_access_token(identity={'User name': user_name[0],'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
refresh_token = create_refresh_token(identity = {'User name': user_name[0], 'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
resp = jsonify({'login': True, 'user name': user_name[0]})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
return resp, 200
else:
return jsonify({'login': False}), 401
except:
return jsonify({'login': False}), 401
@app.route('/token/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
# Create the new access token
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
# Set the access JWT and CSRF double submit protection cookies
# in this response
resp = jsonify({'refresh': True})
set_access_cookies(resp, access_token)
return resp, 200
@app.route('/token/remove', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
#This route is to return the image from the local storage.
@app.route('/uploads/<fc>/<yr>/<mn>/<dy>/<filename>', methods=['GET'])
def get_file(fc, yr, mn, dy, filename):
return send_from_directory((os.path.join(app.config['UPLOAD_PATH'], fc, str(yr), str(mn), str(dy))), filename)
#checks file category
def get_file_category(uploaded_file):
file_mime = uploaded_file.content_type
# extracts the file format e.g application, media, etc
file_category = re.findall('(.*)\/', file_mime)[0]
return file_category
#This route is to upload a file
@app.route('/api/file', methods=['POST'])
@jwt_required
def uploadfeaturedimage_file():
uploaded_file = request.files['file']
ts = int(datetime.datetime.now().timestamp())
date = datetime.datetime.fromtimestamp(ts)
yr = date.year
mn = date.month
dy = date.day
filename = secure_filename(uploaded_file.filename)
if filename != '':
name = filename.split('.')[0]
file_ext = filename.split('.')[1].lower()
file_category = get_file_category(uploaded_file)
if file_ext not in app.config['UPLOAD_EXTENSIONS']:
abort(400, description="File format not supported")
filename = name + str(ts) + '.' + file_ext
try:
if os.path.isdir('./uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)) is True:
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
else:
directory = './uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)
print(directory)
os.makedirs(directory)
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
stat = 'upload successful' # Default status if file upload is successful
link = 'http://127.0.0.1:5000/uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy) + '/' + str(filename)
except:
stat = 'upload not succesful'
link = 'no link returned because upload was unsuccessful'
return {'status': stat, 'link': link}
# Create a Course by a lecturer
@app.route('/course/create', methods=['POST'])
@login_required
def course_create():
try:
# Gets all input data from the user
code = request.form.get('code')
title = request.form.get('title')
unit | return {'Error': 'This email has already been used to register'} | conditional_block |
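The upload route above builds a category/year/month/day directory tree and branches on os.path.isdir before saving. The standalone sketch below shows the same path construction with the two branches collapsed into os.makedirs(..., exist_ok=True); the UPLOAD_PATH value and the printed example path are placeholders, not the application's real configuration.

import os
from datetime import datetime

UPLOAD_PATH = "uploads"                       # placeholder root, not the app's real setting

def build_upload_path(category, filename):
    """Return <UPLOAD_PATH>/<category>/<Y>/<M>/<D>/<name><timestamp><ext>, creating dirs as needed."""
    now = datetime.now()
    directory = os.path.join(UPLOAD_PATH, category, str(now.year), str(now.month), str(now.day))
    os.makedirs(directory, exist_ok=True)     # no isdir/else needed: missing parents are created
    name, ext = os.path.splitext(filename)
    return os.path.join(directory, f"{name}{int(now.timestamp())}{ext}")

print(build_upload_path("image", "photo.png"))   # e.g. uploads/image/2024/5/17/photo1715901234.png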
app.py | ) #Create instance of controller to perform some operations
n_controller = news_controller.NewsController(db) # Create instance of news controller to perform some operations
b_controller = blogger_controller.BloggerController(db) #Create instance of blogger controller to perform some operations
CORS(app)
# Sets up the function for checking if a user is logged in
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'csrf_access_token' not in request.cookies:
return {'Error': 'You have to login first'}
return f(*args, **kwargs)
return decorated_function
# Register a user - either a student, lecturer or mentor
@app.route('/users/register', methods = ['POST'])
def register ():
| level_id = request.form.get('level_id')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(student_model.Student(
user_name, surname, first_name, email, password, user_type, program_id, matric_no, level_id))
class_id = controller.get_class_id(program_id, level_id)
user_id = controller.get_user_id(email)
controller.add_data(class_mem.ClassMem(class_id, user_id))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a student hasn't conformed to an acceptable input format.
return {'Error': 'Unable to retrieve student details. Ensure the inputs are valid'}
elif str(user_type) == 'Lecturer':
try:
# Get additional inputs for lecturers
department_id = request.form.get('department_id')
title = request.form.get('title')
position = request.form.get('position')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(lecturer_model.Lecturer(
user_name, surname, first_name, email, password, user_type, department_id, title, position))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error':'This email has already been used to register'}
except: # To notify if a lecturer hasn't conformed to an acceptable input format.
return {'Error': "Unable to save lecturer details. Ensure that the inputs are correct"}
elif str(user_type) == 'Mentor':
try:
# Gets additional input data for a mentor
profession = request.form.get('profession')
company = request.form.get('company')
title = request.form.get('title')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(mentor_model.Mentor(
user_name, surname, first_name, email, password, user_type, profession, company, title))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a mentor hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get mentor details. Ensure that the inputs are correct'}
else: # To notify if a user hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get user details. Ensure that the inputs are correct'}
# Function to retrieve a user details based on id
@app.route('/users/<id>', methods=['GET'])
@login_required
def get_user(id):
try:
resp = controller.get_user(id) #Gets the details of a user given the user id.
return resp.to_dict()
except:
return {'Error': 'User not found'}
# Function to login
@app.route('/token/auth', methods=['POST'])
def login():
# Gets email and password inputed by the user
email = request.form.get('email')
pass_word = request.form.get('password')
try:
password = controller.get_password(email) # Checks if email has been registered. If this line fails, it runs the except block
if bcrypt.check_password_hash(password[0], pass_word): # Checks if password is correct
user_name = controller.get_user_name(email)
user_id = controller.get_user_id(email)
print(user_name)
access_token = create_access_token(identity={'User name': user_name[0],'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
refresh_token = create_refresh_token(identity = {'User name': user_name[0], 'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
resp = jsonify({'login': True, 'user name': user_name[0]})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
return resp, 200
else:
return jsonify({'login': False}), 401
except:
return jsonify({'login': False}), 401
@app.route('/token/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
# Create the new access token
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
# Set the access JWT and CSRF double submit protection cookies
# in this response
resp = jsonify({'refresh': True})
set_access_cookies(resp, access_token)
return resp, 200
@app.route('/token/remove', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
#This route is to return the image from the local storage.
@app.route('/uploads/<fc>/<yr>/<mn>/<dy>/<filename>', methods=['GET'])
def get_file(fc, yr, mn, dy, filename):
return send_from_directory((os.path.join(app.config['UPLOAD_PATH'], fc, str(yr), str(mn), str(dy))), filename)
#checks file category
def get_file_category(uploaded_file):
file_mime = uploaded_file.content_type
# extracts the file format e.g application, media, etc
file_category = re.findall('(.*)\/', file_mime)[0]
return file_category
#This route is to upload a file
@app.route('/api/file', methods=['POST'])
@jwt_required
def uploadfeaturedimage_file():
uploaded_file = request.files['file']
ts = int(datetime.datetime.now().timestamp())
date = datetime.datetime.fromtimestamp(ts)
yr = date.year
mn = date.month
dy = date.day
filename = secure_filename(uploaded_file.filename)
if filename != '':
name = filename.split('.')[0]
file_ext = filename.split('.')[1].lower()
file_category = get_file_category(uploaded_file)
if file_ext not in app.config['UPLOAD_EXTENSIONS']:
abort(400, description="File format not supported")
filename = name + str(ts) + '.' + file_ext
try:
if os.path.isdir('./uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)) is True:
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
else:
directory = './uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)
print(directory)
os.makedirs(directory)
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
stat = 'upload successful' # Default status if file upload is successful
link = 'http://127.0.0.1:5000/uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy) + '/' + str(filename)
except:
stat = 'upload not succesful'
link = 'no link returned because upload was unsuccessful'
return {'status': stat, 'link': link}
# Create a Course by a lecturer
@app.route('/course/create', methods=['POST'])
@login_required
def course_create():
try:
# Gets all input data from the user
code = request.form.get('code')
title = request.form.get('title')
unit | try:
# Gets all input data from the user
user_name = request.form.get('user_name')
surname = request.form.get('surname')
first_name = request.form.get('first_name')
email = request.form.get('email')
password = bcrypt.generate_password_hash(request.form.get('password')).decode('utf-8')
user_type = request.form.get('user_type')
getemail = controller.get_email(email) # Checks to see if the entry email is in the database. If not, it returns None
except:
return {'Error': 'Unable to retrieve user details'}
if str(user_type) == 'Student':
try:
# Gets additional input for a student
program_id = request.form.get('program_id')
matric_no = request.form.get('matric_no') | identifier_body |
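get_file_category above extracts the left half of the MIME type with re.findall('(.*)/', ...)[0]; str.partition gives the same result without a regex, and the extension whitelist check stays a one-liner. The sketch below is illustrative; the ALLOWED_EXTENSIONS set is an assumption, not the app's UPLOAD_EXTENSIONS config.

ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg", "pdf"}     # placeholder whitelist

def file_category(mime_type):
    # "image/png" -> "image"; equivalent to re.findall('(.*)/', mime_type)[0]
    return mime_type.partition("/")[0]

def extension_allowed(filename):
    return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS

assert file_category("image/png") == "image"
assert extension_allowed("diagram.PNG")
assert not extension_allowed("script.exe")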
app.py | ('department_id')
title = request.form.get('title')
position = request.form.get('position')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(lecturer_model.Lecturer(
user_name, surname, first_name, email, password, user_type, department_id, title, position))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error':'This email has already been used to register'}
except: # To notify if a lecturer hasn't conformed to an acceptable input format.
return {'Error': "Unable to save lecturer details. Ensure that the inputs are correct"}
elif str(user_type) == 'Mentor':
try:
# Gets additional input data for a mentor
profession = request.form.get('profession')
company = request.form.get('company')
title = request.form.get('title')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(mentor_model.Mentor(
user_name, surname, first_name, email, password, user_type, profession, company, title))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a mentor hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get mentor details. Ensure that the inputs are correct'}
else: # To notify if a user hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get user details. Ensure that the inputs are correct'}
# Function to retrieve a user details based on id
@app.route('/users/<id>', methods=['GET'])
@login_required
def get_user(id):
try:
resp = controller.get_user(id) #Gets the details of a user given the user id.
return resp.to_dict()
except:
return {'Error': 'User not found'}
# Function to login
@app.route('/token/auth', methods=['POST'])
def login():
# Gets email and password inputed by the user
email = request.form.get('email')
pass_word = request.form.get('password')
try:
password = controller.get_password(email) # Checks if email has been registered. If this line fails, it runs the except block
if bcrypt.check_password_hash(password[0], pass_word): # Checks if password is correct
user_name = controller.get_user_name(email)
user_id = controller.get_user_id(email)
print(user_name)
access_token = create_access_token(identity={'User name': user_name[0],'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
refresh_token = create_refresh_token(identity = {'User name': user_name[0], 'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
resp = jsonify({'login': True, 'user name': user_name[0]})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
return resp, 200
else:
return jsonify({'login': False}), 401
except:
return jsonify({'login': False}), 401
@app.route('/token/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
# Create the new access token
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
# Set the access JWT and CSRF double submit protection cookies
# in this response
resp = jsonify({'refresh': True})
set_access_cookies(resp, access_token)
return resp, 200
@app.route('/token/remove', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
#This route is to return the image from the local storage.
@app.route('/uploads/<fc>/<yr>/<mn>/<dy>/<filename>', methods=['GET'])
def get_file(fc, yr, mn, dy, filename):
return send_from_directory((os.path.join(app.config['UPLOAD_PATH'], fc, str(yr), str(mn), str(dy))), filename)
#checks file category
def get_file_category(uploaded_file):
file_mime = uploaded_file.content_type
# extracts the file format e.g application, media, etc
file_category = re.findall('(.*)\/', file_mime)[0]
return file_category
#This route is to upload a file
@app.route('/api/file', methods=['POST'])
@jwt_required
def uploadfeaturedimage_file():
uploaded_file = request.files['file']
ts = int(datetime.datetime.now().timestamp())
date = datetime.datetime.fromtimestamp(ts)
yr = date.year
mn = date.month
dy = date.day
filename = secure_filename(uploaded_file.filename)
if filename != '':
name = filename.split('.')[0]
file_ext = filename.split('.')[1].lower()
file_category = get_file_category(uploaded_file)
if file_ext not in app.config['UPLOAD_EXTENSIONS']:
abort(400, description="File format not supported")
filename = name + str(ts) + '.' + file_ext
try:
if os.path.isdir('./uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)) is True:
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
else:
directory = './uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)
print(directory)
os.makedirs(directory)
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
stat = 'upload successful' # Default status if file upload is successful
link = 'http://127.0.0.1:5000/uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy) + '/' + str(filename)
except:
stat = 'upload not succesful'
link = 'no link returned because upload was unsuccessful'
return {'status': stat, 'link': link}
# Create a Course by a lecturer
@app.route('/course/create', methods=['POST'])
@login_required
def course_create():
try:
# Gets all input data from the user
code = request.form.get('code')
title = request.form.get('title')
unit = request.form.get('unit')
except:
return {'Error': 'Unable to retrieve course details'}
try:
code = code.replace(" ", "")
code = code.upper()
# Check to see if course code is already in the DB
controller.add_data(course_model.Course(code, title, unit))
return {'Status': 'Course registered successfully'}
except: # Exception as e:
# raise
return {'Status': 'registration not successful'}
# Join a course created
@app.route ('/course/join', methods=['POST'])
@login_required
def course_join():
try:
# Get all inputs from the user
course_id = request.form.get('course_id')
user_id = request.form.get('user_id')
except:
return {'Error': 'Unable to retrieve details'}
# add data in the course_member table
try:
controller.add_data(course_mem.CourseMem(course_id, user_id))
if controller.get_user_type(user_id) == UserType.Lecturer:
controller.add_data(course_lecturer.CourseLecturer(course_id, user_id))
return {'Status': 'Successfully joined the course'}
except:
return {'Error': 'Unable to join course'}
#Get list of active courses a student or lecturer is part of.
@app.route('/courses/<id>', methods=['GET'])
@login_required
def courses(id):
try:
courses = controller.get_courses(id)
return courses
except:
return {'Error': 'failed to get courses'}
# Register a Course by a Student
@app.route('/course/register', methods=['POST'])
@login_required
def course_register():
try:
# Gets all input data from the student
student_id = request.form.get('student_id')
course_id = request.form.get('course_id')
grade_id = request.form.get('grade_id')
semester = request.form.get('semester')
session_id = request.form.get('session_id')
except:
return {'Error': 'Unable to retrieve details'}
try:
# add the data to the database
controller.add_data(course_reg.CourseRegistration(student_id, course_id, grade_id, semester, session_id))
return {'Status': 'Successfully registered the course'}
except:
return {'Error': 'Unable to register the course'}
#Get list of courses a student has registered.
@app.route('/courses/registered/<id>', methods=['GET'])
@login_required
def registered_courses(id):
courses = controller.get_registered_courses(id)
return courses
# Route to update the event for a user.
@app.route('/calendar/update', methods=['POST'])
@login_required
def | update_calendar | identifier_name |
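The course routes above are wrapped in a login_required decorator that simply checks the request cookies for csrf_access_token. A framework-free version of that pattern is sketched below; the injected get_cookies callable is an illustrative stand-in for Flask's request.cookies, not part of the real application.

from functools import wraps

def login_required(get_cookies):
    """Decorator factory; get_cookies() stands in for Flask's request.cookies."""
    def decorator(view):
        @wraps(view)                          # preserve the view's name for routing/debugging
        def wrapper(*args, **kwargs):
            if "csrf_access_token" not in get_cookies():
                return {"Error": "You have to login first"}
            return view(*args, **kwargs)
        return wrapper
    return decorator

cookies = {}

@login_required(lambda: cookies)
def profile():
    return {"ok": True}

print(profile())                              # {'Error': 'You have to login first'}
cookies["csrf_access_token"] = "token"
print(profile())                              # {'ok': True}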
|
app.py | ) #Create instance of controller to perform some operations
n_controller = news_controller.NewsController(db) # Create instance of news controller to perform some operations
b_controller = blogger_controller.BloggerController(db) #Create instance of blogger controller to perform some operations
CORS(app)
# Sets up the function for checking if a user is logged in
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'csrf_access_token' not in request.cookies:
return {'Error': 'You have to login first'}
return f(*args, **kwargs)
return decorated_function
# Register a user - either a student, lecturer or mentor
@app.route('/users/register', methods = ['POST'])
def register ():
try:
# Gets all input data from the user
user_name = request.form.get('user_name')
surname = request.form.get('surname')
first_name = request.form.get('first_name')
email = request.form.get('email')
password = bcrypt.generate_password_hash(request.form.get('password')).decode('utf-8')
user_type = request.form.get('user_type')
getemail = controller.get_email(email) # Checks to see if the entry email is in the database. If not, it returns None
except:
return {'Error': 'Unable to retrieve user details'}
if str(user_type) == 'Student':
try:
# Gets additional input for a student
program_id = request.form.get('program_id')
matric_no = request.form.get('matric_no')
level_id = request.form.get('level_id')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(student_model.Student(
user_name, surname, first_name, email, password, user_type, program_id, matric_no, level_id))
class_id = controller.get_class_id(program_id, level_id)
user_id = controller.get_user_id(email)
controller.add_data(class_mem.ClassMem(class_id, user_id))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a student hasn't conformed to an acceptable input format.
return {'Error': 'Unable to retrieve student details. Ensure the inputs are valid'}
elif str(user_type) == 'Lecturer':
try:
# Get additional inputs for lecturers
department_id = request.form.get('department_id')
title = request.form.get('title')
position = request.form.get('position')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(lecturer_model.Lecturer(
user_name, surname, first_name, email, password, user_type, department_id, title, position))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error':'This email has already been used to register'}
except: # To notify if a lecturer hasn't conformed to an acceptable input format.
return {'Error': "Unable to save lecturer details. Ensure that the inputs are correct"}
elif str(user_type) == 'Mentor':
try:
# Gets additional input data for a mentor
profession = request.form.get('profession')
company = request.form.get('company')
title = request.form.get('title')
if getemail == None: # If email is not already registered, input the data into the database
controller.add_data(mentor_model.Mentor(
user_name, surname, first_name, email, password, user_type, profession, company, title))
return {'success': 'succesfully updated in the database'}
elif email == getemail[0]: # If email is already used, notify the user
return {'Error': 'This email has already been used to register'}
except: # To notify if a mentor hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get mentor details. Ensure that the inputs are correct'}
else: # To notify if a user hasn't conformed to an acceptable input format.
return {'Error': 'Unable to get user details. Ensure that the inputs are correct'}
| try:
resp = controller.get_user(id) #Gets the details of a user given the user id.
return resp.to_dict()
except:
return {'Error': 'User not found'}
# Function to login
@app.route('/token/auth', methods=['POST'])
def login():
# Gets email and password inputed by the user
email = request.form.get('email')
pass_word = request.form.get('password')
try:
password = controller.get_password(email) # Checks if email has been registered. If this line fails, it runs the except block
if bcrypt.check_password_hash(password[0], pass_word): # Checks if password is correct
user_name = controller.get_user_name(email)
user_id = controller.get_user_id(email)
print(user_name)
access_token = create_access_token(identity={'User name': user_name[0],'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
refresh_token = create_refresh_token(identity = {'User name': user_name[0], 'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
resp = jsonify({'login': True, 'user name': user_name[0]})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
return resp, 200
else:
return jsonify({'login': False}), 401
except:
return jsonify({'login': False}), 401
@app.route('/token/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
# Create the new access token
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
# Set the access JWT and CSRF double submit protection cookies
# in this response
resp = jsonify({'refresh': True})
set_access_cookies(resp, access_token)
return resp, 200
@app.route('/token/remove', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
#This route is to return the image from the local storage.
@app.route('/uploads/<fc>/<yr>/<mn>/<dy>/<filename>', methods=['GET'])
def get_file(fc, yr, mn, dy, filename):
return send_from_directory((os.path.join(app.config['UPLOAD_PATH'], fc, str(yr), str(mn), str(dy))), filename)
#checks file category
def get_file_category(uploaded_file):
file_mime = uploaded_file.content_type
# extracts the file format e.g application, media, etc
file_category = re.findall('(.*)\/', file_mime)[0]
return file_category
#This route is to upload a file
@app.route('/api/file', methods=['POST'])
@jwt_required
def uploadfeaturedimage_file():
uploaded_file = request.files['file']
ts = int(datetime.datetime.now().timestamp())
date = datetime.datetime.fromtimestamp(ts)
yr = date.year
mn = date.month
dy = date.day
filename = secure_filename(uploaded_file.filename)
if filename != '':
name = filename.split('.')[0]
file_ext = filename.split('.')[1].lower()
file_category = get_file_category(uploaded_file)
if file_ext not in app.config['UPLOAD_EXTENSIONS']:
abort(400, description="File format not supported")
filename = name + str(ts) + '.' + file_ext
try:
if os.path.isdir('./uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)) is True:
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
else:
directory = './uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy)
print(directory)
os.makedirs(directory)
uploaded_file.save(os.path.join(
app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy), filename))
stat = 'upload successful' # Default status if file upload is successful
link = 'http://127.0.0.1:5000/uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy) + '/' + str(filename)
except:
stat = 'upload not succesful'
link = 'no link returned because upload was unsuccessful'
return {'status': stat, 'link': link}
# Create a Course by a lecturer
@app.route('/course/create', methods=['POST'])
@login_required
def course_create():
try:
# Gets all input data from the user
code = request.form.get('code')
title = request.form.get('title')
unit = | # Function to retrieve a user details based on id
@app.route('/users/<id>', methods=['GET'])
@login_required
def get_user(id): | random_line_split |
DQN_Snake.py | import matplotlib.pyplot as plt
from BrainDQN_Nature import BrainDQN
##################################################################################################################
##################################################################################################################
import random, pygame
from pygame.locals import *
FPS = 200 # 螢幕刷新率(在這裡相當於貪吃蛇的速度)
WINDOWWIDTH = 300 # 螢幕寬度
WINDOWHEIGHT = 300 # 螢幕高度
CELLSIZE = 20 # 小方格的大小
ALIVE_REWARD = 0 #-0.05 #存活獎勵
WIN_REWARD = 1 #獎勵
LOSE_REWARD = -1 #懲罰
# 斷言,螢幕的寬和高必須能被方塊大小整除
assert WINDOWWIDTH % CELLSIZE == 0, "Window width must be a multiple of cell size."
assert WINDOWHEIGHT % CELLSIZE == 0, "Window height must be a multiple of cell size."
# 橫向和縱向的方格數
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)
# 定義幾個常用的顏色
# R G B
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
DARKGREEN = ( 0, 155, 0)
DARKGRAY = ( 40, 40, 40)
BGCOLOR = BLACK
# 定義貪吃蛇的動作
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
# 神經網路的輸出
MOVE_STAY = [1, 0, 0, 0, 0]
MOVE_UP = [0, 1, 0, 0, 0]
MOVE_DOWN = [0, 0, 1, 0, 0]
MOVE_LEFT = [0, 0, 0, 1, 0]
MOVE_RIGHT = [0, 0, 0, 0, 1]
class Game(object):
def __init__(self):
# 定義全域變數
global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init() # 初始化pygame
FPSCLOCK = pygame.time.Clock() # 獲得pygame時鐘
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) # 設置螢幕寬高
BASICFONT = pygame.font.Font('freesansbold.ttf', 18) # BASICFONT
pygame.display.set_caption('Greedy Snake') # 設置視窗的標題
self.HEAD = 0 # syntactic sugar: index of the worm's head # 貪吃蛇的頭()
self.Bodylen=3
#showStartScreen() # 顯示開始畫面
self.runGame()
def getRandomLocation(self): # 隨機生成一個座標位置
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def runGame(self):
# 隨機初始化設置一個點作為貪吃蛇的起點
startx = random.randint(5, CELLWIDTH - 6)
starty = random.randint(5, CELLHEIGHT - 6)
# 以這個點為起點,建立一個長度為3格的貪吃蛇(陣列)
self.wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
self.direction = RIGHT # 初始化一個運動的方向
self.apple = self.getRandomLocation() # 隨機一個apple的位置
# 根據 wormCoords 陣列繪製貪吃蛇
def drawWorm(self,wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
# 根據 coord 繪製 apple
def drawApple(self,coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect,8)
# 繪製所有的方格
def drawGrid(self):
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
def gen_action(self,optfromNN):
if optfromNN[0]==1: return MOVE_STAY
elif optfromNN[1]==1: return MOVE_UP
elif optfromNN[2]==1: return MOVE_DOWN
elif optfromNN[3]==1: return MOVE_LEFT
elif optfromNN[4]==1: return MOVE_RIGHT
def step(self, action):
pygame.display.update()
terminal=False
reward=0
if action==MOVE_LEFT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = LEFT
elif action==MOVE_RIGHT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = RIGHT
elif action==MOVE_UP and self.direction!=UP and self.direction!=DOWN: self.direction = UP
elif action==MOVE_DOWN and self.direction!=UP and self.direction!=DOWN: self.direction = DOWN
elif action==MOVE_STAY : pass
# 檢查貪吃蛇是否撞到邊界
if self.wormCoords[self.HEAD]['x'] == -1 or self.wormCoords[self.HEAD]['x'] == CELLWIDTH or self.wormCoords[self.HEAD]['y'] == -1 or self.wormCoords[self.HEAD]['y'] == CELLHEIGHT:
terminal=True
reward=LOSE_REWARD
print ("撞牆死....")
for wormBody in self.wormCoords[1:]: # 檢查貪吃蛇是否撞到自己
if wormBody['x'] == self.wormCoords[self.HEAD]['x'] and wormBody['y'] == self.wormCoords[self.HEAD]['y']:
terminal=True
reward=LOSE_REWARD
print ("撞自己死....")
break
if terminal==False:
# 檢查貪吃蛇是否吃到apple
if self.wormCoords[self.HEAD]['x'] == self.apple['x'] and self.wormCoords[self.HEAD]['y'] == self.apple['y']:
self.apple = self.getRandomLocation() # 重新隨機生成一個apple # 不移除蛇的最後一個尾巴格
reward=WIN_REWARD
self.Bodylen+=1
else: #沒吃到apple也是要給予存活獎勵
reward=ALIVE_REWARD/self.Bodylen
del self.wormCoords[-1] # 移除蛇的最後一個尾巴格
# 根據方向,添加一個新的蛇頭,以這種方式來移動貪吃蛇
if self.direction == UP:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] - 1}
elif self.direction == DOWN:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] + 1}
elif self.direction == LEFT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] - 1, 'y': self.wormCoords[self.HEAD]['y']}
elif self.direction == RIGHT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] + 1, 'y': self.wormCoords[self.HEAD]['y']}
self.wormCoords.insert(0, newHead) # 插入新的蛇頭在陣列的最前面
#drawScore(len(self.wormCoords) - 3) # 繪製分數(分數為貪吃蛇陣列當前的長度-3)
DISPLAYSURF.fill(BGCOLOR) # 繪製背景
self.drawGrid() # 繪製所有的方格
self.drawWorm(self.wormCoords) # 繪 | random_line_split |
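Game.step above returns the raw pygame frame via surfarray; before such a frame reaches the DQN it is shrunk to 80x80 and binarised, as the preprocess helper further down in this dump does. The standalone sketch below reproduces that step on a synthetic array so it runs without the game; cv2 is opencv-python and the frame contents are made up for the example.

import cv2
import numpy as np

def preprocess(frame):
    """RGB game frame -> (80, 80, 1) binary image for the DQN input stack."""
    small = cv2.resize(frame, (80, 80))                          # downscale first
    grey = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)               # then to one channel
    _, binary = cv2.threshold(grey, 1, 255, cv2.THRESH_BINARY)   # any non-black pixel -> 255
    return np.reshape(binary, (80, 80, 1))

fake_frame = np.zeros((300, 300, 3), dtype=np.uint8)
fake_frame[100:120, 100:120] = (0, 155, 0)                       # one "snake segment"
print(preprocess(fake_frame).shape)                              # (80, 80, 1)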
||
DQN_Snake.py | global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init() # 初始化pygame
FPSCLOCK = pygame.time.Clock() # 獲得pygame時鐘
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) # 設置螢幕寬高
BASICFONT = pygame.font.Font('freesansbold.ttf', 18) # BASICFONT
pygame.display.set_caption('Greedy Snake') # 設置視窗的標題
self.HEAD = 0 # syntactic sugar: index of the worm's head # 貪吃蛇的頭()
self.Bodylen=3
#showStartScreen() # 顯示開始畫面
self.runGame()
def getRandomLocation(self): # 隨機生成一個座標位置
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def runGame(self):
# 隨機初始化設置一個點作為貪吃蛇的起點
startx = random.randint(5, CELLWIDTH - 6)
starty = random.randint(5, CELLHEIGHT - 6)
# 以這個點為起點,建立一個長度為3格的貪吃蛇(陣列)
self.wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
self.direction = RIGHT # 初始化一個運動的方向
self.apple = self.getRandomLocation() # 隨機一個apple的位置
# 根據 wormCoords 陣列繪製貪吃蛇
def drawWorm(self,wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
# 根據 coord 繪製 apple
def drawApple(self,coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect,8)
# 繪製所有的方格
def drawGrid(self):
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
def gen_action(self,optfromNN):
if optfromNN[0]==1: return MOVE_STAY
elif optfromNN[1]==1: return MOVE_UP
elif optfromNN[2]==1: return MOVE_DOWN
elif optfromNN[3]==1: return MOVE_LEFT
elif optfromNN[4]==1: return MOVE_RIGHT
def step(self, action):
pygame.display.update()
terminal=False
reward=0
if action==MOVE_LEFT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = LEFT
elif action==MOVE_RIGHT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = RIGHT
elif action==MOVE_UP and self.direction!=UP and self.direction!=DOWN: self.direction = UP
elif action==MOVE_DOWN and self.direction!=UP and self.direction!=DOWN: self.direction = DOWN
elif action==MOVE_STAY : pass
# 檢查貪吃蛇是否撞到邊界
if self.wormCoords[self.HEAD]['x'] == -1 or self.wormCoords[self.HEAD]['x'] == CELLWIDTH or self.wormCoords[self.HEAD]['y'] == -1 or self.wormCoords[self.HEAD]['y'] == CELLHEIGHT | reward=LOSE_REWARD
print ("撞牆死....")
for wormBody in self.wormCoords[1:]: # 檢查貪吃蛇是否撞到自己
if wormBody['x'] == self.wormCoords[self.HEAD]['x'] and wormBody['y'] == self.wormCoords[self.HEAD]['y']:
terminal=True
reward=LOSE_REWARD
print ("撞自己死....")
break
if terminal==False:
# 檢查貪吃蛇是否吃到apple
if self.wormCoords[self.HEAD]['x'] == self.apple['x'] and self.wormCoords[self.HEAD]['y'] == self.apple['y']:
self.apple = self.getRandomLocation() # 重新隨機生成一個apple # 不移除蛇的最後一個尾巴格
reward=WIN_REWARD
self.Bodylen+=1
else: #沒吃到apple也是要給予存活獎勵
reward=ALIVE_REWARD/self.Bodylen
del self.wormCoords[-1] # 移除蛇的最後一個尾巴格
# 根據方向,添加一個新的蛇頭,以這種方式來移動貪吃蛇
if self.direction == UP:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] - 1}
elif self.direction == DOWN:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] + 1}
elif self.direction == LEFT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] - 1, 'y': self.wormCoords[self.HEAD]['y']}
elif self.direction == RIGHT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] + 1, 'y': self.wormCoords[self.HEAD]['y']}
self.wormCoords.insert(0, newHead) # 插入新的蛇頭在陣列的最前面
#drawScore(len(self.wormCoords) - 3) # 繪製分數(分數為貪吃蛇陣列當前的長度-3)
DISPLAYSURF.fill(BGCOLOR) # 繪製背景
self.drawGrid() # 繪製所有的方格
self.drawWorm(self.wormCoords) # 繪製貪吃蛇
self.drawApple(self.apple) # 繪製apple
pygame.display.update() # 更新螢幕
FPSCLOCK.tick(FPS) # 設置幀率
if terminal==True:
gameOverFont = pygame.font.Font('freesansbold.ttf', 40)
gameOverSurf = gameOverFont.render('Game Over', True, WHITE)
gameOverRect = gameOverSurf.get_rect()
gameOverRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameOverRect.height-10)
DISPLAYSURF.blit(gameOverSurf, gameOverRect)
# 獲得遊戲畫面的影像
screen_image = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
# 返回遊戲畫面和對應的賞罰
return screen_image,reward, terminal
##################################################################################################################
##################################################################################################################
# preprocess raw image to 80*80 gray image
def preprocess(observation):
observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
#plt.imshow(observation, cmap ='gray'); plt.show();
return np.reshape(observation,(80,80,1))
def playGame():
# Step 0: Define reort
win = 0
lose = 0
points = 0
# Step 1: init BrainDQN
actions = 5
brain = BrainDQN(actions)
# Step 2: init Game
bg = Game()
# Step 3: play game
# Step 3.1: obtain init state
action0 = bg.gen_action([1,0,0,0,0]) # do nothing
observation0, reward0, terminal = bg.step(action0)
observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)
brain.setInitState(observation0)
# Step 3.2: run the game
while True:
| :
terminal=True
| conditional_block |
DQN_Snake.py | global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init() # 初始化pygame
FPSCLOCK = pygame.time.Clock() # 獲得pygame時鐘
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) # 設置螢幕寬高
BAS | andom.randint(5, CELLHEIGHT - 6)
# 以這個點為起點,建立一個長度為3格的貪吃蛇(陣列)
self.wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
self.direction = RIGHT # 初始化一個運動的方向
self.apple = self.getRandomLocation() # 隨機一個apple的位置
# 根據 wormCoords 陣列繪製貪吃蛇
def drawWorm(self,wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
# 根據 coord 繪製 apple
def drawApple(self,coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect,8)
    # draw all grid cells
def drawGrid(self):
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
def gen_action(self,optfromNN):
if optfromNN[0]==1: return MOVE_STAY
elif optfromNN[1]==1: return MOVE_UP
elif optfromNN[2]==1: return MOVE_DOWN
elif optfromNN[3]==1: return MOVE_LEFT
elif optfromNN[4]==1: return MOVE_RIGHT
def step(self, action):
pygame.display.update()
terminal=False
reward=0
if action==MOVE_LEFT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = LEFT
elif action==MOVE_RIGHT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = RIGHT
elif action==MOVE_UP and self.direction!=UP and self.direction!=DOWN: self.direction = UP
elif action==MOVE_DOWN and self.direction!=UP and self.direction!=DOWN: self.direction = DOWN
elif action==MOVE_STAY : pass
        # check whether the snake has hit the boundary
if self.wormCoords[self.HEAD]['x'] == -1 or self.wormCoords[self.HEAD]['x'] == CELLWIDTH or self.wormCoords[self.HEAD]['y'] == -1 or self.wormCoords[self.HEAD]['y'] == CELLHEIGHT:
terminal=True
reward=LOSE_REWARD
print ("撞牆死....")
        for wormBody in self.wormCoords[1:]: # check whether the snake has hit itself
if wormBody['x'] == self.wormCoords[self.HEAD]['x'] and wormBody['y'] == self.wormCoords[self.HEAD]['y']:
terminal=True
reward=LOSE_REWARD
print ("撞自己死....")
break
if terminal==False:
            # check whether the snake has eaten the apple
if self.wormCoords[self.HEAD]['x'] == self.apple['x'] and self.wormCoords[self.HEAD]['y'] == self.apple['y']:
                self.apple = self.getRandomLocation() # generate a new apple at a random location # do not remove the snake's last tail segment
reward=WIN_REWARD
self.Bodylen+=1
            else: # no apple eaten; still give a survival reward
                reward=ALIVE_REWARD/self.Bodylen
            del self.wormCoords[-1] # remove the snake's last tail segment
        # add a new head in the current direction; this is how the snake moves
if self.direction == UP:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] - 1}
elif self.direction == DOWN:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] + 1}
elif self.direction == LEFT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] - 1, 'y': self.wormCoords[self.HEAD]['y']}
elif self.direction == RIGHT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] + 1, 'y': self.wormCoords[self.HEAD]['y']}
        self.wormCoords.insert(0, newHead) # insert the new head at the front of the list
        #drawScore(len(self.wormCoords) - 3) # draw the score (score = current snake length - 3)
        DISPLAYSURF.fill(BGCOLOR) # draw the background
        self.drawGrid() # draw all grid cells
        self.drawWorm(self.wormCoords) # draw the snake
        self.drawApple(self.apple) # draw the apple
        pygame.display.update() # refresh the screen
        FPSCLOCK.tick(FPS) # cap the frame rate
if terminal==True:
gameOverFont = pygame.font.Font('freesansbold.ttf', 40)
gameOverSurf = gameOverFont.render('Game Over', True, WHITE)
gameOverRect = gameOverSurf.get_rect()
gameOverRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameOverRect.height-10)
DISPLAYSURF.blit(gameOverSurf, gameOverRect)
        # grab the current game frame as an image
screen_image = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
        # return the game frame and the corresponding reward
return screen_image,reward, terminal
##################################################################################################################
##################################################################################################################
# preprocess raw image to 80*80 gray image
def preprocess(observation):
observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
#plt.imshow(observation, cmap ='gray'); plt.show();
return np.reshape(observation,(80,80,1))
def playGame():
    # Step 0: Define report
win = 0
lose = 0
points = 0
# Step 1: init BrainDQN
actions = 5
brain = BrainDQN(actions)
# Step 2: init Game
bg = Game()
# Step 3: play game
# Step 3.1: obtain init state
action0 = bg.gen_action([1,0,0,0,0]) # do nothing
observation0, reward0, terminal = bg.step(action0)
observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)
brain.setInitState(observation0)
# Step 3.2: run the game
while True | ICFONT = pygame.font.Font('freesansbold.ttf', 18) # BASICFONT
        pygame.display.set_caption('Greedy Snake') # set the window title
        self.HEAD = 0 # syntactic sugar: index of the worm's head (the snake's head)
        self.Bodylen=3
        #showStartScreen() # show the start screen
self.runGame()
    def getRandomLocation(self): # generate a random grid coordinate
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def runGame(self):
        # randomly pick a starting point for the snake
startx = random.randint(5, CELLWIDTH - 6)
starty = r | identifier_body |
DQN_Snake.py | FPSCLOCK, DISPLAYSURF, BASICFONT
        pygame.init() # initialize pygame
        FPSCLOCK = pygame.time.Clock() # get the pygame clock
        DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) # set the window width and height
BASICFONT = pygame.font.Font('freesansbold.ttf', 18) # BASICFONT
        pygame.display.set_caption('Greedy Snake') # set the window title
        self.HEAD = 0 # syntactic sugar: index of the worm's head (the snake's head)
        self.Bodylen=3
        #showStartScreen() # show the start screen
self.runGame()
    def getRandomLocation(self): # generate a random grid coordinate
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def runGame(self):
        # randomly pick a starting point for the snake
startx = random.randint(5, CELLWIDTH - 6)
starty = random.randint(5, CELLHEIGHT - 6)
        # starting from this point, build a snake (list) three cells long
self.wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
        self.direction = RIGHT # initialize the movement direction
        self.apple = self.getRandomLocation() # pick a random position for the apple
    # draw the snake according to the wormCoords list
def drawWorm(self,wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
    # draw the apple at coord
def drawApple(self,coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect,8)
    # draw all grid cells
def drawGrid(self):
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
def gen_action(self,optfromNN):
if optfromNN[0]==1: return MOVE_STAY
elif optfromNN[1]==1: return MOVE_UP
elif optfromNN[2]==1: return MOVE_DOWN
elif optfromNN[3]==1: return MOVE_LEFT
elif optfromNN[4]==1: return MOVE_RIGHT
def step(self, action):
pygame.display.update()
terminal=False
reward=0
if action==MOVE_LEFT and self.direction!=LEFT and self.direction!=RIGHT : self.direction = LEFT
elif action==MOVE_RI | lf.direction!=LEFT and self.direction!=RIGHT : self.direction = RIGHT
elif action==MOVE_UP and self.direction!=UP and self.direction!=DOWN: self.direction = UP
elif action==MOVE_DOWN and self.direction!=UP and self.direction!=DOWN: self.direction = DOWN
elif action==MOVE_STAY : pass
        # check whether the snake has hit the boundary
if self.wormCoords[self.HEAD]['x'] == -1 or self.wormCoords[self.HEAD]['x'] == CELLWIDTH or self.wormCoords[self.HEAD]['y'] == -1 or self.wormCoords[self.HEAD]['y'] == CELLHEIGHT:
terminal=True
reward=LOSE_REWARD
print ("撞牆死....")
        for wormBody in self.wormCoords[1:]: # check whether the snake has hit itself
if wormBody['x'] == self.wormCoords[self.HEAD]['x'] and wormBody['y'] == self.wormCoords[self.HEAD]['y']:
terminal=True
reward=LOSE_REWARD
print ("撞自己死....")
break
if terminal==False:
            # check whether the snake has eaten the apple
if self.wormCoords[self.HEAD]['x'] == self.apple['x'] and self.wormCoords[self.HEAD]['y'] == self.apple['y']:
                self.apple = self.getRandomLocation() # generate a new apple at a random location # do not remove the snake's last tail segment
reward=WIN_REWARD
self.Bodylen+=1
            else: # no apple eaten; still give a survival reward
                reward=ALIVE_REWARD/self.Bodylen
            del self.wormCoords[-1] # remove the snake's last tail segment
            # add a new head in the current direction; this is how the snake moves
if self.direction == UP:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] - 1}
elif self.direction == DOWN:
newHead = {'x': self.wormCoords[self.HEAD]['x'], 'y': self.wormCoords[self.HEAD]['y'] + 1}
elif self.direction == LEFT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] - 1, 'y': self.wormCoords[self.HEAD]['y']}
elif self.direction == RIGHT:
newHead = {'x': self.wormCoords[self.HEAD]['x'] + 1, 'y': self.wormCoords[self.HEAD]['y']}
            self.wormCoords.insert(0, newHead) # insert the new head at the front of the list
            #drawScore(len(self.wormCoords) - 3) # draw the score (score = current snake length - 3)
            DISPLAYSURF.fill(BGCOLOR) # draw the background
            self.drawGrid() # draw all grid cells
            self.drawWorm(self.wormCoords) # draw the snake
            self.drawApple(self.apple) # draw the apple
            pygame.display.update() # refresh the screen
            FPSCLOCK.tick(FPS) # cap the frame rate
if terminal==True:
gameOverFont = pygame.font.Font('freesansbold.ttf', 40)
gameOverSurf = gameOverFont.render('Game Over', True, WHITE)
gameOverRect = gameOverSurf.get_rect()
gameOverRect.midtop = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2-gameOverRect.height-10)
DISPLAYSURF.blit(gameOverSurf, gameOverRect)
            # grab the current game frame as an image
screen_image = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
            # return the game frame and the corresponding reward
return screen_image,reward, terminal
##################################################################################################################
##################################################################################################################
# preprocess raw image to 80*80 gray image
def preprocess(observation):
observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
#plt.imshow(observation, cmap ='gray'); plt.show();
return np.reshape(observation,(80,80,1))
def playGame():
    # Step 0: Define report
win = 0
lose = 0
points = 0
# Step 1: init BrainDQN
actions = 5
brain = BrainDQN(actions)
# Step 2: init Game
bg = Game()
# Step 3: play game
# Step 3.1: obtain init state
action0 = bg.gen_action([1,0,0,0,0]) # do nothing
observation0, reward0, terminal = bg.step(action0)
observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)
brain.setInitState(observation0)
# Step 3.2: run the game
while | GHT and se | identifier_name |
JS_Exercises.js | ["Volvo", "Jeep", "Mercedes"];
// cars[0] = "Ford";
// console.log(cars[0]);
// JS Arrays 3-1: Alert the number of items in an array, using the correct Array property.
// const cars = ["Volvo", "Jeep", "Mercedes"];
// alert(cars.length);
// JS Array Methods 1-1: Use the correct Array method to remove the last item of the fruits array.
// const fruits = ["Banana", "Orange", "Apple"];
// fruits.pop();
// fruits.splice(-1);
// console.log(fruits);
// JS Array Methods 2-1: Use the correct Array method to add "Kiwi" to the fruits array.
// const fruits = ["Banana", "Orange", "Apple"];
// fruits.push("Kiwi")
// console.log(fruits);
// JS Array Methods 3-1: Use the splice() method to remove "Orange" and "Apple" from fruits.
// const fruits = ["Banana", "Orange", "Apple", "Kiwi"];
// fruits.splice(1, 2);
// console.log(fruits);
// JS Array Sort 1-1: Use the correct Array method to sort the fruits array alphabetically.
// const fruits = ["Banana", "Orange", "Apple", "Kiwi"];
// fruits.sort();
// console.log(fruits);
// JS Dates 1-1: Create a Date object and alert the current date and time.
// const d = new Date();
// console.log(d);
// alert(d);
// JS Dates 2-1: Use the correct Date method to extract the year (four digits) out of a date object.
// const d = new Date();
// year = d.getFullYear();
// console.log(year);
// JS Dates 3-1: Use the correct Date method to get the month (0-11) out of a date object.
// const d = new Date();
// month = d.getMonth();
// console.log(month);
// JS Dates 4-1: Use the correct Date method to set the year of a date object to 2020.
// const d = new Date();
// d.setFullYear(2020);
// console.log(d);
// JS Math 1-1: Use the correct Math method to create a random number.
// let r = Math.random();
// console.log(r);
// JS Math 2-1: Use the correct Math method to return the largest number of 10 and 20.
// let x = Math.max(10, 20);
// console.log(x);
// JS Math 3-1: Use the correct Math method to round a number to the nearest integer.
// let x = Math.round(5.3);
// console.log(x);
// JS Math 4-1: Use the correct Math method to get the square root of 9.
// let x = Math.sqrt(9);
// console.log(x);
// JS Comparisons 1-1: Choose the correct comparison operator to alert true, when x is greater than y.
// let x = 10;
// let y = 5;
// alert(x > y);
// console.log(x > y);
// JS Comparisons 2-1: Choose the correct comparison operator to alert true, when x is equal to y.
// let x = 10;
// let y = 10;
// console.log(x == y);
// alert(x == y);
// JS Comparisons 3-1: Choose the correct comparison operator to alert true, when x is NOT equal to y.
// let x = 10;
// let y = 5;
// console.log(x != y);
// alert(x != y);
// JS Comparisons 4-1: Choose the correct conditional (ternary) operator to alert "Too young" if age is less than 18, otherwise alert "Old enough".
// var age = 20;
// var voteable = (age < 18) ? "Too young" : "Old enough";
// console.log(voteable);
// JS Conditions 1-1: Fix the if statement to alert "Hello World" if x is greater than y.
// var x = 100;
// var y = 15;
// if (x > y) {
// alert("Hello World");
// };
// JS Conditions 2-1: Fix the if statement to alert "Hello World" if x is greater than y, otherwise alert "Goodbye".
// var x = 8;
// var y = 9;
// if (x > y) {
// alert("Hello World");
// } else {
// alert("Goodbye");
// }
// JS Switch 1-1: Create a switch statement that will alert "Hello" if fruits is "banana", and "Welcome" if fruits is "apple".
// fruits = "Apples";
// if (fruits == "Banana") {
// console.log("Hello1");
// } else if (fruits == "Apple") {
// console.log("Welcome1");
// } else {
// console.log("No Match");
// }
// switch(fruits) {
// case "Banana":
// console.log("Hello")
// break;
// case "Apple":
// console.log("Welcome")
// break;
// }
// JS Switch 2-1: Add a section that will alert("Neither") if fruits is neither "banana" nor "apple".
// fruits = "Orange";
// switch (fruits) {
// case "Banana":
// console.log("Match B")
// break;
// case "Apple":
// console.log("Match A")
// break;
// default:
// console.log("Neither")
// }
// JS For Loops 1-1: Create a loop that runs from 0 to 9.
// let i;
// for (i = 0; i < 10; i++) {
// console.log(i);
// }
// JS For Loops 2-1: Create a loop that runs through each item in the fruits array.
// const fruits = ["Apple", "Banana", "Orange"];
// for (x in fruits) {
// console.log(x);
// }
// JS While Loops 1-1: Create a loop that runs as long as i is less than 10.
// let i = 0;
// while (i < 10) {
// console.log(i);
// i++
// };
// JS While Loops 2-1: Create a loop that runs as long as i is less than 10, but increase i with 2 each time.
// let i = 0;
// while (i < 10) {
// console.log(i);
// i = i + 2;
// };
// JS Break Loops 1-1: Make the loop stop when i is 5.
// for (i = 0; i < 10; i++) {
// console.log(i);
// if (i == 5) {
// break;
// }
// }
// JS Break Loops 2-1: Make the loop jump to the next iteration when i is 5.
// for (i = 0; i < 10; i++) {
// if (i ==5) {
// continue;
// }
// console.log(i);
// }
// JS HTML DOM 1-1: Use the getElementById method to find the <p> element, and change its text to "Hello".
/* <p id="demo">This is the text</p>
<script>document.getElementById("demo").innerHTML = "Hello";</script> */
// JS HTML DOM 2-1: Use the getElementsByTagName method to find the first <p> element, and change its text to "Hello".
/* <p id="demo">This is the text</p>
<script>document.getElementsByTagName("p")[0].innerHTML = "Hello";</script> */
// JS HTML DOM 3-1: Change the text of the first element that has the class name "test".
/* <div class="test">next exercise</div>
<script>document.getElementsByClassName("test")[0].innerHTML = "Hello";</script> */
// JS HTML DOM 4-1: Use HTML DOM to change the value of the image's src attribute.
/* <img id="image" src="smiley.gif">
<script>document.getElementById("image").src = "pic_mountain.jpg";</script> */
// JS HTML DOM 5-1: Use HTML DOM to change the value of the input field.
/* <input type="text" id="myText" value="Hello">
<script>document.getElementById("myText").value = "Have a nice day!";</script> */
// JS HTML DOM 6-1: Change the text color of the <p> element to "red".
/* <p id="demo">this is a paragraph</p>
<script>document.getElementById("demo").style.color = "red";</script> */
// JS HTML DOM 7-1: Change the font size of the p element to 40 pixels.
/* <p id="demo">Howdy World!!</p>
<script>document.getElementById("demo").style.fontSize = "40px";</script> */
// JS HTML DOM 8-1: Use the CSS display property to hide the p element.
/* <p id="demo">Still adding gibberish</p>
<script>document.getElementById("demo").style.display = "none";</script> */ | // JS HTML DOM 9-1: Use the eventListener to assign an onclick event to the <button> element. | random_line_split |
|
http_ece.rs | _key` is the `p256dh` and `peer_secret` the `auth` from
/// browser subscription info.
pub fn new(
encoding: ContentEncoding,
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
vapid_signature: Option<VapidSignature>,
) -> HttpEce<'a> {
HttpEce {
rng: rand::SystemRandom::new(),
peer_public_key: peer_public_key,
peer_secret: peer_secret,
encoding: encoding,
vapid_signature: vapid_signature,
}
}
/// Encrypts a payload. The maximum length for the payload is 3800
/// characters, which is the largest that works with Google's and Mozilla's
/// push servers.
pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> {
if content.len() > 3052 {
return Err(WebPushError::PayloadTooLarge);
}
let private_key =
agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?;
let public_key = private_key.compute_public_key()?;
let mut salt_bytes = [0u8; 16];
self.rng.fill(&mut salt_bytes)?;
let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key);
agreement::agree_ephemeral(
private_key,
&peer_public_key,
WebPushError::Unspecified,
|shared_secret| match self.encoding {
ContentEncoding::AesGcm => {
let mut payload = vec![0; 3054];
front_pad(content, &mut payload);
self.aes_gcm(
shared_secret,
public_key.as_ref(),
&salt_bytes,
&mut payload,
)?;
Ok(WebPushPayload {
content: payload.to_vec(),
crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes),
content_encoding: "aesgcm",
})
}
ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented),
},
) | }
pub fn generate_headers(
&self,
public_key: &'a [u8],
salt: &'a [u8],
) -> Vec<(&'static str, String)> {
let mut crypto_headers = Vec::new();
let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD));
if let Some(ref signature) = self.vapid_signature {
crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k);
let sig_s: String = signature.into();
crypto_headers.push(("Authorization", sig_s));
};
crypto_headers.push(("Crypto-Key", crypto_key));
crypto_headers.push((
"Encryption",
format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)),
));
crypto_headers
}
/// The aesgcm encrypted content-encoding, draft 3.
pub fn aes_gcm(
&self,
shared_secret: &'a [u8],
as_public_key: &'a [u8],
salt_bytes: &'a [u8],
payload: &'a mut Vec<u8>,
) -> Result<(), WebPushError> {
let mut context = Vec::with_capacity(140);
context.extend_from_slice("P-256\0".as_bytes());
context.push((self.peer_public_key.len() >> 8) as u8);
context.push((self.peer_public_key.len() & 0xff) as u8);
context.extend_from_slice(self.peer_public_key);
context.push((as_public_key.len() >> 8) as u8);
context.push((as_public_key.len() & 0xff) as u8);
context.extend_from_slice(as_public_key);
let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret);
let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes);
let EceKey(prk) = client_auth_secret
.extract(shared_secret)
.expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32))
.unwrap()
.into();
let mut cek_info = Vec::with_capacity(165);
cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes());
cek_info.extend_from_slice(&context);
let EceKey(content_encryption_key) = salt
.extract(&prk)
.expand(&[&cek_info], EceKey(16))
.unwrap()
.into();
let mut nonce_info = Vec::with_capacity(164);
nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes());
nonce_info.extend_from_slice(&context);
let EceKey(nonce_bytes) = salt
.extract(&prk)
.expand(&[&nonce_info], EceKey(12))
.unwrap()
.into();
let mut nonce = EceNonce::default();
nonce.fill(nonce_bytes);
let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?;
let mut sealing_key = aead::SealingKey::new(unbound_key, nonce);
sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?;
Ok(())
}
}
fn front_pad(payload: &[u8], output: &mut [u8]) {
let payload_len = payload.len();
let max_payload = output.len() - 2;
let padding_size = max_payload - payload.len();
output[0] = (padding_size >> 8) as u8;
output[1] = (padding_size & 0xff) as u8;
for i in 0..payload_len {
output[padding_size + i + 2] = payload[i];
}
}
#[cfg(test)]
mod tests {
use base64::{self, URL_SAFE, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::http_ece::{front_pad, ContentEncoding, HttpEce};
use crate::vapid::VapidSignature;
#[test]
fn test_payload_too_big() {
let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let content = [0u8; 3801];
assert_eq!(
Err(WebPushError::PayloadTooLarge),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aes128gcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None);
let content = [0u8; 10];
assert_eq!(
Err(WebPushError::NotImplemented),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aesgcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let shared_secret =
base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE)
.unwrap();
| random_line_split |
|
http_ece.rs | _info], EceKey(12))
.unwrap()
.into();
let mut nonce = EceNonce::default();
nonce.fill(nonce_bytes);
let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?;
let mut sealing_key = aead::SealingKey::new(unbound_key, nonce);
sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?;
Ok(())
}
}
fn front_pad(payload: &[u8], output: &mut [u8]) {
let payload_len = payload.len();
let max_payload = output.len() - 2;
let padding_size = max_payload - payload.len();
output[0] = (padding_size >> 8) as u8;
output[1] = (padding_size & 0xff) as u8;
for i in 0..payload_len {
output[padding_size + i + 2] = payload[i];
}
}
#[cfg(test)]
mod tests {
use base64::{self, URL_SAFE, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::http_ece::{front_pad, ContentEncoding, HttpEce};
use crate::vapid::VapidSignature;
#[test]
fn test_payload_too_big() {
let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let content = [0u8; 3801];
assert_eq!(
Err(WebPushError::PayloadTooLarge),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aes128gcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None);
let content = [0u8; 10];
assert_eq!(
Err(WebPushError::NotImplemented),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aesgcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let shared_secret =
base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE)
.unwrap();
let as_pubkey = base64::decode_config("BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let mut payload = "This is test data. XXX".as_bytes().to_vec();
http_ece
.aes_gcm(&shared_secret, &as_pubkey, &salt_bytes, &mut payload)
.unwrap();
assert_eq!(
"tmE7-emq6iasohjXNMue0i0vn5o7EIOyP-bKyDoM1teHLcLtg44",
base64::encode_config(&payload.to_vec(), URL_SAFE_NO_PAD)
);
}
#[test]
fn test_headers_with_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let vapid_signature = VapidSignature {
auth_t: String::from("foo"),
auth_k: String::from("bar"),
};
let http_ece = HttpEce::new(
ContentEncoding::AesGcm,
&p256dh,
&auth,
Some(vapid_signature),
);
assert_eq!(
vec![
("Authorization", "WebPush foo".to_string()),
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs; p256ecdsa=bar".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn test_headers_without_vapid() {
let as_pubkey =
base64::decode_config(
"BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=",
URL_SAFE
).unwrap();
let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap();
let p256dh =
base64::decode_config(
"BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE
).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
assert_eq!(
vec![
("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs".to_string()),
("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())],
http_ece.generate_headers(&as_pubkey, &salt_bytes))
}
#[test]
fn | test_front_pad | identifier_name |
|
http_ece.rs | _key` is the `p256dh` and `peer_secret` the `auth` from
/// browser subscription info.
pub fn new(
encoding: ContentEncoding,
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
vapid_signature: Option<VapidSignature>,
) -> HttpEce<'a> {
HttpEce {
rng: rand::SystemRandom::new(),
peer_public_key: peer_public_key,
peer_secret: peer_secret,
encoding: encoding,
vapid_signature: vapid_signature,
}
}
/// Encrypts a payload. The maximum length for the payload is 3800
/// characters, which is the largest that works with Google's and Mozilla's
/// push servers.
pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> {
if content.len() > 3052 {
return Err(WebPushError::PayloadTooLarge);
}
let private_key =
agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?;
let public_key = private_key.compute_public_key()?;
let mut salt_bytes = [0u8; 16];
self.rng.fill(&mut salt_bytes)?;
let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key);
agreement::agree_ephemeral(
private_key,
&peer_public_key,
WebPushError::Unspecified,
|shared_secret| match self.encoding {
ContentEncoding::AesGcm => {
let mut payload = vec![0; 3054];
front_pad(content, &mut payload);
self.aes_gcm(
shared_secret,
public_key.as_ref(),
&salt_bytes,
&mut payload,
)?;
Ok(WebPushPayload {
content: payload.to_vec(),
crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes),
content_encoding: "aesgcm",
})
}
ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented),
},
)
}
pub fn generate_headers(
&self,
public_key: &'a [u8],
salt: &'a [u8],
) -> Vec<(&'static str, String)> |
/// The aesgcm encrypted content-encoding, draft 3.
pub fn aes_gcm(
&self,
shared_secret: &'a [u8],
as_public_key: &'a [u8],
salt_bytes: &'a [u8],
payload: &'a mut Vec<u8>,
) -> Result<(), WebPushError> {
let mut context = Vec::with_capacity(140);
context.extend_from_slice("P-256\0".as_bytes());
context.push((self.peer_public_key.len() >> 8) as u8);
context.push((self.peer_public_key.len() & 0xff) as u8);
context.extend_from_slice(self.peer_public_key);
context.push((as_public_key.len() >> 8) as u8);
context.push((as_public_key.len() & 0xff) as u8);
context.extend_from_slice(as_public_key);
let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret);
let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes);
let EceKey(prk) = client_auth_secret
.extract(shared_secret)
.expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32))
.unwrap()
.into();
let mut cek_info = Vec::with_capacity(165);
cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes());
cek_info.extend_from_slice(&context);
let EceKey(content_encryption_key) = salt
.extract(&prk)
.expand(&[&cek_info], EceKey(16))
.unwrap()
.into();
let mut nonce_info = Vec::with_capacity(164);
nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes());
nonce_info.extend_from_slice(&context);
let EceKey(nonce_bytes) = salt
.extract(&prk)
.expand(&[&nonce_info], EceKey(12))
.unwrap()
.into();
let mut nonce = EceNonce::default();
nonce.fill(nonce_bytes);
let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?;
let mut sealing_key = aead::SealingKey::new(unbound_key, nonce);
sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?;
Ok(())
}
}
fn front_pad(payload: &[u8], output: &mut [u8]) {
let payload_len = payload.len();
let max_payload = output.len() - 2;
let padding_size = max_payload - payload.len();
output[0] = (padding_size >> 8) as u8;
output[1] = (padding_size & 0xff) as u8;
for i in 0..payload_len {
output[padding_size + i + 2] = payload[i];
}
}
#[cfg(test)]
mod tests {
use base64::{self, URL_SAFE, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::http_ece::{front_pad, ContentEncoding, HttpEce};
use crate::vapid::VapidSignature;
#[test]
fn test_payload_too_big() {
let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let content = [0u8; 3801];
assert_eq!(
Err(WebPushError::PayloadTooLarge),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aes128gcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None);
let content = [0u8; 10];
assert_eq!(
Err(WebPushError::NotImplemented),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aesgcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let shared_secret =
base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE)
.unwrap | {
let mut crypto_headers = Vec::new();
let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD));
if let Some(ref signature) = self.vapid_signature {
crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k);
let sig_s: String = signature.into();
crypto_headers.push(("Authorization", sig_s));
};
crypto_headers.push(("Crypto-Key", crypto_key));
crypto_headers.push((
"Encryption",
format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)),
));
crypto_headers
} | identifier_body |
http_ece.rs | _key` is the `p256dh` and `peer_secret` the `auth` from
/// browser subscription info.
pub fn new(
encoding: ContentEncoding,
peer_public_key: &'a [u8],
peer_secret: &'a [u8],
vapid_signature: Option<VapidSignature>,
) -> HttpEce<'a> {
HttpEce {
rng: rand::SystemRandom::new(),
peer_public_key: peer_public_key,
peer_secret: peer_secret,
encoding: encoding,
vapid_signature: vapid_signature,
}
}
/// Encrypts a payload. The maximum length for the payload is 3800
/// characters, which is the largest that works with Google's and Mozilla's
/// push servers.
pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> {
if content.len() > 3052 |
let private_key =
agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?;
let public_key = private_key.compute_public_key()?;
let mut salt_bytes = [0u8; 16];
self.rng.fill(&mut salt_bytes)?;
let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key);
agreement::agree_ephemeral(
private_key,
&peer_public_key,
WebPushError::Unspecified,
|shared_secret| match self.encoding {
ContentEncoding::AesGcm => {
let mut payload = vec![0; 3054];
front_pad(content, &mut payload);
self.aes_gcm(
shared_secret,
public_key.as_ref(),
&salt_bytes,
&mut payload,
)?;
Ok(WebPushPayload {
content: payload.to_vec(),
crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes),
content_encoding: "aesgcm",
})
}
ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented),
},
)
}
pub fn generate_headers(
&self,
public_key: &'a [u8],
salt: &'a [u8],
) -> Vec<(&'static str, String)> {
let mut crypto_headers = Vec::new();
let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD));
if let Some(ref signature) = self.vapid_signature {
crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k);
let sig_s: String = signature.into();
crypto_headers.push(("Authorization", sig_s));
};
crypto_headers.push(("Crypto-Key", crypto_key));
crypto_headers.push((
"Encryption",
format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)),
));
crypto_headers
}
/// The aesgcm encrypted content-encoding, draft 3.
pub fn aes_gcm(
&self,
shared_secret: &'a [u8],
as_public_key: &'a [u8],
salt_bytes: &'a [u8],
payload: &'a mut Vec<u8>,
) -> Result<(), WebPushError> {
let mut context = Vec::with_capacity(140);
context.extend_from_slice("P-256\0".as_bytes());
context.push((self.peer_public_key.len() >> 8) as u8);
context.push((self.peer_public_key.len() & 0xff) as u8);
context.extend_from_slice(self.peer_public_key);
context.push((as_public_key.len() >> 8) as u8);
context.push((as_public_key.len() & 0xff) as u8);
context.extend_from_slice(as_public_key);
let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret);
let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes);
let EceKey(prk) = client_auth_secret
.extract(shared_secret)
.expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32))
.unwrap()
.into();
let mut cek_info = Vec::with_capacity(165);
cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes());
cek_info.extend_from_slice(&context);
let EceKey(content_encryption_key) = salt
.extract(&prk)
.expand(&[&cek_info], EceKey(16))
.unwrap()
.into();
let mut nonce_info = Vec::with_capacity(164);
nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes());
nonce_info.extend_from_slice(&context);
let EceKey(nonce_bytes) = salt
.extract(&prk)
.expand(&[&nonce_info], EceKey(12))
.unwrap()
.into();
let mut nonce = EceNonce::default();
nonce.fill(nonce_bytes);
let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?;
let mut sealing_key = aead::SealingKey::new(unbound_key, nonce);
sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?;
Ok(())
}
}
fn front_pad(payload: &[u8], output: &mut [u8]) {
let payload_len = payload.len();
let max_payload = output.len() - 2;
let padding_size = max_payload - payload.len();
output[0] = (padding_size >> 8) as u8;
output[1] = (padding_size & 0xff) as u8;
for i in 0..payload_len {
output[padding_size + i + 2] = payload[i];
}
}
#[cfg(test)]
mod tests {
use base64::{self, URL_SAFE, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::http_ece::{front_pad, ContentEncoding, HttpEce};
use crate::vapid::VapidSignature;
#[test]
fn test_payload_too_big() {
let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let content = [0u8; 3801];
assert_eq!(
Err(WebPushError::PayloadTooLarge),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aes128gcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None);
let content = [0u8; 10];
assert_eq!(
Err(WebPushError::NotImplemented),
http_ece.encrypt(&content)
);
}
#[test]
fn test_aesgcm() {
let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8",
URL_SAFE).unwrap();
let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap();
let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None);
let shared_secret =
base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE)
.unwrap | {
return Err(WebPushError::PayloadTooLarge);
} | conditional_block |
weights_manager.py | _stride=1,
stem_affine=True,
stem_multiplier=1,
# candidate
candidate_eval_no_grad=True,
# schedule
schedule_cfg=None,
):
super().__init__(search_space, device, rollout_type, schedule_cfg)
nn.Module.__init__(self)
self.macro_search_space = (
search_space.macro_search_space
) # type: StagewiseMacroSearchSpace
self.micro_search_space = (
search_space.micro_search_space
) # type: DenseMicroSearchSpace
self.num_cell_groups = self.macro_search_space.num_cell_groups
self.cell_layout = self.macro_search_space.cell_layout
self.reduce_cell_groups = self.macro_search_space.reduce_cell_groups
self.max_grad_norm = max_grad_norm
self.candidate_eval_no_grad = candidate_eval_no_grad
# make stem
self.use_stem = use_stem
if not self.use_stem:
c_stem = 3
elif isinstance(self.use_stem, (list, tuple)):
self.stem = []
c_stem = stem_multiplier * init_channels
for i, stem_type in enumerate(self.use_stem):
c_in = 3 if i == 0 else c_stem
self.stem.append(
ops.get_op(stem_type)(
c_in, c_stem, stride=stem_stride, affine=stem_affine
)
)
self.stem = nn.Sequential(*self.stem)
else:
c_stem = stem_multiplier * init_channels
self.stem = ops.get_op(self.use_stem)(
3, c_stem, stride=stem_stride, affine=stem_affine
)
# make cells
self.cells = nn.ModuleList()
num_channels = init_channels
prev_num_channels = c_stem
for i, cg in enumerate(self.cell_layout):
stride = 2 if cg in self.reduce_cell_groups else 1
num_channels *= stride
self.cells.append(
Layer2MicroCell(
prev_num_channels,
num_channels,
stride,
affine=True,
primitives=self.micro_search_space.primitives,
num_steps=self.micro_search_space.num_steps,
num_init_nodes=self.micro_search_space.num_init_nodes,
output_op=self.micro_search_space.concat_op,
postprocess_op="conv_1x1",
cell_shortcut=True,
cell_shortcut_op="skip_connect",
)
)
prev_num_channels = num_channels
# make pooling and classifier
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(dropout_rate) if dropout_rate else nn.Identity()
self.classifier = nn.Linear(prev_num_channels, num_classes)
self.to(self.device)
def forward(
self,
inputs,
rollout, # type: Layer2Rollout
):
macro_rollout = rollout.macro # type: StagewiseMacroRollout
micro_rollout = rollout.micro # type: DenseMicroRollout
overall_adj = self.macro_search_space.parse_overall_adj(macro_rollout)
# all cell outputs + input/output states
states = [None] * (len(self.cells) + 2) # type: list[torch.Tensor]
if self.use_stem:
states[0] = self.stem(inputs)
else:
states[0] = inputs
assert len(states) == len(overall_adj)
for to, froms in enumerate(overall_adj):
froms = np.nonzero(froms)[0]
if len(froms) == 0:
continue # no inputs to this cell
if any(states[i] is None for i in froms):
raise RuntimeError(
"Invalid compute graph. Cell output used before computed"
)
# all inputs to a cell are added
cell_idx = to - 1
cell_input = sum(states[i] for i in froms)
if cell_idx < len(self.cells):
cell_arch = micro_rollout.arch[self.cell_layout[cell_idx]]
states[to] = self.cells[cell_idx].forward(cell_input, cell_arch)
else:
states[to] = cell_input # the final output state
assert states[-1] is not None
out = self.pooling(states[-1]).squeeze()
out = self.dropout(out)
out = self.classifier(out)
return out
def assemble_candidate(self, rollout):
return Layer2CandidateNet(self, rollout, self.candidate_eval_no_grad)
def set_device(self, device):
self.device = device
self.to(device)
def step(self, gradients, optimizer):
self.zero_grad() # clear all gradients
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
if self.max_grad_norm is not None:
# clip the gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
def save(self, path):
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
def load(self, path):
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
@classmethod
def supported_data_types(cls):
return ["image"]
@classmethod
def supported_rollout_types(cls):
return ["layer2"]
class Layer2MicroCell(nn.Module):
def __init__(
self,
in_channels,
out_channels,
stride,
affine,
primitives,
num_steps,
num_init_nodes,
output_op="concat",
postprocess_op="conv_1x1",
cell_shortcut=False,
cell_shortcut_op="skip_connect",
):
super().__init__()
self.out_channels = out_channels
self.stride = stride
self.primitives = primitives
self.num_init_nodes = num_init_nodes
self.num_nodes = num_steps + num_init_nodes
self.output_op = output_op
# it's easier to calc edge indices with a longer ModuleList and some None
self.edges = nn.ModuleList()
for j in range(self.num_nodes):
for i in range(self.num_nodes):
if j > i:
if i < self.num_init_nodes:
self.edges.append(
Layer2MicroEdge(
primitives, in_channels, out_channels, stride, affine
)
)
else:
self.edges.append(
Layer2MicroEdge(
primitives, out_channels, out_channels, 1, affine
)
)
else:
self.edges.append(None)
if cell_shortcut and cell_shortcut_op != "none":
self.shortcut = ops.get_op(cell_shortcut_op)(
in_channels, out_channels, stride, affine
)
else:
self.shortcut = None
if self.output_op == "concat":
self.postprocess = ops.get_op(postprocess_op)(
out_channels * num_steps, out_channels, stride=1, affine=False
)
def forward(self, inputs, cell_arch):
# cell_arch shape: [#nodes, #nodes, #ops]
n, _, h, w = inputs.shape
node_outputs = [inputs] * self.num_init_nodes
for to in range(self.num_init_nodes, self.num_nodes):
froms = np.nonzero(cell_arch[to].sum(axis=1))[0]
edge_indices = froms + (to * self.num_nodes)
if any(self.edges[i] is None for i in edge_indices):
raise RuntimeError(
"Invalid compute graph in cell. Cannot compute an edge where j <= i"
)
# outputs `to` this node `from` all used edges
edge_outputs = [
self.edges[edge_i](node_outputs[from_i], cell_arch[to, from_i])
for edge_i, from_i in zip(edge_indices, froms)
]
if len(edge_outputs) != 0:
node_outputs.append(sum(edge_outputs))
elif self.output_op == "concat":
# append fake outputs if required by concat
node_outputs.append(
torch.zeros(
n,
self.out_channels,
h // self.stride,
w // self.stride,
device=inputs.device,
)
)
node_outputs = node_outputs[self.num_init_nodes :]
if len(node_outputs) == 0:
# no node outputs (including fake outputs) in this cell
out = 0
elif self.output_op == "concat":
out = self.postprocess(torch.cat(node_outputs, dim=1))
elif self.output_op == "add":
out = sum(node_outputs)
else:
raise ValueError("Unknown cell output op `{}`".format(self.output_op))
if self.shortcut is not None:
out += self.shortcut(inputs)
return out
class Layer2MicroEdge(nn.Module):
def __init__(self, primitives, in_channels, out_channels, stride, affine):
super().__init__()
assert "none" not in primitives, "Edge should not have `none` primitive"
self.ops = nn.ModuleList(
ops.get_op(prim)(in_channels, out_channels, stride, affine)
for prim in primitives
)
def forward(self, inputs, edge_arch):
outputs = []
for op, use_op in zip(self.ops, edge_arch):
| if use_op != 0:
outputs.append(op(inputs)) | conditional_block |
|
weights_manager.py | # type: Layer2Rollout
def begin_virtual(self):
|
def forward(self, inputs):
return self.supernet.forward(inputs, self.rollout)
def _forward_with_params(self, *args, **kwargs):
raise NotImplementedError()
def get_device(self):
return self.supernet.device
class Layer2MacroSupernet(BaseWeightsManager, nn.Module):
NAME = "layer2_supernet"
def __init__(
self,
search_space, # type: Layer2SearchSpace
device,
rollout_type="layer2",
init_channels=16,
# classifier
num_classes=10,
dropout_rate=0.0,
max_grad_norm=None,
# stem
use_stem="conv_bn_3x3",
stem_stride=1,
stem_affine=True,
stem_multiplier=1,
# candidate
candidate_eval_no_grad=True,
# schedule
schedule_cfg=None,
):
super().__init__(search_space, device, rollout_type, schedule_cfg)
nn.Module.__init__(self)
self.macro_search_space = (
search_space.macro_search_space
) # type: StagewiseMacroSearchSpace
self.micro_search_space = (
search_space.micro_search_space
) # type: DenseMicroSearchSpace
self.num_cell_groups = self.macro_search_space.num_cell_groups
self.cell_layout = self.macro_search_space.cell_layout
self.reduce_cell_groups = self.macro_search_space.reduce_cell_groups
self.max_grad_norm = max_grad_norm
self.candidate_eval_no_grad = candidate_eval_no_grad
# make stem
self.use_stem = use_stem
if not self.use_stem:
c_stem = 3
elif isinstance(self.use_stem, (list, tuple)):
self.stem = []
c_stem = stem_multiplier * init_channels
for i, stem_type in enumerate(self.use_stem):
c_in = 3 if i == 0 else c_stem
self.stem.append(
ops.get_op(stem_type)(
c_in, c_stem, stride=stem_stride, affine=stem_affine
)
)
self.stem = nn.Sequential(*self.stem)
else:
c_stem = stem_multiplier * init_channels
self.stem = ops.get_op(self.use_stem)(
3, c_stem, stride=stem_stride, affine=stem_affine
)
# make cells
self.cells = nn.ModuleList()
num_channels = init_channels
prev_num_channels = c_stem
for i, cg in enumerate(self.cell_layout):
stride = 2 if cg in self.reduce_cell_groups else 1
num_channels *= stride
self.cells.append(
Layer2MicroCell(
prev_num_channels,
num_channels,
stride,
affine=True,
primitives=self.micro_search_space.primitives,
num_steps=self.micro_search_space.num_steps,
num_init_nodes=self.micro_search_space.num_init_nodes,
output_op=self.micro_search_space.concat_op,
postprocess_op="conv_1x1",
cell_shortcut=True,
cell_shortcut_op="skip_connect",
)
)
prev_num_channels = num_channels
# make pooling and classifier
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(dropout_rate) if dropout_rate else nn.Identity()
self.classifier = nn.Linear(prev_num_channels, num_classes)
self.to(self.device)
def forward(
self,
inputs,
rollout, # type: Layer2Rollout
):
macro_rollout = rollout.macro # type: StagewiseMacroRollout
micro_rollout = rollout.micro # type: DenseMicroRollout
overall_adj = self.macro_search_space.parse_overall_adj(macro_rollout)
# all cell outputs + input/output states
states = [None] * (len(self.cells) + 2) # type: list[torch.Tensor]
if self.use_stem:
states[0] = self.stem(inputs)
else:
states[0] = inputs
assert len(states) == len(overall_adj)
for to, froms in enumerate(overall_adj):
froms = np.nonzero(froms)[0]
if len(froms) == 0:
continue # no inputs to this cell
if any(states[i] is None for i in froms):
raise RuntimeError(
"Invalid compute graph. Cell output used before computed"
)
# all inputs to a cell are added
cell_idx = to - 1
cell_input = sum(states[i] for i in froms)
if cell_idx < len(self.cells):
cell_arch = micro_rollout.arch[self.cell_layout[cell_idx]]
states[to] = self.cells[cell_idx].forward(cell_input, cell_arch)
else:
states[to] = cell_input # the final output state
assert states[-1] is not None
out = self.pooling(states[-1]).squeeze()
out = self.dropout(out)
out = self.classifier(out)
return out
def assemble_candidate(self, rollout):
return Layer2CandidateNet(self, rollout, self.candidate_eval_no_grad)
def set_device(self, device):
self.device = device
self.to(device)
def step(self, gradients, optimizer):
self.zero_grad() # clear all gradients
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
if self.max_grad_norm is not None:
# clip the gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
def save(self, path):
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
def load(self, path):
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
@classmethod
def supported_data_types(cls):
return ["image"]
@classmethod
def supported_rollout_types(cls):
return ["layer2"]
class Layer2MicroCell(nn.Module):
def __init__(
self,
in_channels,
out_channels,
stride,
affine,
primitives,
num_steps,
num_init_nodes,
output_op="concat",
postprocess_op="conv_1x1",
cell_shortcut=False,
cell_shortcut_op="skip_connect",
):
super().__init__()
self.out_channels = out_channels
self.stride = stride
self.primitives = primitives
self.num_init_nodes = num_init_nodes
self.num_nodes = num_steps + num_init_nodes
self.output_op = output_op
# it's easier to calc edge indices with a longer ModuleList and some None
self.edges = nn.ModuleList()
for j in range(self.num_nodes):
for i in range(self.num_nodes):
if j > i:
if i < self.num_init_nodes:
self.edges.append(
Layer2MicroEdge(
primitives, in_channels, out_channels, stride, affine
)
)
else:
self.edges.append(
Layer2MicroEdge(
primitives, out_channels, out_channels, 1, affine
)
)
else:
self.edges.append(None)
if cell_shortcut and cell_shortcut_op != "none":
self.shortcut = ops.get_op(cell_shortcut_op)(
in_channels, out_channels, stride, affine
)
else:
self.shortcut = None
if self.output_op == "concat":
self.postprocess = ops.get_op(postprocess_op)(
out_channels * num_steps, out_channels, stride=1, affine=False
)
def forward(self, inputs, cell_arch):
# cell_arch shape: [#nodes, #nodes, #ops]
n, _, h, w = inputs.shape
node_outputs = [inputs] * self.num_init_nodes
for to in range(self.num_init_nodes, self.num_nodes):
froms = np.nonzero(cell_arch[to].sum(axis=1))[0]
edge_indices = froms + (to * self.num_nodes)
if any(self.edges[i] is None for i in edge_indices):
raise RuntimeError(
"Invalid compute graph in cell. Cannot compute an edge where j <= i"
)
# outputs `to` this node `from` all used edges
edge_outputs = [
self.edges[edge_i](node_outputs[from_i], cell_arch[to, from_i])
for edge_i, from_i in zip(edge_indices, froms)
]
if len(edge_outputs) != 0:
node_outputs.append(sum(edge_outputs))
elif self.output_op == "concat":
# append fake outputs if required by concat
node_outputs.append(
torch.zeros(
n,
self.out_channels,
h // self.stride,
w // self.stride,
device=inputs.device,
)
)
node_outputs = node_outputs[self.num_init_nodes :]
if len(node_outputs) == 0:
# no node outputs (including fake outputs) in this cell
out = 0
elif self.output_op == "concat":
out = self.postprocess(torch.cat(node_outputs, dim=1))
elif self.output_op == "add":
| raise NotImplementedError() | identifier_body |
weights_manager.py | num_classes=10,
dropout_rate=0.0,
max_grad_norm=None,
# stem
use_stem="conv_bn_3x3",
stem_stride=1,
stem_affine=True,
stem_multiplier=1,
# candidate
candidate_eval_no_grad=True,
# schedule
schedule_cfg=None,
):
super().__init__(search_space, device, rollout_type, schedule_cfg)
nn.Module.__init__(self)
self.macro_search_space = (
search_space.macro_search_space
) # type: StagewiseMacroSearchSpace
self.micro_search_space = (
search_space.micro_search_space
) # type: DenseMicroSearchSpace
self.num_cell_groups = self.macro_search_space.num_cell_groups
self.cell_layout = self.macro_search_space.cell_layout
self.reduce_cell_groups = self.macro_search_space.reduce_cell_groups
self.max_grad_norm = max_grad_norm
self.candidate_eval_no_grad = candidate_eval_no_grad
# make stem
self.use_stem = use_stem
if not self.use_stem:
c_stem = 3
elif isinstance(self.use_stem, (list, tuple)):
self.stem = []
c_stem = stem_multiplier * init_channels
for i, stem_type in enumerate(self.use_stem):
c_in = 3 if i == 0 else c_stem
self.stem.append(
ops.get_op(stem_type)(
c_in, c_stem, stride=stem_stride, affine=stem_affine
)
)
self.stem = nn.Sequential(*self.stem)
else:
c_stem = stem_multiplier * init_channels
self.stem = ops.get_op(self.use_stem)(
3, c_stem, stride=stem_stride, affine=stem_affine
)
# make cells
self.cells = nn.ModuleList()
num_channels = init_channels
prev_num_channels = c_stem
for i, cg in enumerate(self.cell_layout):
stride = 2 if cg in self.reduce_cell_groups else 1
num_channels *= stride
self.cells.append(
Layer2MicroCell(
prev_num_channels,
num_channels,
stride,
affine=True,
primitives=self.micro_search_space.primitives,
num_steps=self.micro_search_space.num_steps,
num_init_nodes=self.micro_search_space.num_init_nodes,
output_op=self.micro_search_space.concat_op,
postprocess_op="conv_1x1",
cell_shortcut=True,
cell_shortcut_op="skip_connect",
)
)
prev_num_channels = num_channels
# make pooling and classifier
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(dropout_rate) if dropout_rate else nn.Identity()
self.classifier = nn.Linear(prev_num_channels, num_classes)
self.to(self.device)
def forward(
self,
inputs,
rollout, # type: Layer2Rollout
):
macro_rollout = rollout.macro # type: StagewiseMacroRollout
micro_rollout = rollout.micro # type: DenseMicroRollout
overall_adj = self.macro_search_space.parse_overall_adj(macro_rollout)
# all cell outputs + input/output states
states = [None] * (len(self.cells) + 2) # type: list[torch.Tensor]
if self.use_stem:
states[0] = self.stem(inputs)
else:
states[0] = inputs
assert len(states) == len(overall_adj)
for to, froms in enumerate(overall_adj):
froms = np.nonzero(froms)[0]
if len(froms) == 0:
continue # no inputs to this cell
if any(states[i] is None for i in froms):
raise RuntimeError(
"Invalid compute graph. Cell output used before computed"
)
# all inputs to a cell are added
cell_idx = to - 1
cell_input = sum(states[i] for i in froms)
if cell_idx < len(self.cells):
cell_arch = micro_rollout.arch[self.cell_layout[cell_idx]]
states[to] = self.cells[cell_idx].forward(cell_input, cell_arch)
else:
states[to] = cell_input # the final output state
assert states[-1] is not None
out = self.pooling(states[-1]).squeeze()
out = self.dropout(out)
out = self.classifier(out)
return out
def assemble_candidate(self, rollout):
return Layer2CandidateNet(self, rollout, self.candidate_eval_no_grad)
def set_device(self, device):
self.device = device
self.to(device)
def step(self, gradients, optimizer):
self.zero_grad() # clear all gradients
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
if self.max_grad_norm is not None:
# clip the gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
def save(self, path):
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
def load(self, path):
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
@classmethod
def supported_data_types(cls):
return ["image"]
@classmethod
def supported_rollout_types(cls):
return ["layer2"]
class Layer2MicroCell(nn.Module):
def __init__(
self,
in_channels,
out_channels,
stride,
affine,
primitives,
num_steps,
num_init_nodes,
output_op="concat",
postprocess_op="conv_1x1",
cell_shortcut=False,
cell_shortcut_op="skip_connect",
):
super().__init__()
self.out_channels = out_channels
self.stride = stride
self.primitives = primitives
self.num_init_nodes = num_init_nodes
self.num_nodes = num_steps + num_init_nodes
self.output_op = output_op
# it's easier to calc edge indices with a longer ModuleList and some None
self.edges = nn.ModuleList()
for j in range(self.num_nodes):
for i in range(self.num_nodes):
if j > i:
if i < self.num_init_nodes:
self.edges.append(
Layer2MicroEdge(
primitives, in_channels, out_channels, stride, affine
)
)
else:
self.edges.append(
Layer2MicroEdge(
primitives, out_channels, out_channels, 1, affine
)
)
else:
self.edges.append(None)
if cell_shortcut and cell_shortcut_op != "none":
self.shortcut = ops.get_op(cell_shortcut_op)(
in_channels, out_channels, stride, affine
)
else:
self.shortcut = None
if self.output_op == "concat":
self.postprocess = ops.get_op(postprocess_op)(
out_channels * num_steps, out_channels, stride=1, affine=False
)
def forward(self, inputs, cell_arch):
# cell_arch shape: [#nodes, #nodes, #ops]
n, _, h, w = inputs.shape
node_outputs = [inputs] * self.num_init_nodes
for to in range(self.num_init_nodes, self.num_nodes):
froms = np.nonzero(cell_arch[to].sum(axis=1))[0]
edge_indices = froms + (to * self.num_nodes)
if any(self.edges[i] is None for i in edge_indices):
raise RuntimeError(
"Invalid compute graph in cell. Cannot compute an edge where j <= i"
)
# outputs `to` this node `from` all used edges
edge_outputs = [
self.edges[edge_i](node_outputs[from_i], cell_arch[to, from_i])
for edge_i, from_i in zip(edge_indices, froms)
]
if len(edge_outputs) != 0:
node_outputs.append(sum(edge_outputs))
elif self.output_op == "concat":
# append fake outputs if required by concat
node_outputs.append(
torch.zeros(
n,
self.out_channels,
h // self.stride,
w // self.stride,
device=inputs.device,
)
)
node_outputs = node_outputs[self.num_init_nodes :]
if len(node_outputs) == 0:
# no node outputs (including fake outputs) in this cell
out = 0
elif self.output_op == "concat":
out = self.postprocess(torch.cat(node_outputs, dim=1))
elif self.output_op == "add":
out = sum(node_outputs)
else:
raise ValueError("Unknown cell output op `{}`".format(self.output_op))
if self.shortcut is not None:
out += self.shortcut(inputs)
return out
class Layer2MicroEdge(nn.Module):
def __init__(self, primitives, in_channels, out_channels, stride, affine):
super().__init__()
assert "none" not in primitives, "Edge should not have `none` primitive"
self.ops = nn.ModuleList(
ops.get_op(prim)(in_channels, out_channels, stride, affine)
for prim in primitives
)
def forward(self, inputs, edge_arch):
    # NOTE: the original body of this method is truncated in this dump; the
    # line below is a minimal reconstruction (an assumption, not verbatim
    # source). `edge_arch` is the per-edge slice cell_arch[to, from], with one
    # 0/1 entry per primitive, and the outputs of the selected primitives are
    # summed.
    return sum(op(inputs) for op, selected in zip(self.ops, edge_arch) if selected)
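# Minimal usage sketch for the micro cell (an illustrative assumption, not part
# of the original file): the primitive and op names below are taken from the
# defaults used above and are assumed to be registered with ops.get_op.
def _example_micro_cell_forward():
    import numpy as np
    import torch
    primitives = ["skip_connect", "conv_bn_3x3"]
    num_init_nodes, num_steps = 1, 2
    cell = Layer2MicroCell(
        in_channels=8,
        out_channels=8,
        stride=1,
        affine=True,
        primitives=primitives,
        num_steps=num_steps,
        num_init_nodes=num_init_nodes,
        output_op="concat",
        postprocess_op="conv_1x1",
        cell_shortcut=True,
        cell_shortcut_op="skip_connect",
    )
    num_nodes = num_init_nodes + num_steps
    # cell_arch[to, from, op] == 1 selects `op` on the edge from -> to
    cell_arch = np.zeros((num_nodes, num_nodes, len(primitives)), dtype=int)
    cell_arch[1, 0, 0] = 1  # node 1 <- node 0 via skip_connect
    cell_arch[2, 1, 1] = 1  # node 2 <- node 1 via conv_bn_3x3
    out = cell(torch.randn(2, 8, 16, 16), cell_arch)
    return out.shape  # expected: torch.Size([2, 8, 16, 16])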
// downloadtaskmgr.go: download task manager (the type and variable
// declarations for this file appear after the functions, below).
func AddTaskToDownloadingMap(task *DownloadTask) {
DownloadingTaskMap.Store(task.Id, task)
}
func DeleteDownloadingTask(taskid uint64) {
DownloadingTaskMap.Delete(taskid)
}
type BySpeed []*DownloadTask
func (t BySpeed) Len() int { return len(t) }
func (t BySpeed) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t BySpeed) Less(i, j int) bool { return t[i].SpeedKBs < t[j].SpeedKBs }
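// LoopScanRunningTask runs periodically: when new tasks are waiting, it picks a
// few of the slowest running tasks (at least 5s old), assigns each to the first
// channel whose speed limit exceeds its observed speed, and marks it Task_Break
// so the worker re-queues it and frees a slot for a fresh task.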
func LoopScanRunningTask() {
newWaitingTaskCount := len(globalDownloadTaskChan)
//logger.Debug("Download waiting len","len",newWaitingTaskCount)
if newWaitingTaskCount <= 0 {
//logger.Debug("have no new task waiting")
return
}
killCount := 3
if newWaitingTaskCount < killCount {
killCount = newWaitingTaskCount
}
nowTime := time.Now().Unix()
taskReadyToKill := []*DownloadTask{}
DownloadingTaskMap.Range(func(key, value interface{}) bool {
task, ok := value.(*DownloadTask)
if ok {
//loop find task to kill
if nowTime-task.StartTime < 5 {
return true
}
for _, v := range channelArray {
if task.SpeedKBs < float64(v.SpeedLimitKBs) {
//if task.FileSize>0 {
// finishPercent:=task.DownloadedSize*100/task.FileSize
// if finishPercent>70 {
// break
// }
//}
task.DownloadChannel = v
taskReadyToKill = append(taskReadyToKill, task)
break
}
}
}
return true
})
if len(taskReadyToKill) == 0 {
return
}
sort.Sort(BySpeed(taskReadyToKill))
count := 0
for _, v := range taskReadyToKill {
v.Status = Task_Break
//logger.Debug("Break Task","id",v.Id)
count++
if count >= killCount {
return
}
}
}
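// InitTaskMgr fills every channel's token chan up to its concurrency limit,
// fills the new-task token chan, and re-queues any unfinished tasks persisted
// in LevelDB from a previous run.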
func InitTaskMgr(rootPath string) {
LevelDBInit()
for _, v := range channelArray {
for i := 0; i < v.CountLimit; i++ {
v.RunningCountControlChan <- true
}
}
for i := 0; i < NewRunningTaskCount; i++ {
newRunningTaskControlChan <- true
}
//read unfinished task and restart
unFinishedTask := LoopTasksInLDB()
if unFinishedTask == nil {
return
}
for _, v := range unFinishedTask {
info := &DownloadInfo{}
info.TargetUrl = v.TargetUrl
info.BindName = v.BindName
info.FileName = v.FileName
info.Continent = v.Continent
info.Country = v.Country
info.Area = v.Area
info.SavePath = v.SavePath
err := AddGlobalDownloadTask(info)
if err != nil {
logger.Error("Add AddGlobalDownloadTask error")
}
}
}
func AddGlobalDownloadTask(info *DownloadInfo) error {
idLock.Lock()
if currentId >= math.MaxUint64 {
currentId = 0
}
currentId++
idLock.Unlock()
newTask := &DownloadTask{}
newTask.Id = currentId
newTask.TargetUrl = info.TargetUrl
newTask.BindName = info.BindName
newTask.FileName = info.FileName
newTask.Continent = info.Continent
newTask.Country = info.Country
newTask.Area = info.Area
newTask.DownloadType = info.DownloadType
newTask.OriginRegion = info.OriginRegion
newTask.TargetRegion = info.TargetRegion
newTask.SavePath = info.SavePath
newTask.Status = Task_UnStart
newTask.TryTimes = 0
go func() {
//save to LevelDB
SetTaskToLDB(newTask)
//to task channel
globalDownloadTaskChan <- newTask
}()
return nil
}
func SetPanicCatcher(function func()) {
panicCatcher = function
}
func SetOnTaskSuccess(function func(task *DownloadTask)) {
onTaskSuccess = function
}
func SetOnTaskFailed(function func(task *DownloadTask)) {
onTaskFailed = function
}
func SetOnDownloading(function func(task *DownloadTask, usedTimeSec int)) {
onDownloading = function
}
func SetOnDownloadStart(function func(task *DownloadTask)) {
onDownloadStart = function
}
func GetDownloadTaskList() []*DownloadTask {
taskInLDB := LoopTasksInLDB()
if taskInLDB == nil {
return nil
}
list := []*DownloadTask{}
for _, v := range taskInLDB {
list = append(list, v)
}
return list
}
func TaskSuccess(task *DownloadTask) {
logger.Debug("Task Success", "id", task.Id)
//delete the task from the map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskSuccess == nil {
logger.Error("not define onTaskSuccess")
return
}
onTaskSuccess(task)
}
func TaskFail(task *DownloadTask) {
logger.Debug("Task Fail", "id", task.Id)
//delete the task from the map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskFailed == nil {
logger.Error("not define onTaskFailed")
return
}
onTaskFailed(task)
}
func TaskBreak(task *DownloadTask) {
logger.Debug("Task Break", "id", task.Id)
//delete from runningMap
DeleteDownloadingTask(task.Id)
task.Status = Task_UnStart
//add to queue
channel := task.DownloadChannel
if channel == nil {
logger.Error("Break Task not set channel,back to global list", "taskid", task.Id)
globalDownloadTaskChan <- task
return
}
channel.IdleChan <- task
logger.Debug("add break task to idleChan", "speedLimit", channel.SpeedLimitKBs, "chanLen", len(channel.IdleChan), "taskid", task.Id)
}
func TaskRetry(task *DownloadTask) {
logger.Debug("Task Retry", "id", task.Id)
DeleteDownloadingTask(task.Id)
task.TryTimes++
task.Status = Task_UnStart
globalDownloadTaskChan <- task
}
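// Task lifecycle: Success/Fail remove the task from LevelDB and the running
// map and invoke the user callbacks; Break re-queues the task on its assigned
// speed channel (or the global queue if none); Retry increments TryTimes and
// puts the task back on the global queue. StartTask below maps the result of
// ExecDownloadTask onto these transitions, failing permanently on the third
// unsuccessful attempt (TryTimes >= 2).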
func StartTask(task *DownloadTask) {
if panicCatcher != nil {
defer panicCatcher()
}
result := ExecDownloadTask(task)
switch result {
case Success:
//logger.Debug("download task success", "id", task.Id)
TaskSuccess(task)
case Fail:
//logger.Debug("download task fail", "id", task.Id)
if task.TryTimes >= 2 {
TaskFail(task)
} else {
//put it back into the task queue
TaskRetry(task)
}
case Break:
//logger.Debug("download task idle", "id", task.Id)
TaskBreak(task)
}
}
func (dc *DownloadChannel) ChannelDownload() {
go func() {
for true {
//grab a token from this channel's own queue
<-dc.RunningCountControlChan
select {
case task := <-dc.IdleChan:
go func() {
defer func() {
dc.RunningCountControlChan <- true
}()
logger.Debug("get a task from idle list", "channel speed", dc.SpeedLimitKBs, "id", task.Id, "chanlen", len(dc.IdleChan))
//execute the task
StartTask(task)
}()
}
}
}()
}
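// Both ChannelDownload and RunNewTask below use buffered channels as counting
// semaphores: a worker must receive a token before starting a task and returns
// it with a deferred send, so at most CountLimit (per channel) or
// NewRunningTaskCount (for new tasks) downloads run concurrently.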
func Run() {
RunNewTask()
RunChannelDownload()
//scanloop
go func() {
if panicCatcher != nil {
defer panicCatcher()
}
for true {
time.Sleep(5 * time.Second)
LoopScanRunningTask()
}
}()
}
func RunChannelDownload() {
for _, v := range channelArray {
v.ChannelDownload()
}
}
func RunNewTask() {
go func() {
for true {
<-newRunningTaskControlChan
select {
case task := <-globalDownloadTaskChan:
//start a new download task
go func() {
//when the task finishes, give the token back
defer func() {
newRunningTaskControlChan <- true
}()
//execute the task
//logger.Debug("start a new task", "id", task.Id)
task.Status = Task_Downloading
AddTaskToDownloadingMap(task)
StartTask(task)
}()
| GetDownloadTaskList | identifier_name |
type DownloadInfo struct {
TargetUrl string
BindName string
FileName string
Continent string
Country string
Area string
SavePath string
DownloadType string
OriginRegion string
TargetRegion string
}
type TaskStatus string
//const Task_Success TaskStatus = "success"
//const Task_Fail TaskStatus ="fail"
const Task_UnStart TaskStatus = "unstart"
const Task_Break TaskStatus = "break"
const Task_Downloading TaskStatus = "downloading"
type DownloadTask struct {
DownloadInfo
Id uint64
Status TaskStatus
FileSize int64
SpeedKBs float64
DownloadedSize int64
TryTimes int
StartTime int64
ZeroSpeedSec int
DownloadChannel *DownloadChannel
}
type TaskList struct {
TaskInQueue []DownloadTask
}
var currentId uint64
var idLock sync.Mutex
const GlobalDownloadTaskChanSize = 1024 * 10
var globalDownloadTaskChan = make(chan *DownloadTask, GlobalDownloadTaskChanSize)
var onTaskSuccess func(task *DownloadTask)
var onTaskFailed func(task *DownloadTask)
var panicCatcher func()
var onDownloadStart func(task *DownloadTask)
var onDownloading func(task *DownloadTask, usedTimeSec int)
type ExecResult string
const Success ExecResult = "Success"
const Fail ExecResult = "Fail"
const Break ExecResult = "Break"
type DownloadChannel struct {
SpeedLimitKBs int64
CountLimit int
RunningCountControlChan chan bool
IdleChan chan *DownloadTask
}
var DownloadingTaskMap sync.Map
var ChannelRunningSize = []int{10, 6, 3, 3, 2}
var channelArray = []*DownloadChannel{
{SpeedLimitKBs: 30, CountLimit: ChannelRunningSize[0], RunningCountControlChan: make(chan bool, ChannelRunningSize[0]), IdleChan: make(chan *DownloadTask, 1024*5)}, //30KB/s
{SpeedLimitKBs: 100, CountLimit: ChannelRunningSize[1], RunningCountControlChan: make(chan bool, ChannelRunningSize[1]), IdleChan: make(chan *DownloadTask, 1024*5)}, //100KB/s
{SpeedLimitKBs: 500, CountLimit: ChannelRunningSize[2], RunningCountControlChan: make(chan bool, ChannelRunningSize[2]), IdleChan: make(chan *DownloadTask, 1024*5)}, //500KB/s
{SpeedLimitKBs: 1500, CountLimit: ChannelRunningSize[3], RunningCountControlChan: make(chan bool, ChannelRunningSize[3]), IdleChan: make(chan *DownloadTask, 1024*3)}, //1500KB/s
{SpeedLimitKBs: 2500, CountLimit: ChannelRunningSize[4], RunningCountControlChan: make(chan bool, ChannelRunningSize[4]), IdleChan: make(chan *DownloadTask, 1024*3)}, //2500KB/s
}
const NewRunningTaskCount = 7
var newRunningTaskControlChan = make(chan bool, NewRunningTaskCount)
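// Illustrative wiring sketch (an assumption, not part of the original file):
// how a caller might configure callbacks, start the manager and enqueue a
// download. Paths, URLs and the callback bodies are placeholders.
func exampleUsage() {
	SetPanicCatcher(func() { _ = recover() })
	SetOnTaskSuccess(func(task *DownloadTask) { logger.Debug("done", "id", task.Id) })
	SetOnTaskFailed(func(task *DownloadTask) { logger.Debug("failed", "id", task.Id) })
	InitTaskMgr("/data/downloads")
	Run()
	_ = AddGlobalDownloadTask(&DownloadInfo{
		TargetUrl: "http://example.com/file.bin",
		FileName:  "file.bin",
		SavePath:  "/data/downloads/file.bin",
	})
}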
// NOTE: TimeoutDialer is truncated mid-signature in the original dump; the body
// below is a reconstruction of the common dial-with-timeout idiom and is an
// assumption, not verbatim source.
func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net.Conn, err error) {
	return func(netw, addr string) (net.Conn, error) {
		conn, err := net.DialTimeout(netw, addr, cTimeout)
		if err != nil {
			return nil, err
		}
		conn.SetDeadline(time.Now().Add(rwTimeout))
		return conn, nil
	}
}