file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
Z_normal_8_2.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt,mpld3
from collections import defaultdict
from sklearn.metrics.pairwise import euclidean_distances
from flask import Flask, render_template, request
import math
import itertools
"""------------- Intialization ------------- """
y_alphabet_size=4
word_lenth=3
window_size=10
skip_offset=5
ham_distance=1
epsilon = 1e-6
"""------------- import Data -------------"""
"""
file_name='test_data2.csv'
data2 = pd.read_csv(file_name, sep=',', header=None)
x1 = data2.iloc[1:,1].values.flatten()
x1 = x1.astype(np.float)
"""
data = pd.read_csv('car_sales.csv', sep=',', header=None)
x1 = data.iloc[1:,1].values.flatten()
x1=np.asfarray(x1,float)
#os.remove("./Output/sliding_half_segment/")
"""------------- Helper Functions ------------- """
def split(arr, size):
arrs = []
while len(arr) > size:
pice = arr[:size]
arrs.append(pice)
arr = arr[size:]
arrs.append(arr)
return arrs
"""------------- Y-axis Distribution ------------- """
def break_points_gaussian(size):
options = {
3: np.array([ -0.43, 0.43]),
4: np.array([ -0.67, 0, 0.67]),
5: np.array([ -0.84, -0.25, 0.25, 0.84]),
6: np.array([ -0.97, -0.43, 0, 0.43, 0.97]),
7: np.array([ -1.07, -0.57, -0.18, 0.18, 0.57, 1.07]),
8: np.array([ -1.15, -0.67, -0.32, 0, 0.32, 0.67, 1.15]),
9: np.array([ -1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22]),
10: np.array([ -1.28, -0.84, -0.52, -0.25, 0, 0.25, 0.52, 0.84, 1.28]),
11: np.array([ -1.34, -0.91, -0.6, -0.35, -0.11, 0.11, 0.35, 0.6, 0.91, 1.34]),
12: np.array([ -1.38, -0.97, -0.67, -0.43, -0.21, 0, 0.21, 0.43, 0.67, 0.97, 1.38]),
13: np.array([ -1.43, -1.02, -0.74, -0.5, -0.29, -0.1, 0.1, 0.29, 0.5, 0.74, 1.02, 1.43]),
14: np.array([ -1.47, -1.07, -0.79, -0.57, -0.37, -0.18, 0, 0.18, 0.37, 0.57, 0.79, 1.07, 1.47]),
15: np.array([ -1.5, -1.11, -0.84, -0.62, -0.43, -0.25, -0.08, 0.08, 0.25, 0.43, 0.62, 0.84, 1.11, 1.5]),
16: np.array([ -1.53, -1.15, -0.89, -0.67, -0.49, -0.32, -0.16, 0, 0.16, 0.32, 0.49, 0.67, 0.89, 1.15, 1.53]),
17: np.array([ -1.56, -1.19, -0.93, -0.72, -0.54, -0.38, -0.22, -0.07, 0.07, 0.22, 0.38, 0.54, 0.72, 0.93, 1.19, 1.56]),
18: np.array([ -1.59, -1.22, -0.97, -0.76, -0.59, -0.43, -0.28, -0.14, 0, 0.14, 0.28, 0.43, 0.59, 0.76, 0.97, 1.22, 1.59]),
19: np.array([ -1.62, -1.25, -1, -0.8, -0.63, -0.48, -0.34, -0.2, -0.07, 0.07, 0.2, 0.34, 0.48, 0.63, 0.8, 1, 1.25, 1.62]),
20: np.array([ -1.64, -1.28, -1.04, -0.84, -0.67, -0.52, -0.39, -0.25, -0.13, 0, 0.13, 0.25, 0.39, 0.52, 0.67, 0.84, 1.04, 1.28, 1.64]),
}
return options[size]
def break_points_quantiles(size):
options=np.linspace(0, 1, size+1)[1:]
return options
#y_alphabets = break_points_quantiles(y_alphabet_size).tolist()
y_alphabets = break_points_gaussian(y_alphabet_size).tolist()
def hamming_distance1(string1, string2):
distance = 0
L = len(string1)
for i in range(L):
if string1[i] != string2[i]:
distance += 1
return distance
def hamming_distance(s1, s2):
if len(s1) != len(s2):
raise ValueError("Undefined for sequences of unequal length")
return sum(el1 != el2 for el1, el2 in zip(s1, s2))
"""------------- X-axis Distribution ------------- """
def x_distrubted_values(series):
|
"""------------- Index to Letter conversion ------------- """
def index_to_letter(idx):
"""Convert a numerical index to a char."""
if 0 <= idx < 20:
return chr(97 + idx)
else:
raise ValueError('A wrong idx value supplied.')
def normalize(x):
X = np.asanyarray(x)
if np.nanstd(X) < epsilon:
res = []
for entry in X:
if not np.isnan(entry):
res.append(0)
else:
res.append(np.nan)
return res
return (X - np.nanmean(X)) / np.nanstd(X)
def normal_distribution(x):
x = (x-min(x))/(max(x)-min(x))
return x
"""------------- 1- Normalize Data ------------- """
x1=normalize(x1)
plt.plot(x1)
plt.show()
"""------------- 5.2- Y_Alphabetize ------------- """
def alphabetize_ts(sub_section):
mean_val=x_distrubted_values(sub_section)
y_alpha_val=min(y_alphabets, key=lambda x:abs(x-mean_val))
y_alpha_idx=y_alphabets.index(y_alpha_val)
curr_word = index_to_letter(y_alpha_idx)
return(curr_word)
"""------------- 2- Segmentization Data ------------- """
def segment_ts(series,windowSize=window_size,skip_offset=skip_offset):
ts_len=len(x1)
mod = ts_len%windowSize
rnge=0
if(skip_offset==0):
ts_len=int((ts_len-mod-window_size)/1)
rnge=int(ts_len/window_size)
else:
ts_len=int(math.ceil((ts_len-mod-window_size)/skip_offset))
rnge=int(ts_len)
curr_count=0
words=list()
indices=list()
complete_indices=list()
for i in range(0, rnge):
sub_section = series[curr_count:(curr_count+windowSize)]
sub_section=normalize(sub_section)
#print(curr_count,(curr_count+windowSize))
#print(sub_section)
curr_word=""
chunk_size=int(len(sub_section)/word_lenth)
num=0
zlp=""
for j in range(0,word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_word=alphabetize_ts(chunk)
zlp+=str(curr_word)
complete_indices.append(curr_count)
num+=chunk_size
words.append(zlp)
indices.append(curr_count)
curr_count=curr_count+skip_offset-1
temp_list=[]
temp_list.append(sub_section)
temp_df = pd.DataFrame(temp_list)
temp_df.insert(loc=0, column='keys', value=zlp)
temp_df.insert(loc=1, column='position', value=sorted(sub_section)[len(sub_section) // 2])
temp_df.insert(loc=2, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=3, column='scale_low', value=np.min(sub_section))
if(i==0):
df_sax =temp_df.copy()
else:
df_sax=df_sax.append(temp_df, ignore_index=True)
return (words,indices,df_sax)
alphabetize11,indices,df_sax=segment_ts(x1)
"""------------- SAX ------------- """
""" Complete Words """
def complete_word(series=x1,word_len=word_lenth,skip_len=skip_offset):
alphabetize,indices,df_sax=segment_ts(series)
complete_word=list()
complete_indices=indices
""" Simillar Words """
complete_word=alphabetize
sax = defaultdict(list)
for i in range(0,len(complete_word)):
if(len(complete_word[i])==word_lenth):
sax[complete_word[i]].append(complete_indices[i])
return sax
simillar_word=complete_word()
"""------------- Compare Shape Algorithm ------------- """
def Compare_Shape():
simillar_word=complete_word()
map_keys = defaultdict(list)
map_indices=defaultdict(list)
for key_i in simillar_word:
temp_list=list()
temp_list.append(simillar_word.get(key_i))
for key_j in simillar_word:
dist=hamming_distance(key_i, key_j)
if(dist==ham_distance and key_i !=key_j):
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
tempp=list()
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return (map_keys,map_indices)
comapre=Compare_Shape()
"""------------- Visualization ------------- """
def visualize(data,alph_size,lent,key):
row=int(lent/4)
print(key)
if(lent > 4):
fig = plt.figure(figsize=(4*row, 5*row))
#ax.set_ylim(-2.5,2.5)
for i in range(0,lent):
slice_range=slice(i*alph_size,(i+1)*alph_size)
nData=data[slice_range]
fig.add_subplot(row+1, 4,i+1 )
plt.plot(nData)
else:
fig = plt.figure(figsize=(3*3, 4*3))
for i in range(0,lent):
slice_range=slice(i*alph_size,(i+1)*alph_size)
nData=data[slice_range]
fig.add_subplot(5, 2,i+1 )
plt.plot(nData)
#plt.savefig('./Output/sliding_half_segment/'+key+'.png')
#plt.savefig('books_read.png')
plt.show()
def prep_visualize ():
i=0
simillar_word=complete_word()
sax_keys =list(simillar_word.keys())
sax_values =list(simillar_word.values())
for n_val in sax_values:
key=sax_keys[i]
x2= list();
for n1_val in n_val:
alpha_count=0
while (alpha_count < window_size):
x2.append(x1[n1_val+alpha_count])
alpha_count=alpha_count+1
visualize(x2,window_size,len(n_val ),key)
i=i+1
def prep_visualize1 ():
compare_keys,compare_indices = Compare_Shape()
sax_keys =list(compare_indices.keys())
sax_values =list(compare_indices.values())
for i in range(len(sax_values)):
key=sax_keys[i]
x2= list();
for n_val in sax_values[i][0]:
alpha_count=0
while (alpha_count < window_size):
x2.append(x1[n_val+alpha_count])
alpha_count=alpha_count+1
visualize(x2,window_size,len(sax_values[i][0]),key)
"""------------- Matrix ------------- """
def matrix_calculation (df,key):
df_temp = df.drop(columns=[ 'indexx','simillar_key'])
width=len(df)
s = (width,width)
mat = np.zeros(s)
if(width>=3):
for i in range(len(df)):
for j in range(len(df)):
row1= df_temp.iloc[[i]].values[0]
row2= df_temp.iloc[[j]].values[0]
dist= row1-row2
mat[i][j]=(dist)
dist_array = np.triu(mat, 0)
print(key)
print(dist_array)
alphabetize,indices,feat_vector=segment_ts(x1)
def matrix_prep ():
alphabetize,indices,feat_vector=segment_ts(x1)
compare_keys,compare_indices = Compare_Shape()
sax_keys =list(compare_keys.keys())
sax_values =list(compare_keys.values())
i=0
for n_val in sax_values:
key=sax_keys[i]
temp_df = pd.DataFrame()
index_list=list()
position_list=list()
simillar_key_list=list()
for n1_val in n_val:
print(n1_val)
for index, row in feat_vector.iterrows():
if(row['keys']==n1_val):
# print(row['position'],index)
index_list.append(index)
position_list.append(row['position'])
simillar_key_list.append(n1_val)
temp_df['indexx']=index_list
temp_df['position']=position_list
temp_df['simillar_key']=simillar_key_list
matrix_calculation(temp_df,key)
i=i+1
print("===========================Before Compare Shape============================")
#prep_visualize()
print("===========================After Compare Shape============================")
#prep_visualize1 ()
print("===========================Position Matrix ============================")
matrix_prep()
| mean=np.mean(series)
#median=sorted(series)[len(series) // 2]
return mean | identifier_body |
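Each row splits one source file into a prefix, a held-out middle, and a suffix; fim_type records what kind of span was held out (here `identifier_body`, the body of `x_distrubted_values`). A minimal sketch of how a row can be put back together, or serialized into a fill-in-the-middle prompt, is shown below; the `<fim_*>` sentinel strings are an assumed convention used for illustration, not tokens defined by this dataset.

```python
# Minimal sketch: reassemble a row, or format it as a FIM prompt.
# The sentinel strings are assumptions (a common FIM convention),
# not part of this dataset.

def reassemble(prefix: str, middle: str, suffix: str) -> str:
    """Reconstruct the original source file from one row's three spans."""
    return prefix + middle + suffix


def to_fim_prompt(prefix: str, suffix: str) -> str:
    """Build a prompt from prefix/suffix; the model is asked to emit `middle`."""
    return f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>"


# For the row above (fim_type == "identifier_body"), `middle` is the body of
# x_distrubted_values, so reassemble(prefix, middle, suffix) yields the full
# Z_normal_8_2.py source.
```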
Z_normal_8_2.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt,mpld3
from collections import defaultdict
from sklearn.metrics.pairwise import euclidean_distances
from flask import Flask, render_template, request
import math
import itertools
"""------------- Intialization ------------- """
y_alphabet_size=4
word_lenth=3
window_size=10
skip_offset=5
ham_distance=1
epsilon = 1e-6
"""------------- import Data -------------"""
"""
file_name='test_data2.csv'
data2 = pd.read_csv(file_name, sep=',', header=None)
x1 = data2.iloc[1:,1].values.flatten()
x1 = x1.astype(np.float)
"""
data = pd.read_csv('car_sales.csv', sep=',', header=None)
x1 = data.iloc[1:,1].values.flatten()
x1=np.asfarray(x1,float)
#os.remove("./Output/sliding_half_segment/")
"""------------- Helper Functions ------------- """
def split(arr, size):
arrs = []
while len(arr) > size:
pice = arr[:size]
arrs.append(pice)
arr = arr[size:]
arrs.append(arr)
return arrs
"""------------- Y-axis Distribution ------------- """
def break_points_gaussian(size):
options = {
3: np.array([ -0.43, 0.43]),
4: np.array([ -0.67, 0, 0.67]),
5: np.array([ -0.84, -0.25, 0.25, 0.84]),
6: np.array([ -0.97, -0.43, 0, 0.43, 0.97]),
7: np.array([ -1.07, -0.57, -0.18, 0.18, 0.57, 1.07]),
8: np.array([ -1.15, -0.67, -0.32, 0, 0.32, 0.67, 1.15]),
9: np.array([ -1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22]),
10: np.array([ -1.28, -0.84, -0.52, -0.25, 0, 0.25, 0.52, 0.84, 1.28]),
11: np.array([ -1.34, -0.91, -0.6, -0.35, -0.11, 0.11, 0.35, 0.6, 0.91, 1.34]),
12: np.array([ -1.38, -0.97, -0.67, -0.43, -0.21, 0, 0.21, 0.43, 0.67, 0.97, 1.38]),
13: np.array([ -1.43, -1.02, -0.74, -0.5, -0.29, -0.1, 0.1, 0.29, 0.5, 0.74, 1.02, 1.43]),
14: np.array([ -1.47, -1.07, -0.79, -0.57, -0.37, -0.18, 0, 0.18, 0.37, 0.57, 0.79, 1.07, 1.47]),
15: np.array([ -1.5, -1.11, -0.84, -0.62, -0.43, -0.25, -0.08, 0.08, 0.25, 0.43, 0.62, 0.84, 1.11, 1.5]),
16: np.array([ -1.53, -1.15, -0.89, -0.67, -0.49, -0.32, -0.16, 0, 0.16, 0.32, 0.49, 0.67, 0.89, 1.15, 1.53]),
17: np.array([ -1.56, -1.19, -0.93, -0.72, -0.54, -0.38, -0.22, -0.07, 0.07, 0.22, 0.38, 0.54, 0.72, 0.93, 1.19, 1.56]),
18: np.array([ -1.59, -1.22, -0.97, -0.76, -0.59, -0.43, -0.28, -0.14, 0, 0.14, 0.28, 0.43, 0.59, 0.76, 0.97, 1.22, 1.59]),
19: np.array([ -1.62, -1.25, -1, -0.8, -0.63, -0.48, -0.34, -0.2, -0.07, 0.07, 0.2, 0.34, 0.48, 0.63, 0.8, 1, 1.25, 1.62]),
20: np.array([ -1.64, -1.28, -1.04, -0.84, -0.67, -0.52, -0.39, -0.25, -0.13, 0, 0.13, 0.25, 0.39, 0.52, 0.67, 0.84, 1.04, 1.28, 1.64]),
}
return options[size]
def | (size):
options=np.linspace(0, 1, size+1)[1:]
return options
#y_alphabets = break_points_quantiles(y_alphabet_size).tolist()
y_alphabets = break_points_gaussian(y_alphabet_size).tolist()
def hamming_distance1(string1, string2):
distance = 0
L = len(string1)
for i in range(L):
if string1[i] != string2[i]:
distance += 1
return distance
def hamming_distance(s1, s2):
if len(s1) != len(s2):
raise ValueError("Undefined for sequences of unequal length")
return sum(el1 != el2 for el1, el2 in zip(s1, s2))
"""------------- X-axis Distribution ------------- """
def x_distrubted_values(series):
mean=np.mean(series)
#median=sorted(series)[len(series) // 2]
return mean
"""------------- Index to Letter conversion ------------- """
def index_to_letter(idx):
"""Convert a numerical index to a char."""
if 0 <= idx < 20:
return chr(97 + idx)
else:
raise ValueError('A wrong idx value supplied.')
def normalize(x):
X = np.asanyarray(x)
if np.nanstd(X) < epsilon:
res = []
for entry in X:
if not np.isnan(entry):
res.append(0)
else:
res.append(np.nan)
return res
return (X - np.nanmean(X)) / np.nanstd(X)
def normal_distribution(x):
x = (x-min(x))/(max(x)-min(x))
return x
"""------------- 1- Normalize Data ------------- """
x1=normalize(x1)
plt.plot(x1)
plt.show()
"""------------- 5.2- Y_Alphabetize ------------- """
def alphabetize_ts(sub_section):
mean_val=x_distrubted_values(sub_section)
y_alpha_val=min(y_alphabets, key=lambda x:abs(x-mean_val))
y_alpha_idx=y_alphabets.index(y_alpha_val)
curr_word = index_to_letter(y_alpha_idx)
return(curr_word)
"""------------- 2- Segmentization Data ------------- """
def segment_ts(series,windowSize=window_size,skip_offset=skip_offset):
ts_len=len(x1)
mod = ts_len%windowSize
rnge=0
if(skip_offset==0):
ts_len=int((ts_len-mod-window_size)/1)
rnge=int(ts_len/window_size)
else:
ts_len=int(math.ceil((ts_len-mod-window_size)/skip_offset))
rnge=int(ts_len)
curr_count=0
words=list()
indices=list()
complete_indices=list()
for i in range(0, rnge):
sub_section = series[curr_count:(curr_count+windowSize)]
sub_section=normalize(sub_section)
#print(curr_count,(curr_count+windowSize))
#print(sub_section)
curr_word=""
chunk_size=int(len(sub_section)/word_lenth)
num=0
zlp=""
for j in range(0,word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_word=alphabetize_ts(chunk)
zlp+=str(curr_word)
complete_indices.append(curr_count)
num+=chunk_size
words.append(zlp)
indices.append(curr_count)
curr_count=curr_count+skip_offset-1
temp_list=[]
temp_list.append(sub_section)
temp_df = pd.DataFrame(temp_list)
temp_df.insert(loc=0, column='keys', value=zlp)
temp_df.insert(loc=1, column='position', value=sorted(sub_section)[len(sub_section) // 2])
temp_df.insert(loc=2, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=3, column='scale_low', value=np.min(sub_section))
if(i==0):
df_sax =temp_df.copy()
else:
df_sax=df_sax.append(temp_df, ignore_index=True)
return (words,indices,df_sax)
alphabetize11,indices,df_sax=segment_ts(x1)
"""------------- SAX ------------- """
""" Complete Words """
def complete_word(series=x1,word_len=word_lenth,skip_len=skip_offset):
alphabetize,indices,df_sax=segment_ts(series)
complete_word=list()
complete_indices=indices
""" Simillar Words """
complete_word=alphabetize
sax = defaultdict(list)
for i in range(0,len(complete_word)):
if(len(complete_word[i])==word_lenth):
sax[complete_word[i]].append(complete_indices[i])
return sax
simillar_word=complete_word()
"""------------- Compare Shape Algorithm ------------- """
def Compare_Shape():
simillar_word=complete_word()
map_keys = defaultdict(list)
map_indices=defaultdict(list)
for key_i in simillar_word:
temp_list=list()
temp_list.append(simillar_word.get(key_i))
for key_j in simillar_word:
dist=hamming_distance(key_i, key_j)
if(dist==ham_distance and key_i !=key_j):
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
tempp=list()
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return (map_keys,map_indices)
comapre=Compare_Shape()
"""------------- Visualization ------------- """
def visualize(data,alph_size,lent,key):
row=int(lent/4)
print(key)
if(lent > 4):
fig = plt.figure(figsize=(4*row, 5*row))
#ax.set_ylim(-2.5,2.5)
for i in range(0,lent):
slice_range=slice(i*alph_size,(i+1)*alph_size)
nData=data[slice_range]
fig.add_subplot(row+1, 4,i+1 )
plt.plot(nData)
else:
fig = plt.figure(figsize=(3*3, 4*3))
for i in range(0,lent):
slice_range=slice(i*alph_size,(i+1)*alph_size)
nData=data[slice_range]
fig.add_subplot(5, 2,i+1 )
plt.plot(nData)
#plt.savefig('./Output/sliding_half_segment/'+key+'.png')
#plt.savefig('books_read.png')
plt.show()
def prep_visualize ():
i=0
simillar_word=complete_word()
sax_keys =list(simillar_word.keys())
sax_values =list(simillar_word.values())
for n_val in sax_values:
key=sax_keys[i]
x2= list();
for n1_val in n_val:
alpha_count=0
while (alpha_count < window_size):
x2.append(x1[n1_val+alpha_count])
alpha_count=alpha_count+1
visualize(x2,window_size,len(n_val ),key)
i=i+1
def prep_visualize1 ():
compare_keys,compare_indices = Compare_Shape()
sax_keys =list(compare_indices.keys())
sax_values =list(compare_indices.values())
for i in range(len(sax_values)):
key=sax_keys[i]
x2= list();
for n_val in sax_values[i][0]:
alpha_count=0
while (alpha_count < window_size):
x2.append(x1[n_val+alpha_count])
alpha_count=alpha_count+1
visualize(x2,window_size,len(sax_values[i][0]),key)
"""------------- Matrix ------------- """
def matrix_calculation (df,key):
df_temp = df.drop(columns=[ 'indexx','simillar_key'])
width=len(df)
s = (width,width)
mat = np.zeros(s)
if(width>=3):
for i in range(len(df)):
for j in range(len(df)):
row1= df_temp.iloc[[i]].values[0]
row2= df_temp.iloc[[j]].values[0]
dist= row1-row2
mat[i][j]=(dist)
dist_array = np.triu(mat, 0)
print(key)
print(dist_array)
alphabetize,indices,feat_vector=segment_ts(x1)
def matrix_prep ():
alphabetize,indices,feat_vector=segment_ts(x1)
compare_keys,compare_indices = Compare_Shape()
sax_keys =list(compare_keys.keys())
sax_values =list(compare_keys.values())
i=0
for n_val in sax_values:
key=sax_keys[i]
temp_df = pd.DataFrame()
index_list=list()
position_list=list()
simillar_key_list=list()
for n1_val in n_val:
print(n1_val)
for index, row in feat_vector.iterrows():
if(row['keys']==n1_val):
# print(row['position'],index)
index_list.append(index)
position_list.append(row['position'])
simillar_key_list.append(n1_val)
temp_df['indexx']=index_list
temp_df['position']=position_list
temp_df['simillar_key']=simillar_key_list
matrix_calculation(temp_df,key)
i=i+1
print("===========================Before Compare Shape============================")
#prep_visualize()
print("===========================After Compare Shape============================")
#prep_visualize1 ()
print("===========================Position Matrix ============================")
matrix_prep()
| break_points_quantiles | identifier_name |
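The Python file in these rows implements a SAX-style discretization: each window is z-normalized, split into word_lenth chunks, and each chunk's mean is snapped to the nearest Gaussian breakpoint, whose index becomes a letter. The following is a condensed sketch of that letter assignment, assuming the 4-symbol alphabet (breakpoints -0.67, 0, 0.67) configured above; it paraphrases the row's code rather than reproducing it.

```python
import numpy as np

# Condensed sketch of the per-chunk letter assignment in Z_normal_8_2.py,
# assuming y_alphabet_size=4, i.e. breakpoints [-0.67, 0, 0.67] as above.
breakpoints = [-0.67, 0.0, 0.67]


def chunk_to_letter(chunk) -> str:
    """Map a z-normalized chunk to a SAX symbol via its mean value."""
    mean_val = float(np.mean(chunk))
    nearest = min(breakpoints, key=lambda b: abs(b - mean_val))
    return chr(97 + breakpoints.index(nearest))  # 97 == ord('a')


print(chunk_to_letter([0.10, -0.20, 0.05]))  # 'b' (mean is closest to 0)
```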
genstate.go | // Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ygen
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/openconfig/gnmi/ctree"
"github.com/openconfig/goyang/pkg/yang"
"github.com/openconfig/ygot/ygot"
)
// genState is used to store the state that is created throughout the code
// generation and must be shared between multiple entities.
type genState struct {
// definedGlobals specifies the global Go names used during code generation.
definedGlobals map[string]bool
// uniqueDirectoryNames is a map keyed by the path of a YANG entity representing a
// directory in the generated code, whose value is the unique name that it
// was mapped to. This allows routines to determine, based on a particular YANG
// entry, how to refer to it when generating code.
uniqueDirectoryNames map[string]string
// uniqueIdentityNames is a map which is keyed by a string in the form of
// definingModule/identityName which stores the Go name of the enumerated Go type
// that has been created to represent the identity. This allows de-duplication
// between identityref leaves that reference the same underlying identity. The
// name used includes the defining module to avoid clashes between two identities
// that are named the same within different modules.
uniqueIdentityNames map[string]string
// uniqueEnumeratedTypedefNames is a map, keyed by a synthesised path for the typedef,
// generated in the form definingModule/typedefName, the value stores the Go name of
// the enumeration which represents a typedef that includes an enumerated type.
uniqueEnumeratedTypedefNames map[string]string
// uniqueEnumeratedLeafNames is a map, keyed by a synthesised path to an
// enumeration leaf. The path used reflects the data tree path of the leaf
// within the module that it is defined. That is to say, if a module
// example-module defines a hierarchy of global/config/a-leaf where a-leaf
// is of type enumeration, then the path example-module/global/config/a-leaf
// is used for a-leaf in the uniqueEnumeratedLeafNames. The value of the map
// is the name of the Go enumerated value to which it is mapped. The path based
// on the module is guaranteed to be unique, since we cannot have multiple
// modules of the same name, or multiple identical data tree paths within
// the same module. This path is used since a particular leaf may be re-used
// in multiple places, such that if the entire data tree path is used then
// the names that are generated require deduplication. This approach ensures
// that we have the same enumerated value for a particular leaf in multiple
// contexts.
// At the time of writing, in OpenConfig schemas, this occurs where there is
// a module such as openconfig-bgp which defines /bgp and is also used at
// /network-instances/network-instance/protocols/protocol/bgp.
uniqueEnumeratedLeafNames map[string]string
// schematree stores a ctree.Tree structure that represents the YANG
// schema tree. This is used for lookups within the module set where
// they are required, e.g., for leafrefs.
schematree *ctree.Tree
// uniqueProtoMsgNames is a map, keyed by a protobuf package name, that
// contains a map keyed by protobuf message name strings that indicates the
// names that are used within the generated package's context. It is used
// during code generation to ensure uniqueness of the generated names within
// the specified package.
uniqueProtoMsgNames map[string]map[string]bool
// uniqueProtoPackages is a map, keyed by a YANG schema path, that allows
// a path to be resolved into the calculated Protobuf package name that
// is to be used for it.
uniqueProtoPackages map[string]string
// generatedUnions stores a map, keyed by the output name for a union,
// that has already been output in the generated code. This ensures that
// where two entities re-use a union that has already been created (e.g.,
// a leafref to a union) then it is output only once in the generated code.
generatedUnions map[string]bool
}
// newGenState creates a new genState instance, initialised with the default state
// required for code generation.
func | () *genState {
return &genState{
// Mark the name that is used for the binary type as a reserved name
// within the output structs.
definedGlobals: map[string]bool{
ygot.BinaryTypeName: true,
ygot.EmptyTypeName: true,
},
uniqueDirectoryNames: make(map[string]string),
uniqueEnumeratedTypedefNames: make(map[string]string),
uniqueIdentityNames: make(map[string]string),
uniqueEnumeratedLeafNames: make(map[string]string),
uniqueProtoMsgNames: make(map[string]map[string]bool),
uniqueProtoPackages: make(map[string]string),
generatedUnions: make(map[string]bool),
}
}
// enumeratedUnionEntry takes an input YANG union yang.Entry and returns the set of enumerated
// values that should be generated for the entry. New yang.Entry instances are synthesised within
// the yangEnums returned such that enumerations can be generated directly from the output of
// this function in common with enumerations that are not within a union. The name of the enumerated
// value is calculated based on the original context, whether path compression is enabled based
// on the compressPaths boolean, and whether the name should not include underscores, as per the
// noUnderscores boolean.
func (s *genState) enumeratedUnionEntry(e *yang.Entry, compressPaths, noUnderscores bool) ([]*yangEnum, error) {
var es []*yangEnum
for _, t := range enumeratedUnionTypes(e.Type.Type) {
var en *yangEnum
switch {
case t.IdentityBase != nil:
en = &yangEnum{
name: s.identityrefBaseTypeFromIdentity(t.IdentityBase, noUnderscores),
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yidentityref,
IdentityBase: t.IdentityBase,
},
},
}
case t.Enum != nil:
var enumName string
if _, chBuiltin := yang.TypeKindFromName[t.Name]; chBuiltin {
enumName = s.resolveEnumName(e, compressPaths, noUnderscores)
} else {
var err error
enumName, err = s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
return nil, err
}
}
en = &yangEnum{
name: enumName,
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yenum,
Enum: t.Enum,
},
Annotation: map[string]interface{}{"valuePrefix": traverseElementSchemaPath(e)},
},
}
}
es = append(es, en)
}
return es, nil
}
// buildDirectoryDefinitions extracts the yang.Entry instances from a map of
// entries that need struct or message definitions built for them. It resolves
// each yang.Entry to a yangDirectory which contains the elements that are
// needed for subsequent code generation. The name of the directory entry that
// is returned is based on the generatedLanguage that is supplied. The
// compressPaths and genFakeRoot arguments are used to determine how paths that
// are included within the generated structs are used. If the excludeState
// argument is set, those elements within the YANG schema that are marked config
// false (i.e., are read only) are excluded from the returned directories.
func (s *genState) buildDirectoryDefinitions(entries map[string]*yang.Entry, compressPaths, genFakeRoot bool, lang generatedLanguage, excludeState bool) (map[string]*yangDirectory, []error) {
var errs []error
mappedStructs := make(map[string]*yangDirectory)
for _, e := range entries {
// If we are excluding config false (state entries) then skip processing
// this element.
if excludeState && !isConfig(e) {
continue
}
if e.IsList() || e.IsDir() || isRoot(e) {
// This should be mapped to a struct in the generated code since it has
// child elements in the YANG schema.
elem := &yangDirectory{
entry: e,
}
// Encode the name of the struct according to the language specified
// within the input arguments.
switch lang {
case protobuf:
// In the case of protobuf the message name is simply the camel
// case name that is specified.
elem.name = s.protoMsgName(e, compressPaths)
case golang:
// For Go, we map the name of the struct to the path elements
// in CamelCase separated by underscores.
elem.name = s.goStructName(e, compressPaths, genFakeRoot)
default:
errs = append(errs, fmt.Errorf("unknown generating language specified for %s, got: %v", e.Name, lang))
continue
}
// Find the elements that should be rooted on this particular entity.
var fieldErr []error
elem.fields, fieldErr = findAllChildren(e, compressPaths, excludeState)
if fieldErr != nil {
errs = append(errs, fieldErr...)
continue
}
// Determine the path of the element from the schema.
elem.path = strings.Split(schemaTreePath(e), "/")
// Mark this struct as the fake root if it is specified to be.
if e.Node != nil && e.Node.NName() == rootElementNodeName {
elem.isFakeRoot = true
}
// Handle structures that will represent the container which is duplicated
// inside a list. This involves extracting the key elements of the list
// and returning a yangListAttr structure that describes how they should
// be represented.
if e.IsList() {
lattr, listErr := s.buildListKey(e, compressPaths)
if listErr != nil {
errs = append(errs, listErr...)
continue
}
elem.listAttr = lattr
}
mappedStructs[e.Path()] = elem
} else {
errs = append(errs, fmt.Errorf("%s was not an element mapped to a struct", e.Path()))
}
}
return mappedStructs, errs
}
// findEnumSet walks the list of enumerated value leaves and determines whether
// code generation is required for each enum. Particularly, it removes
// duplication between config and state containers when compressPaths is true.
// It also de-dups references to the same identity base, and type definitions.
// If noUnderscores is set to true, then underscores are omitted from the enum
// names to reflect to the preferred style of some generated languages.
func (s *genState) findEnumSet(entries map[string]*yang.Entry, compressPaths, noUnderscores bool) (map[string]*yangEnum, []error) {
validEnums := make(map[string]*yang.Entry)
var enumNames []string
var errs []error
if compressPaths {
// Don't generate output for an element that exists both in the config and state containers,
// i.e., /interfaces/interface/config/enum and /interfaces/interface/state/enum should not
// both have code generated for them. Since there may be containers underneath state then
// we cannot rely on state having a specific place in the tree, therefore, walk through the
// path and swap 'state' for 'config' where it is found allowing us to check whether the
// state leaf has a corresponding config leaf, and if so, to ignore it. Note that a schema
// that is a valid OpenConfig schema has only a single instance of 'config' or 'state' in
// the path, therefore the below algorithm replaces only one element.
for path, e := range entries {
parts := strings.Split(path, "/")
var newPath []string
for _, p := range parts {
if p == "state" {
p = "config"
}
newPath = append(newPath, p)
}
if path == joinPath(newPath) {
// If the path remains the same - i.e., we did not replace state with
// config, then the enumeration is valid, such that code should be
// generated for it.
validEnums[path] = e
enumNames = append(enumNames, path)
} else {
// Else, if we changed the path, then we changed a state container for
// a config container, and we should check whether the config leaf
// exists. Only when it doesn't do we consider this enum.
if _, ok := entries[joinPath(newPath)]; !ok {
validEnums[path] = e
enumNames = append(enumNames, path)
}
}
}
} else {
// No de-duplication occurs when path compression is disabled.
validEnums = entries
for n := range validEnums {
enumNames = append(enumNames, n)
}
}
// Sort the name of the enums such that we have deterministic ordering. This allows the
// same entity to be used for code generation each time (avoiding flaky tests or scenarios
// where there are erroneous config/state differences).
sort.Strings(enumNames)
// Sort the list of enums such that we can ensure that, when there is deduplication, the same
// source entity is used for code generation.
genEnums := make(map[string]*yangEnum)
for _, eN := range enumNames {
e := validEnums[eN]
_, builtin := yang.TypeKindFromName[e.Type.Name]
switch {
case e.Type.Name == "union", len(e.Type.Type) > 0 && !builtin:
// Calculate any enumerated types that exist within a union, whether it
// is a directly defined union, or a non-builtin typedef.
es, err := s.enumeratedUnionEntry(e, compressPaths, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
for _, en := range es {
if _, ok := genEnums[en.name]; !ok {
genEnums[en.name] = en
}
}
case e.Type.Name == "identityref":
// This is an identityref - we do not want to generate code for an
// identityref but rather for the base identity. This means that we reduce
// duplication across different enum types. Re-map the "path" that is to
// be used to the new identityref name.
if e.Type.IdentityBase == nil {
errs = append(errs, fmt.Errorf("entry %s was an identity with a nil base", e.Name))
continue
}
idBaseName := s.resolveIdentityRefBaseType(e, noUnderscores)
if _, ok := genEnums[idBaseName]; !ok {
genEnums[idBaseName] = &yangEnum{
name: idBaseName,
entry: e,
}
}
case e.Type.Name == "enumeration":
// We simply want to map this enumeration into a new name. Since we do
// de-duplication of re-used enumerated leaves at different points in
// the schema (e.g., if openconfig-bgp/container/enum-A can be instantiated
// in two places, then we do not want to have multiple enumerated types
// that represent this leaf), then we do not have errors if duplicates
// occur, we simply perform de-duplication at this stage.
enumName := s.resolveEnumName(e, compressPaths, noUnderscores)
if _, ok := genEnums[enumName]; !ok {
genEnums[enumName] = &yangEnum{
name: enumName,
entry: e,
}
}
default:
// This is a type which is defined through a typedef.
typeName, err := s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
if _, ok := genEnums[typeName]; !ok {
genEnums[typeName] = &yangEnum{
name: typeName,
entry: e,
}
}
}
}
return genEnums, errs
}
// resolveIdentityRefBaseType calculates the mapped name of an identityref's
// base such that it can be used in generated code. The value that is returned
// is the defining module name followed by the CamelCase-ified version of the
// base's name. This function wraps the identityrefBaseTypeFromIdentity
// function since it covers the common case that the caller is interested in
// determining the name from an identityref leaf, rather than directly from the
// identity. If the noUnderscores bool is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant
// with language styles where underscores are not allowed in names.
func (s *genState) resolveIdentityRefBaseType(idr *yang.Entry, noUnderscores bool) string {
return s.identityrefBaseTypeFromIdentity(idr.Type.IdentityBase, noUnderscores)
}
// identityrefBaseTypeFromIdentity takes an input yang.Identity pointer and
// determines the name of the identity used within the generated code for it. The value
// returned is based on the defining module followed by the CamelCase-ified version
// of the identity's name. If noUnderscores is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant with
// language styles where underscores are not allowed in names.
func (s *genState) identityrefBaseTypeFromIdentity(i *yang.Identity, noUnderscores bool) string {
definingModName := parentModulePrettyName(i)
// As per a typedef that includes an enumeration, there is a many to one
// relationship between leaves and an identity value, therefore, we want to
// reuse the existing name for the identity enumeration if one exists.
identityKey := fmt.Sprintf("%s/%s", definingModName, i.Name)
if definedName, ok := s.uniqueIdentityNames[identityKey]; ok {
return definedName
}
var name string
if noUnderscores {
name = fmt.Sprintf("%s%s", yang.CamelCase(definingModName), strings.Replace(yang.CamelCase(i.Name), "_", "", -1))
} else {
name = fmt.Sprintf("%s_%s", yang.CamelCase(definingModName), yang.CamelCase(i.Name))
}
// The name of an identityref base type must be unique within the entire generated
// code, so the context of name generation is global.
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueIdentityNames[identityKey] = uniqueName
return uniqueName
}
// resolveEnumName takes a yang.Entry and resolves its name into the type name
// that will be used in the generated code. Whilst a leaf may only be used
// in a single context (i.e., at its own path), resolveEnumName may be called
// multiple times, and hence de-duplication of unique name generation is required.
// If noUnderscores is set to true, then underscores are omitted from the
// output name.
func (s *genState) resolveEnumName(e *yang.Entry, compressPaths, noUnderscores bool) string {
// It is possible, given a particular enumerated leaf, for it to appear
// multiple times in the schema. For example, through being defined in
// a grouping which is instantiated in two places. In these cases, the
// enumerated values must be the same since the path to the node - i.e.,
// module/hierarchy/of/containers/leaf-name must be unique, since we
// cannot have multiple modules of the same name, and paths within the
// module must be unique. To this end, we check whether we are generating
// an enumeration for exactly the same node, and if so, re-use the name
// of the enumeration that has been generated. This improves usability
// for the end user by avoiding multiple enumerated types.
//
// The path that is used for the enumeration is therefore taking the goyang
// "Node" hierarchy - we walk back up the tree until such time as we find
// a node that is not within the same module (parentModulePrettyName(parent) !=
// parentModulePrettyName(currentNode)), and use this as the unique path.
definingModName := parentModulePrettyName(e.Node)
var identifierPathElem []string
for elem := e.Node; elem.ParentNode() != nil && parentModulePrettyName(elem) == definingModName; elem = elem.ParentNode() {
identifierPathElem = append(identifierPathElem, elem.NName())
}
// Since the path elements are compiled from leaf back to root, then reverse them to
// form the path, this is not strictly required, but aids debugging of the elements.
var identifierPath string
for i := len(identifierPathElem) - 1; i >= 0; i-- {
identifierPath = fmt.Sprintf("%s/%s", identifierPath, identifierPathElem[i])
}
// For leaves that have an enumeration within a typedef that is within a union,
// we do not want to just use the place in the schema definition for de-duplication,
// since it becomes confusing for the user to have non-contextual names within
// this context. We therefore rewrite the identifier path to have the context
// that we are in. By default, we just use the name of the node, but in OpenConfig
// schemas we rely on the grandparent name.
if !isYANGBaseType(e.Type) {
idPfx := e.Name
if compressPaths && e.Parent != nil && e.Parent.Parent != nil {
idPfx = e.Parent.Parent.Name
}
identifierPath = fmt.Sprintf("%s%s", idPfx, identifierPath)
}
// If the leaf had already been encountered, then return the previously generated
// name, rather than generating a new name.
if definedName, ok := s.uniqueEnumeratedLeafNames[identifierPath]; ok {
return definedName
}
if compressPaths {
// If we compress paths then the name of this enum is of the form
// ModuleName_GrandParent_Leaf - we use GrandParent since Parent is
// State or Config so would not be unique. The proposed name is
// handed to makeNameUnique to ensure that it does not clash with
// other defined names.
name := fmt.Sprintf("%s_%s_%s", yang.CamelCase(definingModName), yang.CamelCase(e.Parent.Parent.Name), yang.CamelCase(e.Name))
if noUnderscores {
name = strings.Replace(name, "_", "", -1)
}
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
// If we do not compress the paths, then we write out the entire path.
var nbuf bytes.Buffer
for i, p := range traverseElementSchemaPath(e) {
if i != 0 && !noUnderscores {
nbuf.WriteRune('_')
}
nbuf.WriteString(yang.CamelCase(p))
}
uniqueName := makeNameUnique(nbuf.String(), s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
// resolveTypedefEnumeratedName takes a yang.Entry which represents a typedef
// that has an underlying enumerated type (e.g., identityref or enumeration),
// and resolves the name of the enum that will be generated in the corresponding
// Go code.
func (s *genState) resolveTypedefEnumeratedName(e *yang.Entry, noUnderscores bool) (string, error) {
typeName := e.Type.Name
// Handle the case whereby we have been handed an enumeration that is within a
// union. We need to synthesise the name of the type here such that it is based on
// type name, plus the fact that it is an enumeration.
if e.Type.Kind == yang.Yunion {
enumTypes := enumeratedUnionTypes(e.Type.Type)
switch len(enumTypes) {
case 1:
// We specifically say that this is an enumeration within the leaf.
if noUnderscores {
typeName = fmt.Sprintf("%sEnum", enumTypes[0].Name)
} else {
typeName = fmt.Sprintf("%s_Enum", enumTypes[0].Name)
}
case 0:
return "", fmt.Errorf("enumerated type had an empty union within it, path: %v, type: %v, enumerated: %v", e.Path(), e.Type, enumTypes)
default:
return "", fmt.Errorf("multiple enumerated types within a single enumeration not supported, path: %v, type: %v, enumerated: %v", e.Path(), e.Type, enumTypes)
}
}
if e.Node == nil {
return "", fmt.Errorf("nil Node in enum type %s", e.Name)
}
definingModName := parentModulePrettyName(e.Node)
// Since there can be many leaves that refer to the same typedef, we do not generate
// a name for each of them, but rather use a common name. We use the non-CamelCase lookup
// as this is unique, whereas post-camelisation we may have name clashes. Since a typedef
// does not have a 'path' in Goyang, we synthesise one using the form
// module-name/typedef-name.
typedefKey := fmt.Sprintf("%s/%s", definingModName, typeName)
if definedName, ok := s.uniqueEnumeratedTypedefNames[typedefKey]; ok {
return definedName, nil
}
// The module/typedefName was not already defined with a CamelCase name, so generate one
// here, and store it to be re-used later.
name := fmt.Sprintf("%s_%s", yang.CamelCase(definingModName), yang.CamelCase(typeName))
if noUnderscores {
name = strings.Replace(name, "_", "", -1)
}
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueEnumeratedTypedefNames[typedefKey] = uniqueName
return uniqueName, nil
}
// enumeratedTypedefTypeName resolves the name of an enumerated typedef (i.e.,
// a typedef which is either an identityref or an enumeration). The resolved
// name is prefixed with the prefix supplied. If the type that was supplied
// within the resolveTypeArgs struct is not a type definition which includes an
// enumerated type, the mappedType returned is nil, otherwise it is populated.
// If noUnderscores is set to true, underscores are omitted from the name
// of the enumerated typedef.
// It returns an error if the type does include an enumerated typedef, but this
// typedef is invalid.
func (s *genState) enumeratedTypedefTypeName(args resolveTypeArgs, prefix string, noUnderscores bool) (*mappedType, error) {
// If the type that is specified is not a built-in type (i.e., one of those
// types which is defined in RFC6020/RFC7950) then we establish what the type
// that we must actually perform the mapping for is. By default, start with
// the type that is specified in the schema.
if !isYANGBaseType(args.yangType) {
switch args.yangType.Kind {
case yang.Yenum, yang.Yidentityref:
// In the case of a typedef that specifies an enumeration or identityref
// then generate a enumerated type in the Go code according to the contextEntry
// which has been provided by the calling code.
if args.contextEntry == nil {
return nil, fmt.Errorf("error mapping node %s due to lack of context", args.yangType.Name)
}
tn, err := s.resolveTypedefEnumeratedName(args.contextEntry, noUnderscores)
if err != nil {
return nil, err
}
return &mappedType{
nativeType: fmt.Sprintf("%s%s", prefix, tn),
isEnumeratedValue: true,
}, nil
}
}
return nil, nil
}
// resolveLeafrefTarget takes an input path and context entry and
// determines the type of the leaf that is referred to by the path, such that
// it can be mapped to a native language type. It returns the yang.YangType that
// is associated with the target, and the target yang.Entry, such that the
// caller can map this to the relevant language type.
func (s *genState) resolveLeafrefTarget(path string, contextEntry *yang.Entry) (*yang.Entry, error) {
if s.schematree == nil {
// This should not be possible if the calling code generation is
// well structured and builds the schematree during parsing of YANG
// files.
return nil, fmt.Errorf("could not map leafref path: %v, from contextEntry: %v", path, contextEntry)
}
fixedPath, err := fixSchemaTreePath(path, contextEntry)
if err != nil {
return nil, err
}
e := s.schematree.GetLeafValue(fixedPath)
if e == nil {
return nil, fmt.Errorf("could not resolve leafref path: %v from %v, tree: %v", fixedPath, contextEntry, s.schematree)
}
target, ok := e.(*yang.Entry)
if !ok {
return nil, fmt.Errorf("invalid element returned from schema tree, must be a yang.Entry for path %v from %v", path, contextEntry)
}
return target, nil
}
| newGenState | identifier_name |
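The genstate.go rows document, in the findEnumSet comments, how duplicate enum definitions are avoided for OpenConfig config/state pairs: each candidate path has its `state` element rewritten to `config`, and the entry is kept only if the path is unchanged or no config counterpart exists. The sketch below is a Python paraphrase of that rule for illustration only; the authoritative version is the Go code above.

```python
# Python paraphrase (illustration only) of the config/state de-duplication
# rule described in findEnumSet; the real implementation is the Go code above.

def dedup_config_state(entries: dict) -> dict:
    """Keep one enum per config/state pair: drop a state path whose config twin exists."""
    valid = {}
    for path, entry in entries.items():
        config_path = "/".join(
            "config" if part == "state" else part for part in path.split("/")
        )
        if config_path == path or config_path not in entries:
            valid[path] = entry
    return valid


paths = {
    "/interfaces/interface/config/enum": "e1",
    "/interfaces/interface/state/enum": "e2",   # dropped: config twin exists
    "/system/state/only-here": "e3",            # kept: no config counterpart
}
print(sorted(dedup_config_state(paths)))
```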
genstate.go | // Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ygen
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/openconfig/gnmi/ctree"
"github.com/openconfig/goyang/pkg/yang"
"github.com/openconfig/ygot/ygot"
)
// genState is used to store the state that is created throughout the code
// generation and must be shared between multiple entities.
type genState struct {
// definedGlobals specifies the global Go names used during code generation.
definedGlobals map[string]bool
// uniqueDirectoryNames is a map keyed by the path of a YANG entity representing a
// directory in the generated code, whose value is the unique name that it
// was mapped to. This allows routines to determine, based on a particular YANG
// entry, how to refer to it when generating code.
uniqueDirectoryNames map[string]string
// uniqueIdentityNames is a map which is keyed by a string in the form of
// definingModule/identityName which stores the Go name of the enumerated Go type
// that has been created to represent the identity. This allows de-duplication
// between identityref leaves that reference the same underlying identity. The
// name used includes the defining module to avoid clashes between two identities
// that are named the same within different modules.
uniqueIdentityNames map[string]string
// uniqueEnumeratedTypedefNames is a map, keyed by a synthesised path for the typedef,
// generated in the form definingModule/typedefName, the value stores the Go name of
// the enumeration which represents a typedef that includes an enumerated type.
uniqueEnumeratedTypedefNames map[string]string
// uniqueEnumeratedLeafNames is a map, keyed by a synthesised path to an
// enumeration leaf. The path used reflects the data tree path of the leaf
// within the module that it is defined. That is to say, if a module
// example-module defines a hierarchy of global/config/a-leaf where a-leaf
// is of type enumeration, then the path example-module/global/config/a-leaf
// is used for a-leaf in the uniqueEnumeratedLeafNames. The value of the map
// is the name of the Go enumerated value to which it is mapped. The path based
// on the module is guaranteed to be unique, since we cannot have multiple
// modules of the same name, or multiple identical data tree paths within
// the same module. This path is used since a particular leaf may be re-used
// in multiple places, such that if the entire data tree path is used then
// the names that are generated require deduplication. This approach ensures
// that we have the same enumerated value for a particular leaf in multiple
// contexts.
// At the time of writing, in OpenConfig schemas, this occurs where there is
// a module such as openconfig-bgp which defines /bgp and is also used at
// /network-instances/network-instance/protocols/protocol/bgp.
uniqueEnumeratedLeafNames map[string]string
// schematree stores a ctree.Tree structure that represents the YANG
// schema tree. This is used for lookups within the module set where
// they are required, e.g., for leafrefs.
schematree *ctree.Tree
// uniqueProtoMsgNames is a map, keyed by a protobuf package name, that
// contains a map keyed by protobuf message name strings that indicates the
// names that are used within the generated package's context. It is used
// during code generation to ensure uniqueness of the generated names within
// the specified package.
uniqueProtoMsgNames map[string]map[string]bool
// uniqueProtoPackages is a map, keyed by a YANG schema path, that allows
// a path to be resolved into the calculated Protobuf package name that
// is to be used for it.
uniqueProtoPackages map[string]string
// generatedUnions stores a map, keyed by the output name for a union,
// that has already been output in the generated code. This ensures that
// where two entities re-use a union that has already been created (e.g.,
// a leafref to a union) then it is output only once in the generated code.
generatedUnions map[string]bool
}
// newGenState creates a new genState instance, initialised with the default state
// required for code generation.
func newGenState() *genState {
return &genState{
// Mark the name that is used for the binary type as a reserved name
// within the output structs.
definedGlobals: map[string]bool{
ygot.BinaryTypeName: true,
ygot.EmptyTypeName: true,
},
uniqueDirectoryNames: make(map[string]string),
uniqueEnumeratedTypedefNames: make(map[string]string),
uniqueIdentityNames: make(map[string]string),
uniqueEnumeratedLeafNames: make(map[string]string),
uniqueProtoMsgNames: make(map[string]map[string]bool),
uniqueProtoPackages: make(map[string]string),
generatedUnions: make(map[string]bool),
}
}
// enumeratedUnionEntry takes an input YANG union yang.Entry and returns the set of enumerated
// values that should be generated for the entry. New yang.Entry instances are synthesised within
// the yangEnums returned such that enumerations can be generated directly from the output of
// this function in common with enumerations that are not within a union. The name of the enumerated
// value is calculated based on the original context, whether path compression is enabled based
// on the compressPaths boolean, and whether the name should not include underscores, as per the
// noUnderscores boolean.
func (s *genState) enumeratedUnionEntry(e *yang.Entry, compressPaths, noUnderscores bool) ([]*yangEnum, error) {
var es []*yangEnum
for _, t := range enumeratedUnionTypes(e.Type.Type) {
var en *yangEnum
switch {
case t.IdentityBase != nil:
en = &yangEnum{
name: s.identityrefBaseTypeFromIdentity(t.IdentityBase, noUnderscores),
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yidentityref,
IdentityBase: t.IdentityBase,
},
},
}
case t.Enum != nil:
var enumName string
if _, chBuiltin := yang.TypeKindFromName[t.Name]; chBuiltin {
enumName = s.resolveEnumName(e, compressPaths, noUnderscores)
} else {
var err error
enumName, err = s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
return nil, err
}
}
en = &yangEnum{
name: enumName,
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yenum,
Enum: t.Enum,
},
Annotation: map[string]interface{}{"valuePrefix": traverseElementSchemaPath(e)},
},
}
}
es = append(es, en)
}
return es, nil
}
// buildDirectoryDefinitions extracts the yang.Entry instances from a map of
// entries that need struct or message definitions built for them. It resolves
// each yang.Entry to a yangDirectory which contains the elements that are
// needed for subsequent code generation. The name of the directory entry that
// is returned is based on the generatedLanguage that is supplied. The
// compressPaths and genFakeRoot arguments are used to determine how paths that
// are included within the generated structs are used. If the excludeState
// argument is set, those elements within the YANG schema that are marked config
// false (i.e., are read only) are excluded from the returned directories.
func (s *genState) buildDirectoryDefinitions(entries map[string]*yang.Entry, compressPaths, genFakeRoot bool, lang generatedLanguage, excludeState bool) (map[string]*yangDirectory, []error) {
var errs []error
mappedStructs := make(map[string]*yangDirectory)
for _, e := range entries {
// If we are excluding config false (state entries) then skip processing
// this element.
if excludeState && !isConfig(e) {
continue
}
if e.IsList() || e.IsDir() || isRoot(e) {
// This should be mapped to a struct in the generated code since it has
// child elements in the YANG schema.
elem := &yangDirectory{
entry: e,
}
// Encode the name of the struct according to the language specified
// within the input arguments.
switch lang {
case protobuf:
// In the case of protobuf the message name is simply the camel
// case name that is specified.
elem.name = s.protoMsgName(e, compressPaths)
case golang:
// For Go, we map the name of the struct to the path elements
// in CamelCase separated by underscores.
elem.name = s.goStructName(e, compressPaths, genFakeRoot)
default:
errs = append(errs, fmt.Errorf("unknown generating language specified for %s, got: %v", e.Name, lang))
continue
}
// Find the elements that should be rooted on this particular entity.
var fieldErr []error
elem.fields, fieldErr = findAllChildren(e, compressPaths, excludeState)
if fieldErr != nil {
errs = append(errs, fieldErr...)
continue
}
// Determine the path of the element from the schema.
elem.path = strings.Split(schemaTreePath(e), "/")
// Mark this struct as the fake root if it is specified to be.
if e.Node != nil && e.Node.NName() == rootElementNodeName {
elem.isFakeRoot = true
}
// Handle structures that will represent the container which is duplicated
// inside a list. This involves extracting the key elements of the list
// and returning a yangListAttr structure that describes how they should
// be represented.
if e.IsList() {
lattr, listErr := s.buildListKey(e, compressPaths)
if listErr != nil {
errs = append(errs, listErr...)
continue
}
elem.listAttr = lattr
}
mappedStructs[e.Path()] = elem
} else {
errs = append(errs, fmt.Errorf("%s was not an element mapped to a struct", e.Path()))
}
}
return mappedStructs, errs
}
// findEnumSet walks the list of enumerated value leaves and determines whether
// code generation is required for each enum. Particularly, it removes
// duplication between config and state containers when compressPaths is true.
// It also de-dups references to the same identity base, and type definitions.
// If noUnderscores is set to true, then underscores are omitted from the enum
// names to reflect to the preferred style of some generated languages.
func (s *genState) findEnumSet(entries map[string]*yang.Entry, compressPaths, noUnderscores bool) (map[string]*yangEnum, []error) {
validEnums := make(map[string]*yang.Entry)
var enumNames []string
var errs []error
if compressPaths {
// Don't generate output for an element that exists both in the config and state containers,
// i.e., /interfaces/interface/config/enum and /interfaces/interface/state/enum should not
// both have code generated for them. Since there may be containers underneath state then
// we cannot rely on state having a specific place in the tree, therefore, walk through the
// path and swap 'state' for 'config' where it is found allowing us to check whether the
// state leaf has a corresponding config leaf, and if so, to ignore it. Note that a schema
// that is a valid OpenConfig schema has only a single instance of 'config' or 'state' in
// the path, therefore the below algorithm replaces only one element.
for path, e := range entries {
parts := strings.Split(path, "/")
var newPath []string
for _, p := range parts {
if p == "state" {
p = "config"
}
newPath = append(newPath, p)
}
if path == joinPath(newPath) {
// If the path remains the same - i.e., we did not replace state with
// config, then the enumeration is valid, such that code should be
// generated for it.
validEnums[path] = e
enumNames = append(enumNames, path)
} else {
// Else, if we changed the path, then we changed a state container for
// a config container, and we should check whether the config leaf
// exists. Only when it doesn't do we consider this enum.
if _, ok := entries[joinPath(newPath)]; !ok {
validEnums[path] = e
enumNames = append(enumNames, path)
}
}
}
} else {
// No de-duplication occurs when path compression is disabled.
validEnums = entries
for n := range validEnums {
enumNames = append(enumNames, n)
}
}
// Sort the name of the enums such that we have deterministic ordering. This allows the
// same entity to be used for code generation each time (avoiding flaky tests or scenarios
// where there are erroneous config/state differences).
sort.Strings(enumNames)
// Iterate through the sorted list of enums so that, where de-duplication occurs, the
// same source entity is consistently used for code generation.
genEnums := make(map[string]*yangEnum)
for _, eN := range enumNames {
e := validEnums[eN]
_, builtin := yang.TypeKindFromName[e.Type.Name]
switch {
case e.Type.Name == "union", len(e.Type.Type) > 0 && !builtin:
// Calculate any enumerated types that exist within a union, whether it
// is a directly defined union, or a non-builtin typedef.
es, err := s.enumeratedUnionEntry(e, compressPaths, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
for _, en := range es {
if _, ok := genEnums[en.name]; !ok {
genEnums[en.name] = en
}
}
case e.Type.Name == "identityref":
// This is an identityref - we do not want to generate code for an
// identityref but rather for the base identity. This means that we reduce
// duplication across different enum types. Re-map the "path" that is to
// be used to the new identityref name.
if e.Type.IdentityBase == nil {
errs = append(errs, fmt.Errorf("entry %s was an identity with a nil base", e.Name))
continue
}
idBaseName := s.resolveIdentityRefBaseType(e, noUnderscores)
if _, ok := genEnums[idBaseName]; !ok {
genEnums[idBaseName] = &yangEnum{
name: idBaseName,
entry: e,
}
}
case e.Type.Name == "enumeration":
// We simply want to map this enumeration into a new name. Since we do
// de-duplication of re-used enumerated leaves at different points in
// the schema (e.g., if openconfig-bgp/container/enum-A can be instantiated
// in two places, then we do not want to have multiple enumerated types
// that represent this leaf), then we do not have errors if duplicates
// occur, we simply perform de-duplication at this stage.
enumName := s.resolveEnumName(e, compressPaths, noUnderscores)
if _, ok := genEnums[enumName]; !ok {
genEnums[enumName] = &yangEnum{
name: enumName,
entry: e,
}
}
default:
// This is a type which is defined through a typedef.
typeName, err := s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
if _, ok := genEnums[typeName]; !ok {
genEnums[typeName] = &yangEnum{
name: typeName,
entry: e,
}
}
}
}
return genEnums, errs
}
// resolveIdentityRefBaseType calculates the mapped name of an identityref's
// base such that it can be used in generated code. The value that is returned
// is the defining module name followed by the CamelCase-ified version of the
// base's name. This function wraps the identityrefBaseTypeFromIdentity
// function since it covers the common case that the caller is interested in
// determining the name from an identityref leaf, rather than directly from the
// identity. If the noUnderscores bool is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant
// with language styles where underscores are not allowed in names.
func (s *genState) resolveIdentityRefBaseType(idr *yang.Entry, noUnderscores bool) string {
return s.identityrefBaseTypeFromIdentity(idr.Type.IdentityBase, noUnderscores)
}
// identityrefBaseTypeFromIdentity takes an input yang.Identity pointer and
// determines the name of the identity used within the generated code for it. The value
// returned is based on the defining module followed by the CamelCase-ified version
// of the identity's name. If noUnderscores is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant with
// language styles where underscores are not allowed in names.
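//
// Illustrative example (editor's note, assuming the usual CamelCase mapping): an
// identity "ethernet" whose defining module is "openconfig-if-types" would be
// expected to yield "OpenconfigIfTypes_Ethernet", or "OpenconfigIfTypesEthernet"
// when noUnderscores is set.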
func (s *genState) identityrefBaseTypeFromIdentity(i *yang.Identity, noUnderscores bool) string {
definingModName := parentModulePrettyName(i)
// As per a typedef that includes an enumeration, there is a many to one
// relationship between leaves and an identity value, therefore, we want to
// reuse the existing name for the identity enumeration if one exists.
identityKey := fmt.Sprintf("%s/%s", definingModName, i.Name)
if definedName, ok := s.uniqueIdentityNames[identityKey]; ok {
return definedName
}
var name string
if noUnderscores {
name = fmt.Sprintf("%s%s", yang.CamelCase(definingModName), strings.Replace(yang.CamelCase(i.Name), "_", "", -1))
} else {
name = fmt.Sprintf("%s_%s", yang.CamelCase(definingModName), yang.CamelCase(i.Name))
}
// The name of an identityref base type must be unique within the entire generated
// code, so the context of name generation is global.
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueIdentityNames[identityKey] = uniqueName
return uniqueName
}
// resolveEnumName takes a yang.Entry and resolves its name into the type name
// that will be used in the generated code. Whilst a leaf may only be used
// in a single context (i.e., at its own path), resolveEnumName may be called
// multiple times, and hence de-duplication of unique name generation is required.
// If noUnderscores is set to true, then underscores are omitted from the
// output name.
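//
// Illustrative example (editor's note, names are hypothetical): with compressPaths
// set, a leaf "session-state" under .../neighbors/neighbor/state in module
// "openconfig-bgp" would be expected to resolve to a name of the form
// ModuleName_GrandParent_Leaf, e.g. "OpenconfigBgp_Neighbor_SessionState", made
// unique via makeNameUnique where required.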
func (s *genState) resolveEnumName(e *yang.Entry, compressPaths, noUnderscores bool) string {
// It is possible, given a particular enumerated leaf, for it to appear
// multiple times in the schema. For example, through being defined in
// a grouping which is instantiated in two places. In these cases, the
// enumerated values must be the same since the path to the node - i.e.,
// module/hierarchy/of/containers/leaf-name must be unique, since we
// cannot have multiple modules of the same name, and paths within the
// module must be unique. To this end, we check whether we are generating
// an enumeration for exactly the same node, and if so, re-use the name
// of the enumeration that has been generated. This improves usability
// for the end user by avoiding multiple enumerated types.
//
// The path that is used for the enumeration is therefore taking the goyang
// "Node" hierarchy - we walk back up the tree until such time as we find
// a node that is not within the same module (parentModulePrettyName(parent) !=
// parentModulePrettyName(currentNode)), and use this as the unique path.
definingModName := parentModulePrettyName(e.Node)
var identifierPathElem []string
for elem := e.Node; elem.ParentNode() != nil && parentModulePrettyName(elem) == definingModName; elem = elem.ParentNode() {
identifierPathElem = append(identifierPathElem, elem.NName())
}
// Since the path elements are compiled from leaf back to root, then reverse them to
// form the path, this is not strictly required, but aids debugging of the elements.
var identifierPath string
for i := len(identifierPathElem) - 1; i >= 0; i-- {
identifierPath = fmt.Sprintf("%s/%s", identifierPath, identifierPathElem[i])
}
// For leaves that have an enumeration within a typedef that is within a union,
// we do not want to just use the place in the schema definition for de-duplication,
// since it becomes confusing for the user to have non-contextual names within
// this context. We therefore rewrite the identifier path to have the context
// that we are in. By default, we just use the name of the node, but in OpenConfig
// schemas we rely on the grandparent name.
if !isYANGBaseType(e.Type) {
idPfx := e.Name
if compressPaths && e.Parent != nil && e.Parent.Parent != nil {
idPfx = e.Parent.Parent.Name
}
identifierPath = fmt.Sprintf("%s%s", idPfx, identifierPath)
}
// If the leaf had already been encountered, then return the previously generated
// name, rather than generating a new name.
if definedName, ok := s.uniqueEnumeratedLeafNames[identifierPath]; ok {
return definedName
}
if compressPaths {
// If we compress paths then the name of this enum is of the form
// ModuleName_GrandParent_Leaf - we use GrandParent since Parent is
// State or Config so would not be unique. The proposed name is
// handed to makeNameUnique to ensure that it does not clash with
// other defined names.
name := fmt.Sprintf("%s_%s_%s", yang.CamelCase(definingModName), yang.CamelCase(e.Parent.Parent.Name), yang.CamelCase(e.Name))
if noUnderscores {
name = strings.Replace(name, "_", "", -1)
}
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
// If we do not compress paths, then we write out the entire path.
var nbuf bytes.Buffer
for i, p := range traverseElementSchemaPath(e) {
if i != 0 && !noUnderscores {
nbuf.WriteRune('_')
}
nbuf.WriteString(yang.CamelCase(p))
}
uniqueName := makeNameUnique(nbuf.String(), s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
// resolveTypedefEnumeratedName takes a yang.Entry which represents a typedef
// that has an underlying enumerated type (e.g., identityref or enumeration),
// and resolves the name of the enum that will be generated in the corresponding
// Go code.
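//
// Illustrative example (editor's note, names are hypothetical): a typedef
// "bgp-session-direction" with an enumeration base defined in module
// "openconfig-bgp-types" would be expected to resolve to
// "OpenconfigBgpTypes_BgpSessionDirection", or
// "OpenconfigBgpTypesBgpSessionDirection" when noUnderscores is set.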
func (s *genState) resolveTypedefEnumeratedName(e *yang.Entry, noUnderscores bool) (string, error) {
typeName := e.Type.Name
// Handle the case whereby we have been handed an enumeration that is within a
// union. We need to synthesise the name of the type here such that it is based on
// type name, plus the fact that it is an enumeration.
if e.Type.Kind == yang.Yunion {
enumTypes := enumeratedUnionTypes(e.Type.Type)
switch len(enumTypes) {
case 1:
// We specifically say that this is an enumeration within the leaf.
if noUnderscores {
typeName = fmt.Sprintf("%sEnum", enumTypes[0].Name)
} else {
typeName = fmt.Sprintf("%s_Enum", enumTypes[0].Name)
}
case 0:
return "", fmt.Errorf("enumerated type had an empty union within it, path: %v, type: %v, enumerated: %v", e.Path(), e.Type, enumTypes)
default:
return "", fmt.Errorf("multiple enumerated types within a single enumeration not supported, path: %v, type: %v, enumerated: %v", e.Path(), e.Type, enumTypes)
}
}
if e.Node == nil {
return "", fmt.Errorf("nil Node in enum type %s", e.Name)
}
definingModName := parentModulePrettyName(e.Node)
// Since there can be many leaves that refer to the same typedef, we do not generate
// a name for each of them, but rather use a common name. We use the non-CamelCase lookup
// as this is unique, whereas post-camelisation we may have name clashes. Since a typedef
// does not have a 'path' in Goyang, we synthesise one using the form
// module-name/typedef-name.
typedefKey := fmt.Sprintf("%s/%s", definingModName, typeName)
if definedName, ok := s.uniqueEnumeratedTypedefNames[typedefKey]; ok {
return definedName, nil
}
// The module/typedefName was not already defined with a CamelCase name, so generate one
// here, and store it to be re-used later.
name := fmt.Sprintf("%s_%s", yang.CamelCase(definingModName), yang.CamelCase(typeName))
if noUnderscores {
name = strings.Replace(name, "_", "", -1)
}
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueEnumeratedTypedefNames[typedefKey] = uniqueName
return uniqueName, nil
}
// enumeratedTypedefTypeName resolves the name of an enumerated typedef (i.e.,
// a typedef which is either an identityref or an enumeration). The resolved
// name is prefixed with the prefix supplied. If the type that was supplied
// within the resolveTypeArgs struct is not a type definition which includes an
// enumerated type, the mappedType returned is nil, otherwise it is populated.
// If noUnderscores is set to true, underscores are omitted from the name
// of the enumerated typedef.
// It returns an error if the type does include an enumerated typedef, but this
// typedef is invalid.
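//
// Illustrative example (editor's note, the prefix shown is hypothetical): with a
// prefix of "E_" and a typedef that resolves to
// "OpenconfigBgpTypes_BgpSessionDirection", the returned mappedType would be
// expected to carry nativeType "E_OpenconfigBgpTypes_BgpSessionDirection" with
// isEnumeratedValue set to true.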
func (s *genState) enumeratedTypedefTypeName(args resolveTypeArgs, prefix string, noUnderscores bool) (*mappedType, error) {
// If the type that is specified is not a built-in type (i.e., one of those
// types which is defined in RFC6020/RFC7950) then we establish what the type
// that we must actually perform the mapping for is. By default, start with
// the type that is specified in the schema.
if !isYANGBaseType(args.yangType) {
switch args.yangType.Kind {
case yang.Yenum, yang.Yidentityref:
// In the case of a typedef that specifies an enumeration or identityref
// then generate an enumerated type in the Go code according to the contextEntry
// which has been provided by the calling code.
if args.contextEntry == nil {
return nil, fmt.Errorf("error mapping node %s due to lack of context", args.yangType.Name)
}
tn, err := s.resolveTypedefEnumeratedName(args.contextEntry, noUnderscores)
if err != nil {
return nil, err
}
return &mappedType{
nativeType: fmt.Sprintf("%s%s", prefix, tn),
isEnumeratedValue: true,
}, nil
}
}
return nil, nil
}
// resolveLeafrefTarget takes an input path and context entry and
// determines the type of the leaf that is referred to by the path, such that
// it can be mapped to a native language type. It returns the yang.YangType that
// is associated with the target, and the target yang.Entry, such that the
// caller can map this to the relevant language type.
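//
// Illustrative example (editor's note): a leafref whose path is "../config/name",
// evaluated against a context entry inside a list member, would be fixed up into an
// absolute schema path and looked up in the schema tree, returning the "name" leaf's
// yang.Entry so that its type can be mapped.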
func (s *genState) resolveLeafrefTarget(path string, contextEntry *yang.Entry) (*yang.Entry, error) {
if s.schematree == nil {
// This should not be possible if the calling code generation is
// well structured and builds the schematree during parsing of YANG
// files.
return nil, fmt.Errorf("could not map leafref path: %v, from contextEntry: %v", path, contextEntry)
}
fixedPath, err := fixSchemaTreePath(path, contextEntry)
if err != nil {
return nil, err
}
e := s.schematree.GetLeafValue(fixedPath)
if e == nil {
return nil, fmt.Errorf("could not resolve leafref path: %v from %v, tree: %v", fixedPath, contextEntry, s.schematree)
}
target, ok := e.(*yang.Entry)
if !ok {
return nil, fmt.Errorf("invalid element returned from schema tree, must be a yang.Entry for path %v from %v", path, contextEntry)
}
return target, nil
}
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ygen
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/openconfig/gnmi/ctree"
"github.com/openconfig/goyang/pkg/yang"
"github.com/openconfig/ygot/ygot"
)
// genState is used to store the state that is created throughout the code
// generation and must be shared between multiple entities.
type genState struct {
// definedGlobals specifies the global Go names used during code generation.
definedGlobals map[string]bool
// uniqueDirectoryNames is a map keyed by the path of a YANG entity representing a
// directory in the generated code, whose value is the unique name that it
// was mapped to. This allows routines to determine, based on a particular YANG
// entry, how to refer to it when generating code.
uniqueDirectoryNames map[string]string
// uniqueIdentityNames is a map which is keyed by a string in the form of
// definingModule/identityName which stores the Go name of the enumerated Go type
// that has been created to represent the identity. This allows de-duplication
// between identityref leaves that reference the same underlying identity. The
// name used includes the defining module to avoid clashes between two identities
// that are named the same within different modules.
uniqueIdentityNames map[string]string
// uniqueEnumeratedTypedefNames is a map, keyed by a synthesised path for the typedef,
// generated in the form definingModule/typedefName, the value stores the Go name of
// the enumeration which represents a typedef that includes an enumerated type.
uniqueEnumeratedTypedefNames map[string]string
// uniqueEnumeratedLeafNames is a map, keyed by a synthesised path to an
// enumeration leaf. The path used reflects the data tree path of the leaf
// within the module that it is defined. That is to say, if a module
// example-module defines a hierarchy of global/config/a-leaf where a-leaf
// is of type enumeration, then the path example-module/global/config/a-leaf
// is used for a-leaf in the uniqueEnumeratedLeafNames. The value of the map
// is the name of the Go enumerated value to which it is mapped. The path based
// on the module is guaranteed to be unique, since we cannot have multiple
// modules of the same name, or multiple identical data tree paths within
// the same module. This path is used since a particular leaf may be re-used
// in multiple places, such that if the entire data tree path is used then
// the names that are generated require deduplication. This approach ensures
// that we have the same enumerated value for a particular leaf in multiple
// contexts.
// At the time of writing, in OpenConfig schemas, this occurs where there is
// a module such as openconfig-bgp which defines /bgp and is also used at
// /network-instances/network-instance/protocols/protocol/bgp.
uniqueEnumeratedLeafNames map[string]string
// schematree stores a ctree.Tree structure that represents the YANG
// schema tree. This is used for lookups within the module set where
// they are required, e.g., for leafrefs.
schematree *ctree.Tree
// uniqueProtoMsgNames is a map, keyed by a protobuf package name, that
// contains a map keyed by protobuf message name strings that indicates the
// names that are used within the generated package's context. It is used
// during code generation to ensure uniqueness of the generated names within
// the specified package.
uniqueProtoMsgNames map[string]map[string]bool
// uniqueProtoPackages is a map, keyed by a YANG schema path, that allows
// a path to be resolved into the calculated Protobuf package name that
// is to be used for it.
uniqueProtoPackages map[string]string
// generatedUnions stores a map, keyed by the output name for a union,
// that has already been output in the generated code. This ensures that
// where two entities re-use a union that has already been created (e.g.,
// a leafref to a union) then it is output only once in the generated code.
generatedUnions map[string]bool
}
// newGenState creates a new genState instance, initialised with the default state
// required for code generation.
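//
// A minimal usage sketch (editor's note, not from the original source; the argument
// values are illustrative only):
//
//	s := newGenState()
//	dirs, errs := s.buildDirectoryDefinitions(entries, true, true, golang, false)
//
// where entries is a map of yang.Entry structs produced elsewhere in the generator.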
func newGenState() *genState {
return &genState{
// Mark the name that is used for the binary type as a reserved name
// within the output structs.
definedGlobals: map[string]bool{
ygot.BinaryTypeName: true,
ygot.EmptyTypeName: true,
},
uniqueDirectoryNames: make(map[string]string),
uniqueEnumeratedTypedefNames: make(map[string]string),
uniqueIdentityNames: make(map[string]string),
uniqueEnumeratedLeafNames: make(map[string]string),
uniqueProtoMsgNames: make(map[string]map[string]bool),
uniqueProtoPackages: make(map[string]string),
generatedUnions: make(map[string]bool),
}
}
// enumeratedUnionEntry takes an input YANG union yang.Entry and returns the set of enumerated
// values that should be generated for the entry. New yang.Entry instances are synthesised within
// the yangEnums returned such that enumerations can be generated directly from the output of
// this function in common with enumerations that are not within a union. The name of the enumerated
// value is calculated based on the original context, whether path compression is enabled based
// on the compressPaths boolean, and whether the name should not include underscores, as per the
// noUnderscores boolean.
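//
// Illustrative example (editor's note): a union leaf whose member types include an
// enumeration and an identityref would be expected to yield two yangEnum values,
// one named via resolveEnumName (or resolveTypedefEnumeratedName for a typedef) and
// one named via identityrefBaseTypeFromIdentity.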
func (s *genState) enumeratedUnionEntry(e *yang.Entry, compressPaths, noUnderscores bool) ([]*yangEnum, error) {
var es []*yangEnum
for _, t := range enumeratedUnionTypes(e.Type.Type) {
var en *yangEnum
switch {
case t.IdentityBase != nil:
en = &yangEnum{
name: s.identityrefBaseTypeFromIdentity(t.IdentityBase, noUnderscores),
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yidentityref,
IdentityBase: t.IdentityBase,
},
},
}
case t.Enum != nil:
var enumName string
if _, chBuiltin := yang.TypeKindFromName[t.Name]; chBuiltin {
enumName = s.resolveEnumName(e, compressPaths, noUnderscores)
} else {
var err error
enumName, err = s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
return nil, err
}
}
en = &yangEnum{
name: enumName,
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yenum,
Enum: t.Enum,
},
Annotation: map[string]interface{}{"valuePrefix": traverseElementSchemaPath(e)},
},
}
}
es = append(es, en)
}
return es, nil
}
// buildDirectoryDefinitions extracts the yang.Entry instances from a map of
// entries that need struct or message definitions built for them. It resolves
// each yang.Entry to a yangDirectory which contains the elements that are
// needed for subsequent code generation. The name of the directory entry that
// is returned is based on the generatedLanguage that is supplied. The
// compressPaths and genFakeRoot arguments are used to determine how paths that
// are included within the generated structs are used. If the excludeState
// argument is set, those elements within the YANG schema that are marked config
// false (i.e., are read only) are excluded from the returned directories.
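//
// Illustrative example (editor's note, names are hypothetical): for Go output, a
// list /interfaces/interface would be expected to map to a yangDirectory whose name
// is produced by goStructName, whose listAttr describes its key leaves, and which is
// keyed in the returned map by the entry's schema path.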
func (s *genState) buildDirectoryDefinitions(entries map[string]*yang.Entry, compressPaths, genFakeRoot bool, lang generatedLanguage, excludeState bool) (map[string]*yangDirectory, []error) {
var errs []error
mappedStructs := make(map[string]*yangDirectory)
for _, e := range entries {
// If we are excluding config false (state entries) then skip processing
// this element.
if excludeState && !isConfig(e) {
continue
}
if e.IsList() || e.IsDir() || isRoot(e) {
// This should be mapped to a struct in the generated code since it has
// child elements in the YANG schema.
elem := &yangDirectory{
entry: e,
}
// Encode the name of the struct according to the language specified
// within the input arguments.
switch lang {
case protobuf:
// In the case of protobuf the message name is simply the camel
// case name that is specified.
elem.name = s.protoMsgName(e, compressPaths)
case golang:
// For Go, we map the name of the struct to the path elements
// in CamelCase separated by underscores.
elem.name = s.goStructName(e, compressPaths, genFakeRoot)
default:
errs = append(errs, fmt.Errorf("unknown generating language specified for %s, got: %v", e.Name, lang))
continue
}
// Find the elements that should be rooted on this particular entity.
var fieldErr []error
elem.fields, fieldErr = findAllChildren(e, compressPaths, excludeState)
if fieldErr != nil {
errs = append(errs, fieldErr...)
continue
}
// Determine the path of the element from the schema.
elem.path = strings.Split(schemaTreePath(e), "/")
// Mark this struct as the fake root if it is specified to be.
if e.Node != nil && e.Node.NName() == rootElementNodeName {
elem.isFakeRoot = true
}
// Handle structures that will represent the container which is duplicated
// inside a list. This involves extracting the key elements of the list
// and returning a yangListAttr structure that describes how they should
// be represented.
if e.IsList() {
lattr, listErr := s.buildListKey(e, compressPaths)
if listErr != nil {
errs = append(errs, listErr...)
continue
}
elem.listAttr = lattr
}
mappedStructs[e.Path()] = elem
} else {
errs = append(errs, fmt.Errorf("%s was not an element mapped to a struct", e.Path()))
}
}
return mappedStructs, errs
}
// findEnumSet walks the list of enumerated value leaves and determines whether
// code generation is required for each enum. Particularly, it removes
// duplication between config and state containers when compressPaths is true.
// It also de-dups references to the same identity base, and type definitions.
// If noUnderscores is set to true, then underscores are omitted from the enum
// names to reflect the preferred style of some generated languages.
func (s *genState) findEnumSet(entries map[string]*yang.Entry, compressPaths, noUnderscores bool) (map[string]*yangEnum, []error) {
validEnums := make(map[string]*yang.Entry)
var enumNames []string
var errs []error
if compressPaths {
// Don't generate output for an element that exists both in the config and state containers,
// i.e., /interfaces/interface/config/enum and /interfaces/interface/state/enum should not
// both have code generated for them. Since there may be containers underneath state,
// we cannot rely on state having a specific place in the tree; therefore, walk through the
// path and swap 'state' for 'config' where it is found allowing us to check whether the
// state leaf has a corresponding config leaf, and if so, to ignore it. Note that a schema
// that is a valid OpenConfig schema has only a single instance of 'config' or 'state' in
// the path, therefore the below algorithm replaces only one element.
for path, e := range entries {
parts := strings.Split(path, "/")
var newPath []string
for _, p := range parts {
if p == "state" {
p = "config"
}
newPath = append(newPath, p)
}
if path == joinPath(newPath) {
// If the path remains the same - i.e., we did not replace state with
// config, then the enumeration is valid, and code should be
// generated for it.
validEnums[path] = e
enumNames = append(enumNames, path)
} else {
// Else, if we changed the path, then we changed a state container for
// a config container, and we should check whether the config leaf
// exists. Only when it doesn't do we consider this enum.
if _, ok := entries[joinPath(newPath)]; !ok {
validEnums[path] = e
enumNames = append(enumNames, path)
}
}
}
} else {
// No de-duplication occurs when path compression is disabled.
validEnums = entries
for n := range validEnums {
enumNames = append(enumNames, n)
}
}
// Sort the name of the enums such that we have deterministic ordering. This allows the
// same entity to be used for code generation each time (avoiding flaky tests or scenarios
// where there are erroneous config/state differences).
sort.Strings(enumNames)
// Iterate through the sorted list of enums so that, where de-duplication occurs, the
// same source entity is consistently used for code generation.
genEnums := make(map[string]*yangEnum)
for _, eN := range enumNames {
e := validEnums[eN]
_, builtin := yang.TypeKindFromName[e.Type.Name]
switch {
case e.Type.Name == "union", len(e.Type.Type) > 0 && !builtin:
// Calculate any enumerated types that exist within a union, whether it
// is a directly defined union, or a non-builtin typedef.
es, err := s.enumeratedUnionEntry(e, compressPaths, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
for _, en := range es {
if _, ok := genEnums[en.name]; !ok {
genEnums[en.name] = en
}
}
case e.Type.Name == "identityref":
// This is an identityref - we do not want to generate code for an
// identityref but rather for the base identity. This means that we reduce
// duplication across different enum types. Re-map the "path" that is to
// be used to the new identityref name.
if e.Type.IdentityBase == nil {
errs = append(errs, fmt.Errorf("entry %s was an identity with a nil base", e.Name))
continue
}
idBaseName := s.resolveIdentityRefBaseType(e, noUnderscores)
if _, ok := genEnums[idBaseName]; !ok {
genEnums[idBaseName] = &yangEnum{
name: idBaseName,
entry: e,
}
}
case e.Type.Name == "enumeration":
// We simply want to map this enumeration into a new name. Since we do
// de-duplication of re-used enumerated leaves at different points in
// the schema (e.g., if openconfig-bgp/container/enum-A can be instantiated
// in two places, then we do not want to have multiple enumerated types
// that represent this leaf), then we do not have errors if duplicates
// occur, we simply perform de-duplication at this stage.
enumName := s.resolveEnumName(e, compressPaths, noUnderscores)
if _, ok := genEnums[enumName]; !ok {
genEnums[enumName] = &yangEnum{
name: enumName,
entry: e,
}
}
default:
// This is a type which is defined through a typedef.
typeName, err := s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
if _, ok := genEnums[typeName]; !ok {
genEnums[typeName] = &yangEnum{
name: typeName,
entry: e,
}
}
}
}
return genEnums, errs
}
// resolveIdentityRefBaseType calculates the mapped name of an identityref's
// base such that it can be used in generated code. The value that is returned
// is the defining module name followed by the CamelCase-ified version of the
// base's name. This function wraps the identityrefBaseTypeFromIdentity
// function since it covers the common case that the caller is interested in
// determining the name from an identityref leaf, rather than directly from the
// identity. If the noUnderscores bool is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant
// with language styles where underscores are not allowed in names.
func (s *genState) resolveIdentityRefBaseType(idr *yang.Entry, noUnderscores bool) string {
return s.identityrefBaseTypeFromIdentity(idr.Type.IdentityBase, noUnderscores)
}
// identityrefBaseTypeFromIdentity takes an input yang.Identity pointer and
// determines the name of the identity used within the generated code for it. The value
// returned is based on the defining module followed by the CamelCase-ified version
// of the identity's name. If noUnderscores is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant with
// language styles where underscores are not allowed in names.
func (s *genState) identityrefBaseTypeFromIdentity(i *yang.Identity, noUnderscores bool) string {
definingModName := parentModulePrettyName(i)
// As per a typedef that includes an enumeration, there is a many to one
// relationship between leaves and an identity value, therefore, we want to
// reuse the existing name for the identity enumeration if one exists.
identityKey := fmt.Sprintf("%s/%s", definingModName, i.Name)
if definedName, ok := s.uniqueIdentityNames[identityKey]; ok {
return definedName
}
var name string
if noUnderscores {
name = fmt.Sprintf("%s%s", yang.CamelCase(definingModName), strings.Replace(yang.CamelCase(i.Name), "_", "", -1))
} else {
name = fmt.Sprintf("%s_%s", yang.CamelCase(definingModName), yang.CamelCase(i.Name))
}
// The name of an identityref base type must be unique within the entire generated
// code, so the context of name generation is global.
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueIdentityNames[identityKey] = uniqueName
return uniqueName
}
// resolveEnumName takes a yang.Entry and resolves its name into the type name
// that will be used in the generated code. Whilst a leaf may only be used
// in a single context (i.e., at its own path), resolveEnumName may be called
// multiple times, and hence de-duplication of unique name generation is required.
// If noUnderscores is set to true, then underscores are omitted from the
// output name.
func (s *genState) resolveEnumName(e *yang.Entry, compressPaths, noUnderscores bool) string {
// It is possible, given a particular enumerated leaf, for it to appear
// multiple times in the schema. For example, through being defined in
// a grouping which is instantiated in two places. In these cases, the
// enumerated values must be the same since the path to the node - i.e.,
// module/hierarchy/of/containers/leaf-name must be unique, since we
// cannot have multiple modules of the same name, and paths within the
// module must be unique. To this end, we check whether we are generating
// an enumeration for exactly the same node, and if so, re-use the name
// of the enumeration that has been generated. This improves usability
// for the end user by avoiding multiple enumerated types.
//
// The path that is used for the enumeration is therefore taking the goyang
// "Node" hierarchy - we walk back up the tree until such time as we find
// a node that is not within the same module (parentModulePrettyName(parent) !=
// parentModulePrettyName(currentNode)), and use this as the unique path.
definingModName := parentModulePrettyName(e.Node)
var identifierPathElem []string
for elem := e.Node; elem.ParentNode() != nil && parentModulePrettyName(elem) == definingModName; elem = elem.ParentNode() {
identifierPathElem = append(identifierPathElem, elem.NName())
}
// Since the path elements are compiled from leaf back to root, then reverse them to
// form the path, this is not strictly required, but aids debugging of the elements.
var identifierPath string
for i := len(identifierPathElem) - 1; i >= 0; i-- {
identifierPath = fmt.Sprintf("%s/%s", identifierPath, identifierPathElem[i])
}
// For leaves that have an enumeration within a typedef that is within a union,
// we do not want to just use the place in the schema definition for de-duplication,
// since it becomes confusing for the user to have non-contextual names within
// this context. We therefore rewrite the identifier path to have the context
// that we are in. By default, we just use the name of the node, but in OpenConfig
// schemas we rely on the grandparent name.
if !isYANGBaseType(e.Type) {
idPfx := e.Name
if compressPaths && e.Parent != nil && e.Parent.Parent != nil {
idPfx = e.Parent.Parent.Name
}
identifierPath = fmt.Sprintf("%s%s", idPfx, identifierPath)
}
// If the leaf had already been encountered, then return the previously generated
// name, rather than generating a new name.
if definedName, ok := s.uniqueEnumeratedLeafNames[identifierPath]; ok {
return definedName
}
if compressPaths {
// If we compress paths then the name of this enum is of the form
// ModuleName_GrandParent_Leaf - we use GrandParent since Parent is
// State or Config so would not be unique. The proposed name is
// handed to makeNameUnique to ensure that it does not clash with
// other defined names.
name := fmt.Sprintf("%s_%s_%s", yang.CamelCase(definingModName), yang.CamelCase(e.Parent.Parent.Name), yang.CamelCase(e.Name))
if noUnderscores {
name = strings.Replace(name, "_", "", -1)
}
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
// If we do not compress paths, then we write out the entire path.
var nbuf bytes.Buffer
for i, p := range traverseElementSchemaPath(e) {
if i != 0 && !noUnderscores {
nbuf.WriteRune('_')
}
nbuf.WriteString(yang.CamelCase(p))
}
uniqueName := makeNameUnique(nbuf.String(), s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
// resolveTypedefEnumeratedName takes a yang.Entry which represents a typedef
// that has an underlying enumerated type (e.g., identityref or enumeration),
// and resolves the name of the enum that will be generated in the corresponding
// Go code.
func (s *genState) resolveTypedefEnumeratedName(e *yang.Entry, noUnderscores bool) (string, error) {
typeName := e.Type.Name
// Handle the case whereby we have been handed an enumeration that is within a
// union. We need to synthesise the name of the type here such that it is based on
// type name, plus the fact that it is an enumeration.
if e.Type.Kind == yang.Yunion {
enumTypes := enumeratedUnionTypes(e.Type.Type)
switch len(enumTypes) {
case 1:
// We specifically say that this is an enumeration within the leaf.
if noUnderscores {
typeName = fmt.Sprintf("%sEnum", enumTypes[0].Name)
} else {
typeName = fmt.Sprintf("%s_Enum", enumTypes[0].Name)
}
case 0:
return "", fmt.Errorf("enumerated type had an empty union within it, path: %v, type: %v, enumerated: %v", e.Path(), e.Type, enumTypes)
default:
return "", fmt.Errorf("multiple enumerated types within a single enumeration not supported, path: %v, type: %v, enumerated: %v", e.Path(), e.Type, enumTypes)
}
}
if e.Node == nil {
return "", fmt.Errorf("nil Node in enum type %s", e.Name)
}
definingModName := parentModulePrettyName(e.Node)
// Since there can be many leaves that refer to the same typedef, we do not generate
// a name for each of them, but rather use a common name. We use the non-CamelCase lookup
// as this is unique, whereas post-camelisation we may have name clashes. Since a typedef
// does not have a 'path' in Goyang, we synthesise one using the form
// module-name/typedef-name.
typedefKey := fmt.Sprintf("%s/%s", definingModName, typeName)
if definedName, ok := s.uniqueEnumeratedTypedefNames[typedefKey]; ok {
return definedName, nil
}
// The module/typedefName was not already defined with a CamelCase name, so generate one
// here, and store it to be re-used later.
name := fmt.Sprintf("%s_%s", yang.CamelCase(definingModName), yang.CamelCase(typeName))
if noUnderscores {
name = strings.Replace(name, "_", "", -1)
}
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueEnumeratedTypedefNames[typedefKey] = uniqueName
return uniqueName, nil
}
// enumeratedTypedefTypeName resolves the name of an enumerated typedef (i.e.,
// a typedef which is either an identityref or an enumeration). The resolved
// name is prefixed with the prefix supplied. If the type that was supplied
// within the resolveTypeArgs struct is not a type definition which includes an
// enumerated type, the mappedType returned is nil, otherwise it is populated.
// If noUnderscores is set to true, underscores are omitted from the name
// of the enumerated typedef.
// It returns an error if the type does include an enumerated typedef, but this
// typedef is invalid.
func (s *genState) enumeratedTypedefTypeName(args resolveTypeArgs, prefix string, noUnderscores bool) (*mappedType, error) {
// If the type that is specified is not a built-in type (i.e., one of those
// types which is defined in RFC6020/RFC7950) then we establish what the type
// that we must actually perform the mapping for is. By default, start with
// the type that is specified in the schema.
if !isYANGBaseType(args.yangType) {
switch args.yangType.Kind {
case yang.Yenum, yang.Yidentityref:
// In the case of a typedef that specifies an enumeration or identityref
// then generate an enumerated type in the Go code according to the contextEntry
// which has been provided by the calling code.
if args.contextEntry == nil {
return nil, fmt.Errorf("error mapping node %s due to lack of context", args.yangType.Name)
}
tn, err := s.resolveTypedefEnumeratedName(args.contextEntry, noUnderscores)
if err != nil {
return nil, err
}
return &mappedType{
nativeType: fmt.Sprintf("%s%s", prefix, tn),
isEnumeratedValue: true,
}, nil
}
}
return nil, nil
}
// resolveLeafrefTarget takes an input path and context entry and
// determines the type of the leaf that is referred to by the path, such that
// it can be mapped to a native language type. It returns the yang.YangType that
// is associated with the target, and the target yang.Entry, such that the
// caller can map this to the relevant language type.
func (s *genState) resolveLeafrefTarget(path string, contextEntry *yang.Entry) (*yang.Entry, error) {
if s.schematree == nil {
// This should not be possible if the calling code generation is
// well structured and builds the schematree during parsing of YANG
// files.
return nil, fmt.Errorf("could not map leafref path: %v, from contextEntry: %v", path, contextEntry)
}
fixedPath, err := fixSchemaTreePath(path, contextEntry)
if err != nil {
return nil, err
}
e := s.schematree.GetLeafValue(fixedPath)
if e == nil {
return nil, fmt.Errorf("could not resolve leafref path: %v from %v, tree: %v", fixedPath, contextEntry, s.schematree)
}
target, ok := e.(*yang.Entry)
if !ok {
return nil, fmt.Errorf("invalid element returned from schema tree, must be a yang.Entry for path %v from %v", path, contextEntry)
}
return target, nil
}
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ygen
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/openconfig/gnmi/ctree"
"github.com/openconfig/goyang/pkg/yang"
"github.com/openconfig/ygot/ygot"
)
// genState is used to store the state that is created throughout the code
// generation and must be shared between multiple entities.
type genState struct {
// definedGlobals specifies the global Go names used during code generation.
definedGlobals map[string]bool
// uniqueDirectoryNames is a map keyed by the path of a YANG entity representing a
// directory in the generated code, whose value is the unique name that it
// was mapped to. This allows routines to determine, based on a particular YANG
// entry, how to refer to it when generating code.
uniqueDirectoryNames map[string]string
// uniqueIdentityNames is a map which is keyed by a string in the form of
// definingModule/identityName which stores the Go name of the enumerated Go type
// that has been created to represent the identity. This allows de-duplication
// between identityref leaves that reference the same underlying identity. The
// name used includes the defining module to avoid clashes between two identities
// that are named the same within different modules.
uniqueIdentityNames map[string]string
// uniqueEnumeratedTypedefNames is a map, keyed by a synthesised path for the typedef,
// generated in the form definingModule/typedefName, the value stores the Go name of
// the enumeration which represents a typedef that includes an enumerated type.
uniqueEnumeratedTypedefNames map[string]string
// uniqueEnumeratedLeafNames is a map, keyed by a synthesised path to an
// enumeration leaf. The path used reflects the data tree path of the leaf
// within the module that it is defined. That is to say, if a module
// example-module defines a hierarchy of global/config/a-leaf where a-leaf
// is of type enumeration, then the path example-module/global/config/a-leaf
// is used for a-leaf in the uniqueEnumeratedLeafNames. The value of the map
// is the name of the Go enumerated value to which it is mapped. The path based
// on the module is guaranteed to be unique, since we cannot have multiple
// modules of the same name, or multiple identical data tree paths within
// the same module. This path is used since a particular leaf may be re-used
// in multiple places, such that if the entire data tree path is used then
// the names that are generated require deduplication. This approach ensures
// that we have the same enumerated value for a particular leaf in multiple
// contexts.
// At the time of writing, in OpenConfig schemas, this occurs where there is
// a module such as openconfig-bgp which defines /bgp and is also used at
// /network-instances/network-instance/protocols/protocol/bgp.
uniqueEnumeratedLeafNames map[string]string
// schematree stores a ctree.Tree structure that represents the YANG
// schema tree. This is used for lookups within the module set where
// they are required, e.g., for leafrefs.
schematree *ctree.Tree
// uniqueProtoMsgNames is a map, keyed by a protobuf package name, that
// contains a map keyed by protobuf message name strings that indicates the
// names that are used within the generated package's context. It is used
// during code generation to ensure uniqueness of the generated names within
// the specified package.
uniqueProtoMsgNames map[string]map[string]bool
// uniqueProtoPackages is a map, keyed by a YANG schema path, that allows
// a path to be resolved into the calculated Protobuf package name that
// is to be used for it.
uniqueProtoPackages map[string]string
// generatedUnions stores a map, keyed by the output name for a union,
// that has already been output in the generated code. This ensures that
// where two entities re-use a union that has already been created (e.g.,
// a leafref to a union) then it is output only once in the generated code.
generatedUnions map[string]bool
}
// newGenState creates a new genState instance, initialised with the default state
// required for code generation.
func newGenState() *genState {
return &genState{
// Mark the name that is used for the binary type as a reserved name
// within the output structs.
definedGlobals: map[string]bool{
ygot.BinaryTypeName: true,
ygot.EmptyTypeName: true,
},
uniqueDirectoryNames: make(map[string]string),
uniqueEnumeratedTypedefNames: make(map[string]string),
uniqueIdentityNames: make(map[string]string),
uniqueEnumeratedLeafNames: make(map[string]string),
uniqueProtoMsgNames: make(map[string]map[string]bool),
uniqueProtoPackages: make(map[string]string),
generatedUnions: make(map[string]bool),
}
}
// enumeratedUnionEntry takes an input YANG union yang.Entry and returns the set of enumerated
// values that should be generated for the entry. New yang.Entry instances are synthesised within
// the yangEnums returned such that enumerations can be generated directly from the output of
// this function in common with enumerations that are not within a union. The name of the enumerated
// value is calculated based on the original context, whether path compression is enabled based
// on the compressPaths boolean, and whether the name should not include underscores, as per the
// noUnderscores boolean.
func (s *genState) enumeratedUnionEntry(e *yang.Entry, compressPaths, noUnderscores bool) ([]*yangEnum, error) {
var es []*yangEnum
for _, t := range enumeratedUnionTypes(e.Type.Type) {
var en *yangEnum
switch {
case t.IdentityBase != nil:
en = &yangEnum{
name: s.identityrefBaseTypeFromIdentity(t.IdentityBase, noUnderscores),
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yidentityref,
IdentityBase: t.IdentityBase,
},
},
}
case t.Enum != nil:
var enumName string
if _, chBuiltin := yang.TypeKindFromName[t.Name]; chBuiltin {
enumName = s.resolveEnumName(e, compressPaths, noUnderscores)
} else {
var err error
enumName, err = s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
return nil, err
}
}
en = &yangEnum{
name: enumName,
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yenum,
Enum: t.Enum,
},
Annotation: map[string]interface{}{"valuePrefix": traverseElementSchemaPath(e)},
},
}
}
es = append(es, en)
}
return es, nil
}
// buildDirectoryDefinitions extracts the yang.Entry instances from a map of
// entries that need struct or message definitions built for them. It resolves
// each yang.Entry to a yangDirectory which contains the elements that are
// needed for subsequent code generation. The name of the directory entry that
// is returned is based on the generatedLanguage that is supplied. The
// compressPaths and genFakeRoot arguments are used to determine how paths that
// are included within the generated structs are used. If the excludeState
// argument is set, those elements within the YANG schema that are marked config
// false (i.e., are read only) are excluded from the returned directories.
func (s *genState) buildDirectoryDefinitions(entries map[string]*yang.Entry, compressPaths, genFakeRoot bool, lang generatedLanguage, excludeState bool) (map[string]*yangDirectory, []error) {
var errs []error
mappedStructs := make(map[string]*yangDirectory)
for _, e := range entries {
// If we are excluding config false (state entries) then skip processing
// this element.
if excludeState && !isConfig(e) {
continue
}
if e.IsList() || e.IsDir() || isRoot(e) {
// This should be mapped to a struct in the generated code since it has
// child elements in the YANG schema.
elem := &yangDirectory{
entry: e,
}
// Encode the name of the struct according to the language specified
// within the input arguments.
switch lang {
case protobuf:
// In the case of protobuf the message name is simply the camel
// case name that is specified.
elem.name = s.protoMsgName(e, compressPaths)
case golang:
// For Go, we map the name of the struct to the path elements
// in CamelCase separated by underscores.
elem.name = s.goStructName(e, compressPaths, genFakeRoot)
default:
errs = append(errs, fmt.Errorf("unknown generating language specified for %s, got: %v", e.Name, lang))
continue
}
// Find the elements that should be rooted on this particular entity.
var fieldErr []error
elem.fields, fieldErr = findAllChildren(e, compressPaths, excludeState)
if fieldErr != nil {
errs = append(errs, fieldErr...)
continue
}
// Determine the path of the element from the schema.
elem.path = strings.Split(schemaTreePath(e), "/")
// Mark this struct as the fake root if it is specified to be.
if e.Node != nil && e.Node.NName() == rootElementNodeName {
elem.isFakeRoot = true
}
// Handle structures that will represent the container which is duplicated
// inside a list. This involves extracting the key elements of the list
// and returning a yangListAttr structure that describes how they should
// be represented.
if e.IsList() {
lattr, listErr := s.buildListKey(e, compressPaths)
if listErr != nil {
errs = append(errs, listErr...)
continue
}
elem.listAttr = lattr
}
mappedStructs[e.Path()] = elem
} else {
errs = append(errs, fmt.Errorf("%s was not an element mapped to a struct", e.Path()))
}
}
return mappedStructs, errs
}
// findEnumSet walks the list of enumerated value leaves and determines whether
// code generation is required for each enum. Particularly, it removes
// duplication between config and state containers when compressPaths is true.
// It also de-dups references to the same identity base, and type definitions.
// If noUnderscores is set to true, then underscores are omitted from the enum
// names to reflect the preferred style of some generated languages.
func (s *genState) findEnumSet(entries map[string]*yang.Entry, compressPaths, noUnderscores bool) (map[string]*yangEnum, []error) {
validEnums := make(map[string]*yang.Entry)
var enumNames []string
var errs []error
if compressPaths {
// Don't generate output for an element that exists both in the config and state containers,
// i.e., /interfaces/interface/config/enum and /interfaces/interface/state/enum should not
// both have code generated for them. Since there may be containers underneath state,
// we cannot rely on state having a specific place in the tree; therefore, walk through the
// path and swap 'state' for 'config' where it is found allowing us to check whether the
// state leaf has a corresponding config leaf, and if so, to ignore it. Note that a schema
// that is a valid OpenConfig schema has only a single instance of 'config' or 'state' in
// the path, therefore the below algorithm replaces only one element.
for path, e := range entries {
parts := strings.Split(path, "/")
var newPath []string
for _, p := range parts {
if p == "state" {
p = "config"
}
newPath = append(newPath, p)
}
if path == joinPath(newPath) {
// If the path remains the same - i.e., we did not replace state with
// config, then the enumeration is valid, and code should be
// generated for it.
validEnums[path] = e
enumNames = append(enumNames, path)
} else {
// Else, if we changed the path, then we changed a state container for
// a config container, and we should check whether the config leaf
// exists. Only when it doesn't do we consider this enum.
if _, ok := entries[joinPath(newPath)]; !ok {
validEnums[path] = e
enumNames = append(enumNames, path)
}
}
}
} else {
// No de-duplication occurs when path compression is disabled.
validEnums = entries
for n := range validEnums {
enumNames = append(enumNames, n)
}
}
// Sort the name of the enums such that we have deterministic ordering. This allows the
// same entity to be used for code generation each time (avoiding flaky tests or scenarios
// where there are erroneous config/state differences).
sort.Strings(enumNames)
// Iterate through the sorted list of enums so that, where de-duplication occurs, the
// same source entity is consistently used for code generation.
genEnums := make(map[string]*yangEnum)
for _, eN := range enumNames {
e := validEnums[eN]
_, builtin := yang.TypeKindFromName[e.Type.Name]
switch {
case e.Type.Name == "union", len(e.Type.Type) > 0 && !builtin:
// Calculate any enumerated types that exist within a union, whether it
// is a directly defined union, or a non-builtin typedef.
es, err := s.enumeratedUnionEntry(e, compressPaths, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
for _, en := range es {
if _, ok := genEnums[en.name]; !ok {
genEnums[en.name] = en
}
}
case e.Type.Name == "identityref":
// This is an identityref - we do not want to generate code for an
// identityref but rather for the base identity. This means that we reduce
// duplication across different enum types. Re-map the "path" that is to
// be used to the new identityref name.
if e.Type.IdentityBase == nil {
errs = append(errs, fmt.Errorf("entry %s was an identity with a nil base", e.Name))
continue
}
idBaseName := s.resolveIdentityRefBaseType(e, noUnderscores)
if _, ok := genEnums[idBaseName]; !ok {
genEnums[idBaseName] = &yangEnum{
name: idBaseName,
entry: e,
}
}
case e.Type.Name == "enumeration":
// We simply want to map this enumeration into a new name. Since we do
// de-duplication of re-used enumerated leaves at different points in
// the schema (e.g., if openconfig-bgp/container/enum-A can be instantiated
// in two places, then we do not want to have multiple enumerated types
// that represent this leaf), then we do not have errors if duplicates
// occur, we simply perform de-duplication at this stage.
enumName := s.resolveEnumName(e, compressPaths, noUnderscores)
if _, ok := genEnums[enumName]; !ok {
genEnums[enumName] = &yangEnum{
name: enumName,
entry: e,
}
}
default:
// This is a type which is defined through a typedef.
typeName, err := s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
if _, ok := genEnums[typeName]; !ok {
genEnums[typeName] = &yangEnum{
name: typeName,
entry: e,
}
}
}
}
return genEnums, errs
}
// resolveIdentityRefBaseType calculates the mapped name of an identityref's
// base such that it can be used in generated code. The value that is returned
// is the defining module name followed by the CamelCase-ified version of the
// base's name. This function wraps the identityrefBaseTypeFromIdentity
// function since it covers the common case that the caller is interested in
// determining the name from an identityref leaf, rather than directly from the
// identity. If the noUnderscores bool is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant
// with language styles where underscores are not allowed in names.
func (s *genState) resolveIdentityRefBaseType(idr *yang.Entry, noUnderscores bool) string |
// identityrefBaseTypeFromIdentity takes an input yang.Identity pointer and
// determines the name of the identity used within the generated code for it. The value
// returned is based on the defining module followed by the CamelCase-ified version
// of the identity's name. If noUnderscores is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant with
// language styles where underscores are not allowed in names.
func (s *genState) identityrefBaseTypeFromIdentity(i *yang.Identity, noUnderscores bool) string {
definingModName := parentModulePrettyName(i)
// As per a typedef that includes an enumeration, there is a many to one
// relationship between leaves and an identity value, therefore, we want to
// reuse the existing name for the identity enumeration if one exists.
identityKey := fmt.Sprintf("%s/%s", definingModName, i.Name)
if definedName, ok := s.uniqueIdentityNames[identityKey]; ok {
return definedName
}
var name string
if noUnderscores {
name = fmt.Sprintf("%s%s", yang.CamelCase(definingModName), strings.Replace(yang.CamelCase(i.Name), "_", "", -1))
} else {
name = fmt.Sprintf("%s_%s", yang.CamelCase(definingModName), yang.CamelCase(i.Name))
}
// The name of an identityref base type must be unique within the entire generated
// code, so the context of name generation is global.
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueIdentityNames[identityKey] = uniqueName
return uniqueName
}
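// Illustrative naming sketch (hypothetical module/identity names, not from a
// real schema): assuming yang.CamelCase("openconfig-aft") returns "OpenconfigAft",
// an identity "ipv4-unicast" defined in that module would be named
// "OpenconfigAft_Ipv4Unicast" (or "OpenconfigAftIpv4Unicast" with noUnderscores set),
// and any later leaf sharing the same base hits the
// uniqueIdentityNames["openconfig-aft/ipv4-unicast"] cache and reuses that name.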
// resolveEnumName takes a yang.Entry and resolves its name into the type name
// that will be used in the generated code. Whilst a leaf may only be used
// in a single context (i.e., at its own path), resolveEnumName may be called
// multiple times, and hence de-duplication of unique name generation is required.
// If noUnderscores is set to true, then underscores are omitted from the
// output name.
func (s *genState) resolveEnumName(e *yang.Entry, compressPaths, noUnderscores bool) string {
// It is possible, given a particular enumerated leaf, for it to appear
// multiple times in the schema. For example, through being defined in
// a grouping which is instantiated in two places. In these cases, the
// enumerated values must be the same since the path to the node - i.e.,
// module/hierarchy/of/containers/leaf-name must be unique, since we
// cannot have multiple modules of the same name, and paths within the
// module must be unique. To this end, we check whether we are generating
// an enumeration for exactly the same node, and if so, re-use the name
// of the enumeration that has been generated. This improves usability
// for the end user by avoiding multiple enumerated types.
//
// The path that is used for the enumeration is therefore taking the goyang
// "Node" hierarchy - we walk back up the tree until such time as we find
// a node that is not within the same module (parentModulePrettyName(parent) !=
// parentModulePrettyName(currentNode)), and use this as the unique path.
definingModName := parentModulePrettyName(e.Node)
var identifierPathElem []string
for elem := e.Node; elem.ParentNode() != nil && parentModulePrettyName(elem) == definingModName; elem = elem.ParentNode() {
identifierPathElem = append(identifierPathElem, elem.NName())
}
	// Since the path elements are compiled from leaf back to root, reverse them to
	// form the path. This is not strictly required, but aids debugging of the elements.
var identifierPath string
for i := len(identifierPathElem) - 1; i >= 0; i-- {
identifierPath = fmt.Sprintf("%s/%s", identifierPath, identifierPathElem[i])
}
// For leaves that have an enumeration within a typedef that is within a union,
// we do not want to just use the place in the schema definition for de-duplication,
// since it becomes confusing for the user to have non-contextual names within
// this context. We therefore rewrite the identifier path to have the context
// that we are in. By default, we just use the name of the node, but in OpenConfig
// schemas we rely on the grandparent name.
if !isYANGBaseType(e.Type) {
idPfx := e.Name
if compressPaths && e.Parent != nil && e.Parent.Parent != nil {
idPfx = e.Parent.Parent.Name
}
identifierPath = fmt.Sprintf("%s%s", idPfx, identifierPath)
}
// If the leaf had already been encountered, then return the previously generated
// name, rather than generating a new name.
if definedName, ok := s.uniqueEnumeratedLeafNames[identifierPath]; ok {
return definedName
}
if compressPaths {
// If we compress paths then the name of this enum is of the form
// ModuleName_GrandParent_Leaf - we use GrandParent since Parent is
// State or Config so would not be unique. The proposed name is
// handed to makeNameUnique to ensure that it does not clash with
// other defined names.
name := fmt.Sprintf("%s_%s_%s", yang.CamelCase(definingModName), yang.CamelCase(e.Parent.Parent.Name), yang.CamelCase(e.Name))
if noUnderscores {
name = strings.Replace(name, "_", "", -1)
}
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
	// If we do not compress the paths, then we write out the entire path.
var nbuf bytes.Buffer
for i, p := range traverseElementSchemaPath(e) {
if i != 0 && !noUnderscores {
nbuf.WriteRune('_')
}
nbuf.WriteString(yang.CamelCase(p))
}
uniqueName := makeNameUnique(nbuf.String(), s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
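// Worked example (hypothetical schema, assuming OpenConfig-style config/state
// containers): for an enumeration leaf .../interfaces/interface/state/oper-status
// in module "openconfig-interfaces", compressPaths=true produces a name of the
// form "OpenconfigInterfaces_Interface_OperStatus" (module, grandparent, leaf),
// whereas compressPaths=false joins the CamelCase of every element returned by
// traverseElementSchemaPath with underscores. Both candidates are passed through
// makeNameUnique so that a clash with an already defined global name is resolved
// rather than silently reused.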
// resolveTypedefEnumeratedName takes a yang.Entry which represents a typedef
// that has an underlying enumerated type (e.g., identityref or enumeration),
// and resolves the name of the enum that will be generated in the corresponding
// Go code.
func (s *genState) resolveTypedefEnumeratedName(e *yang.Entry, noUnderscores bool) (string, error) {
typeName := e.Type.Name
// Handle the case whereby we have been handed an enumeration that is within a
// union. We need to synthesise the name of the type here such that it is based on
// type name, plus the fact that it is an enumeration.
if e.Type.Kind == yang.Yunion {
enumTypes := enumeratedUnionTypes(e.Type.Type)
switch len(enumTypes) {
case 1:
// We specifically say that this is an enumeration within the leaf.
if noUnderscores {
typeName = fmt.Sprintf("%sEnum", enumTypes[0].Name)
} else {
typeName = fmt.Sprintf("%s_Enum", enumTypes[0].Name)
}
case 0:
return "", fmt.Errorf("enumerated type had an empty union within it, path: %v, type: %v, enumerated: %v", e.Path(), e.Type, enumTypes)
default:
return "", fmt.Errorf("multiple enumerated types within a single enumeration not supported, path: %v, type: %v, enumerated: %v", e.Path(), e.Type, enumTypes)
}
}
if e.Node == nil {
return "", fmt.Errorf("nil Node in enum type %s", e.Name)
}
definingModName := parentModulePrettyName(e.Node)
	// Since there can be many leaves that refer to the same typedef, we do not
	// generate a name for each of them, but rather use a common name. The lookup key
	// uses the non-CamelCase form since it is unique, whereas post-camelisation we
	// may have name clashes. A typedef does not have a 'path' in goyang, so we
	// synthesise one using the form module-name/typedef-name.
typedefKey := fmt.Sprintf("%s/%s", definingModName, typeName)
if definedName, ok := s.uniqueEnumeratedTypedefNames[typedefKey]; ok {
return definedName, nil
}
// The module/typedefName was not already defined with a CamelCase name, so generate one
// here, and store it to be re-used later.
name := fmt.Sprintf("%s_%s", yang.CamelCase(definingModName), yang.CamelCase(typeName))
if noUnderscores {
name = strings.Replace(name, "_", "", -1)
}
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueEnumeratedTypedefNames[typedefKey] = uniqueName
return uniqueName, nil
}
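// Lookup-key sketch (hypothetical names): a typedef "connection-mode" that wraps
// an enumeration and is defined in module "example-module" is keyed as
// "example-module/connection-mode", so every leaf instantiating that typedef
// resolves to the same generated name, e.g. "ExampleModule_ConnectionMode"
// (underscore dropped when noUnderscores is set).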
// enumeratedTypedefTypeName resolves the name of an enumerated typedef (i.e.,
// a typedef which is either an identityref or an enumeration). The resolved
// name is prefixed with the prefix supplied. If the type that was supplied
// within the resolveTypeArgs struct is not a type definition which includes an
// enumerated type, the mappedType returned is nil, otherwise it is populated.
// If noUnderscores is set to true, underscores are omitted from the name
// of the enumerated typedef.
// It returns an error if the type does include an enumerated typedef, but this
// typedef is invalid.
func (s *genState) enumeratedTypedefTypeName(args resolveTypeArgs, prefix string, noUnderscores bool) (*mappedType, error) {
// If the type that is specified is not a built-in type (i.e., one of those
// types which is defined in RFC6020/RFC7950) then we establish what the type
// that we must actually perform the mapping for is. By default, start with
// the type that is specified in the schema.
if !isYANGBaseType(args.yangType) {
switch args.yangType.Kind {
case yang.Yenum, yang.Yidentityref:
// In the case of a typedef that specifies an enumeration or identityref
			// then generate an enumerated type in the Go code according to the contextEntry
// which has been provided by the calling code.
if args.contextEntry == nil {
return nil, fmt.Errorf("error mapping node %s due to lack of context", args.yangType.Name)
}
tn, err := s.resolveTypedefEnumeratedName(args.contextEntry, noUnderscores)
if err != nil {
return nil, err
}
return &mappedType{
nativeType: fmt.Sprintf("%s%s", prefix, tn),
isEnumeratedValue: true,
}, nil
}
}
return nil, nil
}
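// Minimal caller sketch (hypothetical; the "E_" prefix and the field values shown
// are assumptions for illustration only):
//
//	mt, err := s.enumeratedTypedefTypeName(resolveTypeArgs{yangType: e.Type, contextEntry: e}, "E_", false)
//	if err == nil && mt != nil {
//		// mt.nativeType is e.g. "E_ExampleModule_ConnectionMode" and
//		// mt.isEnumeratedValue is true; a nil mt means the type was not an
//		// enumerated typedef and normal type mapping should be used instead.
//	}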
// resolveLeafrefTarget takes an input path and context entry and
// determines the type of the leaf that is referred to by the path, such that
// it can be mapped to a native language type. It returns the yang.YangType that
// is associated with the target, and the target yang.Entry, such that the
// caller can map this to the relevant language type.
func (s *genState) resolveLeafrefTarget(path string, contextEntry *yang.Entry) (*yang.Entry, error) {
if s.schematree == nil {
// This should not be possible if the calling code generation is
// well structured and builds the schematree during parsing of YANG
// files.
return nil, fmt.Errorf("could not map leafref path: %v, from contextEntry: %v", path, contextEntry)
}
fixedPath, err := fixSchemaTreePath(path, contextEntry)
if err != nil {
return nil, err
}
e := s.schematree.GetLeafValue(fixedPath)
if e == nil {
return nil, fmt.Errorf("could not resolve leafref path: %v from %v, tree: %v", fixedPath, contextEntry, s.schematree)
}
target, ok := e.(*yang.Entry)
if !ok {
return nil, fmt.Errorf("invalid element returned from schema tree, must be a yang.Entry for path %v from %v", path, contextEntry)
}
return target, nil
}
// flappybird.js
var Const = {
BIRD_RADIUS : 28,
BIRD_JUMP_SPEED : 10,
OBST_WIDTH : 85,
OBST_MAX_HEIGHT : 400,
OBST_MIN_HEIGHT : 40,
OBST_COUNT : 10000,
OBST_START_X : 600,
OBST_MARGIN : 300,
OBST_HEAD_HEIGHT : 32,
SCREEN_HEIGHT : 640,
SCREEN_WIDTH : 480,
PASS_HEIGHT : 200,
X_VOL : 4,
G : 0.8,
    JUMP_INTERVAL: 8 // can only jump again after 8 frames
};
var XHH = {
Point : function(x,y) {
this.x = x ? x : 0;
this.y = y ? y : 0;
},
Bird : function() {
this.x = 100;
this.y = 400;
this.vx = Const.X_VOL;
this.vy = 0;
this.r = Const.BIRD_RADIUS;
this.isDead = false;
},
Obstacle : function(x, height, dir) {
this.x = x;
this.dir = dir;
this.y = this.dir == 1 ? 0 : Const.SCREEN_HEIGHT;
this.width = Const.OBST_WIDTH;
this.height = height;
this.passed = false;
},
Game : function() {
},
Node : function(parent, jump, nextCenter) {
this.frame = parent.frame+1;
this.r = Const.BIRD_RADIUS;
this.parent = parent;
this.b = new XHH.Point(parent.b.x + Const.X_VOL, parent.b.y);
this.jump = jump;
this.valid = true;
if(jump) {
this.v = -Const.BIRD_JUMP_SPEED;
this.lastJumpFrame = this.frame;
}
else {
this.v = parent.v + Const.G;
this.lastJumpFrame = parent.lastJumpFrame;
}
this.b.y += this.v;
        if(this.b.y < 0) { this.b.y = 0; this.v = 0; } // clamp at the ceiling and zero the node's velocity (the Point has no 'v' field)
if(this.b.y - Const.BIRD_RADIUS >= Const.SCREEN_HEIGHT) this.valid = false;
        this.g = parent.g + this.b.dis(parent.b); // path cost accumulates from the parent's g, not its heuristic
this.h = nextCenter.dis(this.b) + (nextCenter.y - this.b.y)*(nextCenter.y - this.b.y)*0.01;
this.f = this.g + this.h;
},
OP : function(frame, jump) {
this.frame = frame;
this.jump = jump;
}
};
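// Note on the Node cost model above (numbers illustrative): each search node
// advances the bird by X_VOL horizontally and applies either a jump
// (v = -BIRD_JUMP_SPEED) or gravity (v = parent.v + G). g accumulates the
// step distances along the path, h is the Euclidean distance to the centre of
// the next gap plus a 0.01*dy*dy penalty on vertical offset, and f = g + h is
// the priority used by the A* queue. For instance, a node 100px left of the
// gap centre and 50px below it gets h ≈ sqrt(100*100 + 50*50) + 0.01*2500 ≈ 136.8.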
XHH.Point.prototype = {
dis : function(point) {
return Math.sqrt((this.x - point.x)*(this.x - point.x) + (this.y - point.y)*(this.y - point.y))
}
};
XHH.Node.prototype = {
toOP : function() {
return new XHH.OP(this.frame, this.jump);
}
};
XHH.Bird.prototype = {
jump : function() {
if(this.isDead) return;
this.vy = -Const.BIRD_JUMP_SPEED;
},
update : function() {
if(!this.isDead)
this.x += this.vx;
this.y += this.vy;
if(this.y < 0) {
this.y = 0;
this.vy = 0;
}
if(this.y > Const.SCREEN_HEIGHT - this.r) {
this.y = Const.SCREEN_HEIGHT - this.r;
return;
}
this.vy += Const.G;
},
die : function() {
this.isDead = true;
this.vy = 0;
}
};
XHH.Obstacle.prototype = {
/**
*
* @param {XHH.Bird} bird
*/
hit : function(bird) {
var left = this.x - this.width / 2;
var right = this.x + this.width / 2;
var bottom = this.dir == 1 ? 0 : Const.SCREEN_HEIGHT - this.height;
var top = bottom + this.height;
if(this.dir == 1) {
if(bird.x >= left - Const.BIRD_RADIUS && bird.x <= right + Const.BIRD_RADIUS && bird.y <= top) return true;
if(bird.x >= left && bird.x < right && bird.y - Const.BIRD_RADIUS <= top) return true;
}else{
if(bird.x >= left - Const.BIRD_RADIUS && bird.x <= right + Const.BIRD_RADIUS && bird.y >= bottom) return true;
if(bird.x >= left && bird.x <= right && bird.y + Const.BIRD_RADIUS >= bottom) return true;
}
var bc = new XHH.Point(bird.x, bird.y);
var lc = new XHH.Point(left, this.dir == 1 ? top : bottom);
var rc = new XHH.Point(right, this.dir == 1 ? top : bottom);
if(lc.dis(bc) <= Const.BIRD_RADIUS) return true;
if(rc.dis(bc) <= Const.BIRD_RADIUS) return true;
return false;
}
}
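// Worked example for hit() (illustrative numbers): a lower pipe (dir == -1) at
// x = 600 with width 85 and height 240 occupies x in [557.5, 642.5] and
// y in [400, 640]. A bird centred at (555, 375) fails both rectangle tests
// (it is above bottom = 400 and just left of the pipe body), but the distance
// to the pipe's exposed corner lc = (557.5, 400) is sqrt(2.5*2.5 + 25*25) ≈ 25.1,
// which is <= BIRD_RADIUS = 28, so a collision is still reported.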
XHH.Game.prototype = {
random : function() {
var x = Math.abs(Math.sin(this.seed++)) * 100;
return x - Math.floor(x);
},
createObstacle : function() {
for(var i=0;i<Const.OBST_COUNT;i++) {
var ht_up = Math.floor(this.random() * (Const.OBST_MAX_HEIGHT - Const.OBST_MIN_HEIGHT)) + Const.OBST_MIN_HEIGHT;
var ht_dw = Const.SCREEN_HEIGHT - Const.PASS_HEIGHT - ht_up;
var x = Const.OBST_START_X + i*Const.OBST_MARGIN;
var obst_up = new XHH.Obstacle(x, ht_up, 1);
var obst_dw = new XHH.Obstacle(x, ht_dw, -1);
this.obsts.push(obst_up);
this.obsts.push(obst_dw);
}
},
gameOver : function(){
this.isGameOver = true;
this.gameOverTime = new Date().getTime();
this.bird.die();
this.saveRecord();
},
checkGameOver : function() {
// hit the floor
if(this.bird.y >= Const.SCREEN_HEIGHT - this.bird.r) return true;
// at most 3*2 obstacles in the view
var passed = false;
for(var i=0;i<3*2;i++)
{
var obst = this.obsts[this.obstIndex + i];
if(obst.hit(this.bird)) {
                console.log('obst ' + (this.obstIndex + i) + ' hit the bird!');
return true;
}
if(this.bird.x > obst.x && !obst.passed) {
obst.passed = passed = true;
}
}
if(passed) {
this.score++;
if(this.score > this.record) this.record = this.score;
}
return false;
},
hitTest : function(pt) {
for(var i=0;i<6*2;i++)
{
var obst = this.obsts[this.obstIndex + i];
if(obst.hit(pt)) return true;
}
return false;
},
update : function() {
if(!this.isGameStarted) return;
this.bird.update();
if(this.isGameOver) return;
this.left += this.vx;
if (this.checkGameOver())
this.gameOver();
var obst_lm = this.obsts[this.obstIndex];
// left most obstacle was out of view
if(obst_lm.x + obst_lm.width/2 < this.left)
this.obstIndex+=2;
        if(this.isCOM) {
if(this.ops.length == 0 && this.lastFound) {
this.lastFound = this.AStar();
}
if(this.ops.length != 0) {
while(this.ops[0].frame < this.frame) this.ops.shift();
if(this.ops[0].frame == this.frame) {
this.ops.shift();
this.bird.jump();
}
}
}
this.frame++;
},
drawBird : function() {
        this.ctx.beginPath();
        this.ctx.strokeStyle = "#FFFFFF";
        this.ctx.fillStyle = "#FF0000";
        this.ctx.arc(this.bird.x - this.left, this.bird.y, this.bird.r, 0, 2*Math.PI);
        this.ctx.fill(); // use the stored context, consistent with drawObst/render
//ctx.endPath();
},
drawTraj : function() {
for(var i=0;i<this.traj.length;i++)
{
var p = this.traj[i].b;
            this.ctx.beginPath();
            this.ctx.fillStyle = "#0000FF";
            this.ctx.arc(p.x - this.left, p.y, this.bird.r, 0, 2*Math.PI);
            this.ctx.fill();
}
},
drawObst : function(obst) {
var x = obst.x - this.left - obst.width/2;
var y = obst.dir == 1 ? 0 : Const.SCREEN_HEIGHT - obst.height;
var x_s = x + obst.width/3;
var w_l = obst.width/3;
var w_r = obst.width/3*2;
var grd=this.ctx.createLinearGradient(x,y,x_s,y);
grd.addColorStop(0,"#75BA6E");
grd.addColorStop(1,"#DDF0D8");
this.ctx.fillStyle = grd;
this.ctx.fillRect(x, y, w_l, obst.height);
var grd=this.ctx.createLinearGradient(x_s,y,x + obst.width, y);
grd.addColorStop(0,"#DDF0D8");
grd.addColorStop(1,"#318C27");
this.ctx.fillStyle = grd;
this.ctx.fillRect(x_s, y, w_r, obst.height);
this.ctx.beginPath();
this.ctx.strokeStyle = "291B09";
this.ctx.lineWidth = 2;
this.ctx.rect(x,y,obst.width,obst.height);
this.ctx.stroke();
this.ctx.beginPath();
this.ctx.strokeStyle = "291B09";
this.ctx.lineWidth = 3;
this.ctx.rect(x,obst.dir == 1 ? y + obst.height - Const.OBST_HEAD_HEIGHT : y, obst.width, Const.OBST_HEAD_HEIGHT);
this.ctx.stroke();
},
drawObsts : function() {
// at most 3*2 obstacles in the view
for(var i=0;i<3*2;i++)
{
var obst = this.obsts[this.obstIndex + i];
this.drawObst(obst);
}
},
render : function() {
this.update();
this.ctx.clearRect(0,0,Const.SCREEN_WIDTH,Const.SCREEN_HEIGHT);
this.drawObsts();
this.drawTraj();
this.drawBird();
},
getRecord : function() {
var record = localStorage.getItem("record");
        return record ? parseInt(record, 10) : 0; // localStorage stores strings
},
saveRecord : function() {
localStorage.setItem("record", this.record);
},
AStar : function() {
var bx = new XHH.Point(this.bird.x, this.bird.y);
var it = null, ib = null;
for(var i=0;i<3*2;i++)
{
var obst = this.obsts[this.obstIndex + i];
if(obst.x > bx.x && obst.dir == 1 && it == null) it = obst;
if(obst.x > bx.x && obst.dir == -1 && ib == null) ib = obst;
}
var center = new XHH.Point(it.x + this.bird.r*2, it.height + Const.PASS_HEIGHT/2 + this.bird.r);
console.log("A* current = " + bx.x + "," + bx.y + " target = " + center.x + "," + center.y);
var q = new PriorityQueue({ comparator: function(a, b) { return a.f - b.f; }});
var parent = {
parent : null,
b : bx,
g : 0,
h : bx.dis(center),
v : this.bird.vy,
frame : this.frame,
lastJumpFrame : this.frame,
jump : 0,
toOP : function() { return new XHH.OP(this.frame, this.jump)}
};
var n0 = new XHH.Node(parent, false, center);
//var n1 = new XHH.Node(parent, true, center);
var startTime = new Date().getTime();
if(n0.valid && !this.hitTest(n0.b)) q.queue(n0);
//if(n1.valid && !this.hitTest(n1.b)) q.queue(n1);
var created = q.length;
var expended = 0;
var found = false;
while(q.length != 0) {
var p = q.dequeue();
expended ++;
// goal reached
if(p.b.dis(center) < 32) {
console.log("found!");
this.ops = [];
this.traj = [];
this.ops.push(p.toOP());
this.traj.push(p);
var pp = p.parent;
while(pp) {
if(pp.jump) this.ops.push(pp.toOP());
this.traj.push(pp);
pp = pp.parent;
}
this.ops.reverse();
found = true;
break;
}
n0 = new XHH.Node(p, false, center);
if(n0.valid && !this.hitTest(n0.b)) { q.queue(n0); created++; }
if(p.frame - p.lastJumpFrame >= Const.JUMP_INTERVAL)
{
                var n1 = new XHH.Node(p, true, center);
if(n1.valid && !this.hitTest(n1.b)) { q.queue(n1); created++; }
}
if(expended > 4e5) break;
}
var endTime = new Date().getTime();
console.log("found = " + found + " created = " + created + " expended = " + expended + " time = " + (endTime - startTime));
return found;
},
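    // How the plan is consumed (summary): AStar() succeeds once a node lands within
    // 32px of the next gap centre; the parent chain is unwound into this.ops, a
    // frame-stamped list of jump commands, and this.traj, the predicted positions
    // that drawTraj renders. update() pops any op whose frame matches the current
    // frame and calls bird.jump(), replanning when the list runs out, and the
    // 4e5 cap on expanded nodes keeps a failed search from stalling a frame forever.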
start : function(isCOM) {
this.isCOM = isCOM;
this.isGameStarted = true;
if(isCOM) {
this.lastFound = this.AStar();
}
},
init : function(seed, ctx) {
this.seed = seed ? seed : 0;
this.ctx = ctx;
this.obstIndex = 0;
this.vx = Const.X_VOL;
this.obsts = [];
this.left = 0;
this.score = 0;
this.isCOM = false;
this.record = this.getRecord();
this.obstIndex = 0;
this.bird = new XHH.Bird();
this.isGameOver = false;
this.isGameStarted = false;
this.createObstacle();
this.ops = [];
this.traj = [];
this.lastFound = false;
this.frame = 0;
},
jump : function() {
if(this.isGameOver && (new Date().getTime() - this.gameOverTime > 500)){
this.init(this.seed, this.ctx);
} else if(!this.isGameStarted){
this.start(false);
this.bird.jump();
} else {
this.isCOM = false;
this.bird.jump();
}
},
onkeydown : function(e) {
        var keyCode = ('which' in e) ? e.which : e.keyCode;
switch(keyCode){
case 32: // space
this.jump();
break;
case 68: // d
this.start(true);
break;
}
}
}
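// Minimal wiring sketch (hypothetical page glue; the canvas id, the external
// PriorityQueue dependency and the 60fps interval are assumptions, not part of
// this file):
//
//   var canvas = document.getElementById('game');        // 480x640 canvas
//   var ctx = canvas.getContext('2d');
//   var game = new XHH.Game();
//   game.init(42, ctx);                                   // seed, drawing context
//   document.onkeydown = function(e) { game.onkeydown(e); };
//   setInterval(function() { game.render(); }, 1000 / 60);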
// raft.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import (
"errors"
pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb"
"log"
"math/rand"
"time"
)
// helper func
const Debug = 0
func DPrintf(format string, a ...interface{}) (n int, err error) {
if Debug > 0 {
log.Printf(format, a...)
}
return
}
// None is a placeholder node ID used when there is no leader.
const None uint64 = 0
// StateType represents the role of a node in a cluster.
type StateType uint64
const (
StateFollower StateType = iota
StateCandidate
StateLeader
)
var stmap = [...]string{
"StateFollower",
"StateCandidate",
"StateLeader",
}
func (st StateType) String() string {
return stmap[uint64(st)]
}
// ErrProposalDropped is returned when the proposal is ignored by some cases,
// so that the proposer can be notified and fail fast.
var ErrProposalDropped = errors.New("raft proposal dropped")
// Config contains the parameters to start a raft.
type Config struct {
// ID is the identity of the local raft. ID cannot be 0.
ID uint64
// peers contains the IDs of all nodes (including self) in the raft cluster. It
// should only be set when starting a new raft cluster. Restarting raft from
// previous configuration will panic if peers is set. peer is private and only
// used for testing right now.
peers []uint64
// ElectionTick is the number of Node.Tick invocations that must pass between
// elections. That is, if a follower does not receive any message from the
// leader of current term before ElectionTick has elapsed, it will become
// candidate and start an election. ElectionTick must be greater than
// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
// unnecessary leader switching.
ElectionTick int
// HeartbeatTick is the number of Node.Tick invocations that must pass between
// heartbeats. That is, a leader sends heartbeat messages to maintain its
// leadership every HeartbeatTick ticks.
HeartbeatTick int
// Storage is the storage for raft. raft generates entries and states to be
// stored in storage. raft reads the persisted entries and states out of
// Storage when it needs. raft reads out the previous state and configuration
// out of storage when restarting.
Storage Storage
// Applied is the last applied index. It should only be set when restarting
// raft. raft will not return entries to the application smaller or equal to
// Applied. If Applied is unset when restarting, raft might return previous
// applied entries. This is a very application dependent configuration.
Applied uint64
}
func (c *Config) validate() error {
if c.ID == None {
return errors.New("cannot use none as id")
}
if c.HeartbeatTick <= 0 {
return errors.New("heartbeat tick must be greater than 0")
}
if c.ElectionTick <= c.HeartbeatTick {
return errors.New("election tick must be greater than heartbeat tick")
}
if c.Storage == nil {
return errors.New("storage cannot be nil")
}
return nil
}
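// Illustrative construction (hypothetical values; assumes a concrete Storage such
// as the package's MemoryStorage, available via NewMemoryStorage as in upstream
// etcd/raft):
//
//	cfg := &Config{
//		ID:            1,
//		peers:         []uint64{1, 2, 3},
//		ElectionTick:  10,
//		HeartbeatTick: 1,
//		Storage:       NewMemoryStorage(),
//	}
//	r := newRaft(cfg) // newRaft panics if cfg.validate() fails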
// Progress represents a follower’s progress in the view of the leader. Leader maintains
// progresses of all followers, and sends entries to the follower based on its progress.
type Progress struct {
Match, Next uint64
}
type Raft struct {
id uint64
Term uint64
Vote uint64
// the log
RaftLog *RaftLog
// log replication progress of each peers
Prs map[uint64]*Progress
// this peer's role
State StateType
// votes records
votes map[uint64]bool
// msgs need to send
msgs []pb.Message
// the leader id
Lead uint64
// heartbeat interval, should send
heartbeatTimeout int
// baseline of election interval
electionTimeout int
// number of ticks since it reached last heartbeatTimeout.
// only leader keeps heartbeatElapsed.
heartbeatElapsed int
// Ticks since it reached last electionTimeout when it is leader or candidate.
// Number of ticks since it reached last electionTimeout or received a
// valid message from current leader when it is a follower.
electionElapsed int
// leadTransferee is id of the leader transfer target when its value is not zero.
// Follow the procedure defined in section 3.10 of Raft phd thesis.
// (https://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf)
// (Used in 3A leader transfer)
leadTransferee uint64
// Only one conf change may be pending (in the log, but not yet
// applied) at a time. This is enforced via PendingConfIndex, which
// is set to a value >= the log index of the latest pending
// configuration change (if any). Config changes are only allowed to
// be proposed if the leader's applied index is greater than this
// value.
// (Used in 3A conf change)
PendingConfIndex uint64
actualElectionTimeout int
//// only will be useful if self is a leader
//appendCount map[uint64]uint64
}
func (r *Raft) generateElectionTimeout() int {
min := r.electionTimeout
max := r.electionTimeout * 2 - 1
return rand.Intn(max-min+1) + min
}
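// For example, with electionTimeout = 10 the actual timeout is drawn uniformly
// from [10, 19] ticks; re-randomising it on every state change is what keeps two
// followers from timing out on the same tick and repeatedly splitting the vote.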
// newRaft return a raft peer with the given config
func newRaft(c *Config) *Raft {
if err := c.validate(); err != nil {
panic(err.Error())
}
peer2Progress := make(map[uint64]*Progress, len(c.peers))
peer2Vote := make(map[uint64]bool, len(c.peers))
for _, s := range c.peers {
peer2Vote[s] = false
peer2Progress[s] = &Progress{0, 0}
}
rand.Seed(time.Now().UnixNano())
hardState, _, _ := c.Storage.InitialState()
return &Raft{id: c.ID, Term: hardState.Term, Vote: hardState.Vote, RaftLog: newLog(c.Storage), State: StateFollower, Prs: peer2Progress, votes: peer2Vote, Lead: 0, heartbeatTimeout: c.HeartbeatTick, electionTimeout: c.ElectionTick, heartbeatElapsed: 0, electionElapsed: 0, actualElectionTimeout: 0}
}
// sendAppend sends an append RPC with new entries (if any) and the
// current commit index to the given peer. Returns true if a message was sent.
func (r *Raft) sendAppend(to uint64) bool {
r.sendAppendEntries(to)
return true
}
// sendHeartbeat sends a heartbeat RPC to the given peer.
func (r *Raft) sendHeartbeat(to uint64) {
m := pb.Message{MsgType: pb.MessageType_MsgHeartbeat, From: r.id, To: to, Term: r.Term}
r.sendMsg(m)
}
// tick advances the internal logical clock by a single tick.
func (r *Raft) tick() {
r.heartbeatElapsed++
r.electionElapsed++
if r.State == StateLeader {
		if r.heartbeatElapsed >= r.heartbeatTimeout {
			r.heartbeatElapsed = 0 // reset so heartbeats keep firing every heartbeatTimeout ticks
			r.broadcastMsgHeartbeat()
}
} else if r.State == StateCandidate {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
} else if r.State == StateFollower {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
}
}
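// Sketch of how an application could drive the logical clock (hypothetical
// driver; the real tests and the raw-node layer call tick() themselves):
//
//	ticker := time.NewTicker(100 * time.Millisecond)
//	for range ticker.C {
//		r.tick()
//		// drain r.msgs and hand the messages to the transport here
//	}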
// becomeFollower transform this peer's state to Follower
func (r *Raft) becomeFollower(term uint64, lead uint64) {
r.State = StateFollower
r.Term = term
r.Lead = lead
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
}
// becomeCandidate transform this peer's state to candidate
func (r *Raft) becomeCandidate() {
r.State = StateCandidate
r.Term++
r.Lead = 0
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
r.votes = map[uint64]bool{}
}
func (r *Raft) handleBeat() {
r.broadcastMsgHeartbeat()
}
func (r *Raft) initializeProgressFirstTime() {
for _, p := range r.Prs {
p.Match = 0
p.Next = r.RaftLog.LastIndex() + 1
}
}
func (r *Raft) initializeProgressSecondTime() {
for i, p := range r.Prs {
if i != r.id {
p.Next++
}
}
}
func (r *Raft) sendInitialAppend() {
r.initializeProgressFirstTime()
// send msgAppend(no-op); not sure if this should be here
	propMsg := pb.Message{From: r.id, To: r.id, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}}}
r.sendMsgLocally(propMsg)
}
// becomeLeader transform this peer's state to leader
func (r *Raft) becomeLeader() {
// NOTE: Leader should propose a noop entry on its term
r.State = StateLeader
r.Vote = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
r.Prs[p].Match = 0
r.Prs[p].Next = r.RaftLog.LastIndex() + 1
}
//r.initializeProgress()
// send heartbeat
//m := pb.Message{MsgType: pb.MessageType_MsgBeat, From: r.id, To: r.id}
//r.sendMsgLocally(m)
// send noop message
r.sendInitialAppend()
r.electionElapsed = 0
}
// helpers
func (r *Raft) broadcastMsgHeartbeat() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendHeartbeat(p)
}
}
// the message will go over the network
func (r *Raft) sendMsg(m pb.Message) {
r.msgs = append(r.msgs, m)
}
// the message will NOT go over the network
func (r *Raft) sendMsgLocally(m pb.Message) {
r.Step(m)
}
func (r *Raft) grantVoting(m pb.Message) {
r.Vote = m.From
r.State = StateFollower
r.Term = m.Term
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: false, Term: r.Term}
r.sendMsg(newMsg)
}
func (r *Raft) rejectVoting(m pb.Message) {
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: true, Term: r.Term}
r.sendMsg(newMsg)
}
// including self
func (r *Raft) hasVotedForAnotherCandidate(m pb.Message) bool {
if !(r.Vote == 0 || r.Vote == m.From) {
return true
}
return false
}
func (r *Raft) hasVotedForSelf() bool {
if r.Vote == r.id {
return true
} else {
return false
}
}
func (r *Raft) hasMoreCommittedLogsThanCandidate(m pb.Message) bool {
lastCommittedLog, err := r.RaftLog.lastCommittedLog()
if err != nil {
return false
}
if m.LogTerm < lastCommittedLog.Term {
return true
} else if m.LogTerm == lastCommittedLog.Term {
if m.Index < lastCommittedLog.Index {
return true
}
}
return false
}
func (r *Raft) hasEarlierLogThanCandidate(m pb.Message) bool {
// determine if the candidate has later term & log than self
currentLogTerm, _ := r.RaftLog.Term(r.RaftLog.LastIndex())
if m.LogTerm < currentLogTerm {
return false
} else if m.LogTerm == currentLogTerm {
if m.Index < r.RaftLog.LastIndex() {
return false
}
}
return true
}
func (r *Raft) updateFollowerState(m pb.Message) {
r.Vote = 0
r.Lead = 0
r.Term = m.Term
r.State = StateFollower
}
func (r *Raft) handleVoting(m pb.Message) {
//DPrintf("current Term %d, current Index %d, message Term %d, message Index %d", r.Term, r.RaftLog.LastIndex(), m.LogTerm, m.Index)
// ensure the candidate has all committed logs
//if r.hasMoreCommittedLogsThanCandidate(m) {
// r.rejectVoting(m)
// return
//}
// determine if the raft has already voted
if m.Term > r.Term {
//if r.hasVotedForAnotherCandidate(m) && !r.hasVotedForSelf() {
// r.rejectVoting(m)
// return
//} else {
// r.grantVoting(m)
// return
//}
r.updateFollowerState(m)
}
if m.Term == r.Term {
if r.hasVotedForAnotherCandidate(m) {
r.rejectVoting(m)
return
} else {
if r.hasEarlierLogThanCandidate(m) && !r.hasMoreCommittedLogsThanCandidate(m){
r.grantVoting(m)
return
} else {
r.rejectVoting(m)
return
}
}
} else {
r.rejectVoting(m)
return
}
}
func (r *Raft) startVoting() {
r.becomeCandidate()
r.votes[r.id] = true
r.Vote = r.id
if r.tallyAndWin() {
r.becomeLeader()
return
}
r.electionElapsed = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
// preparations
logIndex := r.RaftLog.LastIndex()
logTerm, _ := r.RaftLog.Term(logIndex)
m := pb.Message{MsgType: pb.MessageType_MsgRequestVote, From: r.id, To: p, Term: r.Term, LogTerm: logTerm, Index: logIndex}
r.sendMsg(m)
}
}
func (r *Raft) tallyAndWin() bool {
countAccept := 0
//countReject := 0
for _, v := range r.votes {
if v == true {
countAccept++
}
}
if countAccept > len(r.Prs)-countAccept {
return true
} else {
return false
}
}
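// Quorum arithmetic example: with len(r.Prs) == 5, three accepts win the election
// because 3 > 5-3; with 4 peers three accepts are still required, since 2 > 4-2
// is false. tallyAndLose applies the same test to rejections so a hopeless
// election can be abandoned early.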
func (r *Raft) tallyAndLose() bool {
countReject := 0
//countReject := 0
for _, v := range r.votes {
if v == false {
countReject++
}
}
if countReject > len(r.Prs)-countReject {
return true
} else {
return false
}
}
func (r *Raft) handleVotingResponse(m pb.Message) {
if !m.Reject {
r.votes[m.From] = true
} else {
r.votes[m.From] = false
if m.Term > r.Term {
r.State = StateFollower
r.Term = m.Term
r.electionElapsed = 0
}
}
// when more than half servers have voted, we tally
if len(r.votes) > len(r.Prs)-len(r.votes) {
if r.tallyAndWin() {
r.becomeLeader()
}
if r.tallyAndLose() {
// we don't specify leader here
r.becomeFollower(r.Term, 0)
}
}
}
func (r *Raft) handleMsgHeartbeat(m pb.Message) {
	// A heartbeat from a legitimate leader resets the election timer and records
	// the sender as leader; stale heartbeats are ignored. (Minimal sketch; a full
	// implementation would also send a heartbeat response.)
	if m.Term >= r.Term {
		r.becomeFollower(m.Term, m.From)
	}
}
// Step the entrance of handle message, see `MessageType`
// on `eraftpb.proto` for what msgs should be handled
func (r *Raft) Step(m pb.Message) error {
// Your Code Here (2A).
switch r.State {
case StateFollower:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgHeartbeat:
r.handleMsgHeartbeat(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateCandidate:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgRequestVoteResponse:
r.handleVotingResponse(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateLeader:
switch m.MsgType {
case pb.MessageType_MsgHeartbeat:
r.handleHeartbeat(m)
case pb.MessageType_MsgHup:
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgBeat:
r.handleBeat()
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
case pb.MessageType_MsgPropose:
r.handlePropose(m)
case pb.MessageType_MsgAppendResponse:
r.handleAppendResponse(m)
}
}
return nil
}
func (r *Raft) handlePropose(m pb.Message) {
// update match & next for the leader + followers
r.RaftLog.appendLog(m.Entries)
r.Prs[r.id].Match = r.RaftLog.LastIndex()
r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//r.initializeProgressSecondTime()
// if there is only one node in the scene
if len(r.Prs) == 1 {
r.RaftLog.committed = r.Prs[r.id].Match
}
r.broadcastAppendEntries()
}
func (r *Raft) sendAppendEntries(to uint64) {
// preparations
// notice Index for the algorithm is different from Index for the language
nextLogIndex := r.Prs[to].Next
prevLogIndex := nextLogIndex - 1
prevLogTerm, _ := r.RaftLog.Term(prevLogIndex)
//// if there is nothing to send, we append an NO-OP entry
//if nextLogIndex == r.RaftLog.LastIndex() + 1 {
// // update match & next for the leader
// r.RaftLog.appendLog([]*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}})
// r.Prs[r.id].Match = r.RaftLog.LastIndex()
// r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//}
// convert array of objects to array of pointers
entriesToAppend := r.RaftLog.entries[nextLogIndex-1:]
pointerToEntriesToAppend := make([]*pb.Entry, 0)
for i, _ := range entriesToAppend {
pointerToEntriesToAppend = append(pointerToEntriesToAppend, &entriesToAppend[i])
}
m := pb.Message{MsgType: pb.MessageType_MsgAppend, To: to, From: r.id, Term: r.Term, Index: prevLogIndex, LogTerm: prevLogTerm, Entries: pointerToEntriesToAppend, Commit: r.RaftLog.committed}
r.sendMsg(m)
}
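// Worked example (hypothetical progress state): if Prs[to].Next == 4, the message
// carries prevLogIndex 3, the term recorded at index 3, and every entry from
// r.RaftLog.entries[3:] onwards (the slice is 0-based while Raft log indexes
// start at 1), letting the follower run its consistency check before appending.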
// helpers
func (r *Raft) broadcastAppendEntries() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendAppend(p)
}
}
func (r *Raft) rejectAppendEntries(m pb.Message) {
r.sendMsg(pb.Message{MsgType: pb.MessageType_MsgAppendResponse, From: r.id, To: m.From, Reject: true, Term: r.Term})
}
func (r *Raft) acceptAppendEntries(m pb.Message) {
r.sendMsg(pb.Message{MsgType: pb.MessageType_MsgAppendResponse, From: r.id, To: m.From, Reject: false, Term: r.Term, Index: m.Index + uint64(len(m.Entries))})
}
func (r *Raft) forceAppendEntries(m pb.Message) {
// not sure why this would work...
//if len(m.Entries) == 0 {
// return
//}
// if the len(INCOMING_LOG) >= len(CURRENT_LOG_SINCE_INDEX), simply overwrite the current with the incoming
//if uint64(len(m.Entries)) >= r.RaftLog.LastIndex() - m.Index {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index]
// r.RaftLog.appendLog(m.Entries)
//} else {
// for i, _ := range m.Entries {
// if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term{
// } else {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
// }
//}
for i, _ := range m.Entries {
if m.Index + (uint64(i)) + 1 > r.RaftLog.LastIndex() {
r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
r.RaftLog.appendLog(m.Entries[i:])
break
}
if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term {
} else {
r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
r.RaftLog.appendLog(m.Entries[i:])
r.RaftLog.stabled = m.Entries[i].Index - 1
break
}
}
//r.RaftLog.stabled = r.RaftLog.LastIndex()
//for i, _ := range m.Entries {
// if m.Index + (uint64(i)) + 1 > r.RaftLog.LastIndex() {
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
// if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term{
// } else {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
//}
}
func (r *Raft) updateCommittedIndex(m pb.Message) {
if len(m.Entries) == 0 {
r.RaftLog.committed = min(m.Commit, m.Index)
return
}
r.RaftLog.committed = min(m.Commit, r.RaftLog.LastIndex())
}
func (r *Raft) countAppendAtIndex(index uint64) int {
count := 0
for _, p := range r.Prs {
if p.Match >= index {
count += 1
}
}
return count
}
func (r *Raft) handleAppendResponse(m pb.Message) {
if m.Reject {
r.Prs[m.From].Next--
// resend append message
r.sendAppend(m.From)
} else {
// update the progress
//if r.Prs[m.From].Match < r.Prs[r.id].Match {
// //r.Prs[m.From].Match = r.Prs[m.From].Next
// //r.Prs[m.From].Next++
// r.Prs[m.From].Match = r.Prs[r.id].Match
// r.Prs[m.From].Next = r.Prs[m.From].Match + 1
//}
r.Prs[m.From].Match = m.Index
r.Prs[m.From].Next = r.Prs[m.From].Match + 1
// make sure the 'log to commit' is in the current term
indexToCommit := r.Prs[m.From].Match
termForIndexToCommit, _ := r.RaftLog.Term(indexToCommit)
if termForIndexToCommit == r.Term && r.RaftLog.committed < indexToCommit {
if r.countAppendAtIndex(indexToCommit) > len(r.Prs)-r.countAppendAtIndex(indexToCommit) {
r.RaftLog.committed = indexToCommit
r.broadcastAppendEntries()
}
}
}
}
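// Commit-advancement example (illustrative): with three peers and Match values
// {1: 5, 2: 5, 3: 3} after an accept from peer 2, indexToCommit is 5,
// countAppendAtIndex(5) == 2 and 2 > 3-2, so committed advances to 5 provided the
// entry at index 5 was proposed in the current term; the follow-up
// broadcastAppendEntries then propagates the new commit index to the followers.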
// handleAppendEntries handle AppendEntries RPC request
func (r *Raft) handleAppendEntries(m pb.Message) {
if m.Term >= r.Term {
// if not a follower
r.becomeFollower(m.Term, m.From)
// check if m.prevLog exists
targetTerm, err := r.RaftLog.Term(m.Index)
if err != nil && m.Index > 0 {
r.rejectAppendEntries(m)
return
}
if targetTerm != m.LogTerm {
r.rejectAppendEntries(m)
return
} else {
r.forceAppendEntries(m)
r.updateCommittedIndex(m)
r.acceptAppendEntries(m)
//r.RaftLog.stabled = r.RaftLog.committed
return
}
} else {
r.rejectAppendEntries(m)
return
}
}
// handleHeartbeat handle Heartbeat RPC request
func (r *Raft) handleHeartbeat(m pb.Message) {
r.Vote = 0
}
// handleSnapshot handle Snapshot RPC request
func (r *Raft) handleSnapshot(m pb.Message) {
// Your Code Here (2C).
}
// addNode add a new node to raft group
func (r *Raft) addNode(id uint64) {
// Your Code Here (3A).
}
// removeNode remove a node from raft group
func (r *Raft) removeNode(id uint64) {
// Your Code Here (3A).
}
raft.go | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import (
"errors"
pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb"
"log"
"math/rand"
"time"
)
// helper func
const Debug = 0
func DPrintf(format string, a ...interface{}) (n int, err error) {
if Debug > 0 {
log.Printf(format, a...)
}
return
}
// None is a placeholder node ID used when there is no leader.
const None uint64 = 0
// StateType represents the role of a node in a cluster.
type StateType uint64
const (
StateFollower StateType = iota
StateCandidate
StateLeader
)
var stmap = [...]string{
"StateFollower",
"StateCandidate",
"StateLeader",
}
func (st StateType) String() string {
return stmap[uint64(st)]
}
// ErrProposalDropped is returned when the proposal is ignored by some cases,
// so that the proposer can be notified and fail fast.
var ErrProposalDropped = errors.New("raft proposal dropped")
// Config contains the parameters to start a raft.
type Config struct {
// ID is the identity of the local raft. ID cannot be 0.
ID uint64
// peers contains the IDs of all nodes (including self) in the raft cluster. It
// should only be set when starting a new raft cluster. Restarting raft from
// previous configuration will panic if peers is set. peer is private and only
// used for testing right now.
peers []uint64
// ElectionTick is the number of Node.Tick invocations that must pass between
// elections. That is, if a follower does not receive any message from the
// leader of current term before ElectionTick has elapsed, it will become
// candidate and start an election. ElectionTick must be greater than
// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
// unnecessary leader switching.
ElectionTick int
// HeartbeatTick is the number of Node.Tick invocations that must pass between
// heartbeats. That is, a leader sends heartbeat messages to maintain its
// leadership every HeartbeatTick ticks.
HeartbeatTick int
// Storage is the storage for raft. raft generates entries and states to be
// stored in storage. raft reads the persisted entries and states out of
// Storage when it needs. raft reads out the previous state and configuration
// out of storage when restarting.
Storage Storage
// Applied is the last applied index. It should only be set when restarting
// raft. raft will not return entries to the application smaller or equal to
// Applied. If Applied is unset when restarting, raft might return previous
// applied entries. This is a very application dependent configuration.
Applied uint64
}
func (c *Config) validate() error {
if c.ID == None {
return errors.New("cannot use none as id")
}
if c.HeartbeatTick <= 0 {
return errors.New("heartbeat tick must be greater than 0")
}
if c.ElectionTick <= c.HeartbeatTick {
return errors.New("election tick must be greater than heartbeat tick")
}
if c.Storage == nil {
return errors.New("storage cannot be nil")
}
return nil
}
// Progress represents a follower’s progress in the view of the leader. Leader maintains
// progresses of all followers, and sends entries to the follower based on its progress.
type Progress struct {
Match, Next uint64
}
type Raft struct {
id uint64
Term uint64
Vote uint64
// the log
RaftLog *RaftLog
// log replication progress of each peers
Prs map[uint64]*Progress
// this peer's role
State StateType
// votes records
votes map[uint64]bool
// msgs need to send
msgs []pb.Message
// the leader id
Lead uint64
// heartbeat interval, should send
heartbeatTimeout int
// baseline of election interval
electionTimeout int
// number of ticks since it reached last heartbeatTimeout.
// only leader keeps heartbeatElapsed.
heartbeatElapsed int
// Ticks since it reached last electionTimeout when it is leader or candidate.
// Number of ticks since it reached last electionTimeout or received a
// valid message from current leader when it is a follower.
electionElapsed int
// leadTransferee is id of the leader transfer target when its value is not zero.
// Follow the procedure defined in section 3.10 of Raft phd thesis.
// (https://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf)
// (Used in 3A leader transfer)
leadTransferee uint64
// Only one conf change may be pending (in the log, but not yet
// applied) at a time. This is enforced via PendingConfIndex, which
// is set to a value >= the log index of the latest pending
// configuration change (if any). Config changes are only allowed to
// be proposed if the leader's applied index is greater than this
// value.
// (Used in 3A conf change)
PendingConfIndex uint64
actualElectionTimeout int
//// only will be useful if self is a leader
//appendCount map[uint64]uint64
}
func (r *Raft) generateElectionTimeout() int {
min := r.electionTimeout
max := r.electionTimeout * 2 - 1
return rand.Intn(max-min+1) + min
}
// newRaft return a raft peer with the given config
func newRaft(c *Config) *Raft {
if err := c.validate(); err != nil {
panic(err.Error())
}
peer2Progress := make(map[uint64]*Progress, len(c.peers))
peer2Vote := make(map[uint64]bool, len(c.peers))
for _, s := range c.peers {
peer2Vote[s] = false
peer2Progress[s] = &Progress{0, 0}
}
rand.Seed(time.Now().UnixNano())
hardState, _, _ := c.Storage.InitialState()
return &Raft{id: c.ID, Term: hardState.Term, Vote: hardState.Vote, RaftLog: newLog(c.Storage), State: StateFollower, Prs: peer2Progress, votes: peer2Vote, Lead: 0, heartbeatTimeout: c.HeartbeatTick, electionTimeout: c.ElectionTick, heartbeatElapsed: 0, electionElapsed: 0, actualElectionTimeout: 0}
}
// sendAppend sends an append RPC with new entries (if any) and the
// current commit index to the given peer. Returns true if a message was sent.
func (r *Raft) sendAppend(to uint64) bool {
r.sendAppendEntries(to)
return true
}
// sendHeartbeat sends a heartbeat RPC to the given peer.
func (r *Raft) sendHeartbeat(to uint64) {
m := pb.Message{MsgType: pb.MessageType_MsgHeartbeat, From: r.id, To: to, Term: r.Term}
r.sendMsg(m)
}
// tick advances the internal logical clock by a single tick.
func (r *Raft) tick() {
r.heartbeatElapsed++
r.electionElapsed++
if r.State == StateLeader {
if r.heartbeatElapsed == r.heartbeatTimeout {
r.broadcastMsgHeartbeat()
}
} else if r.State == StateCandidate {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
} else if r.State == StateFollower {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
}
}
// becomeFollower transform this peer's state to Follower
func (r *Raft) becomeFollower(term uint64, lead uint64) {
r.State = StateFollower
r.Term = term
r.Lead = lead
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
}
// becomeCandidate transform this peer's state to candidate
func (r *Raft) becomeCandidate() {
r.State = StateCandidate
r.Term++
r.Lead = 0
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
r.votes = map[uint64]bool{}
}
func (r *Raft) handleBeat() {
r.broadcastMsgHeartbeat()
}
func (r *Raft) initializeProgressFirstTime() {
for _, p := range r.Prs {
p.Match = 0
p.Next = r.RaftLog.LastIndex() + 1
}
}
func (r *Raft) initializeProgressSecondTime() {
for i, p := range r.Prs {
if i != r.id {
p.Next++
}
}
}
func (r *Raft) sendInitialAppend() {
r.initializeProgressFirstTime()
// send msgAppend(no-op); not sure if this should be here
propMsg := pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}}}
r.sendMsgLocally(propMsg)
}
// becomeLeader transform this peer's state to leader
func (r *Raft) becomeLeader() {
// NOTE: Leader should propose a noop entry on its term
r.State = StateLeader
r.Vote = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
r.Prs[p].Match = 0
r.Prs[p].Next = r.RaftLog.LastIndex() + 1
}
//r.initializeProgress()
// send heartbeat
//m := pb.Message{MsgType: pb.MessageType_MsgBeat, From: r.id, To: r.id}
//r.sendMsgLocally(m)
// send noop message
r.sendInitialAppend()
r.electionElapsed = 0
}
// helpers
func (r *Raft) broadcastMsgHeartbeat() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendHeartbeat(p)
}
}
// the message will go over the network
func (r *Raft) sendMsg(m pb.Message) {
r.msgs = append(r.msgs, m)
}
// the message will NOT go over the network
func (r *Raft) sendMsgLocally(m pb.Message) {
r.Step(m)
}
func (r *Raft) grantVoting(m pb.Message) {
r.Vote = m.From
r.State = StateFollower
r.Term = m.Term
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: false, Term: r.Term}
r.sendMsg(newMsg)
}
func (r *Raft) rejectVoting(m pb.Message) {
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: true, Term: r.Term}
r.sendMsg(newMsg)
}
// including self
func (r *Raft) hasVotedForAnotherCandidate(m pb.Message) bool {
if !(r.Vote == 0 || r.Vote == m.From) {
return true
}
return false
}
func (r *Raft) hasVotedForSelf() bool {
if r.Vote == r.id {
return true
} else {
return false
}
}
func (r *Raft) hasMoreCommittedLogsThanCandidate(m pb.Message) bool {
lastCommittedLog, err := r.RaftLog.lastCommittedLog()
if err != nil {
return false
}
if m.LogTerm < lastCommittedLog.Term {
return true
} else if m.LogTerm == lastCommittedLog.Term {
if m.Index < lastCommittedLog.Index {
return true
}
}
return false
}
func (r *Raft) hasEarlierLogThanCandidate(m pb.Message) bool {
// determine if the candidate has later term & log than self
currentLogTerm, _ := r.RaftLog.Term(r.RaftLog.LastIndex())
if m.LogTerm < currentLogTerm {
return false
} else if m.LogTerm == currentLogTerm {
if m.Index < r.RaftLog.LastIndex() {
return false
}
}
return true
}
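// Editor's note (illustrative example, not part of the original file): suppose the
// local log ends with (term 3, index 9) and a candidate requests a vote with
// LogTerm = 3, Index = 7. The terms are equal but m.Index < LastIndex(), so this
// function returns false and handleVoting below rejects the vote, matching the
// "at least as up-to-date" rule of Raft section 5.4.1.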
func (r *Raft) updateFollowerState(m pb.Message) {
r.Vote = 0
r.Lead = 0
r.Term = m.Term
r.State = StateFollower
}
func (r *Raft) handleVoting(m pb.Message) {
//DPrintf("current Term %d, current Index %d, message Term %d, message Index %d", r.Term, r.RaftLog.LastIndex(), m.LogTerm, m.Index)
// ensure the candidate has all committed logs
//if r.hasMoreCommittedLogsThanCandidate(m) {
// r.rejectVoting(m)
// return
//}
// determine if the raft has already voted
if m.Term > r.Term {
//if r.hasVotedForAnotherCandidate(m) && !r.hasVotedForSelf() {
// r.rejectVoting(m)
// return
//} else {
// r.grantVoting(m)
// return
//}
r.updateFollowerState(m)
}
if m.Term == r.Term {
if r.hasVotedForAnotherCandidate(m) {
r.rejectVoting(m)
return
} else {
if r.hasEarlierLogThanCandidate(m) && !r.hasMoreCommittedLogsThanCandidate(m){
r.grantVoting(m)
return
} else {
r.rejectVoting(m)
return
}
}
} else {
r.rejectVoting(m)
return
}
}
func (r *Raft) startVoting() {
r.becomeCandidate()
r.votes[r.id] = true
r.Vote = r.id
if r.tallyAndWin() {
r.becomeLeader()
return
}
r.electionElapsed = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
// preparations
logIndex := r.RaftLog.LastIndex()
logTerm, _ := r.RaftLog.Term(logIndex)
m := pb.Message{MsgType: pb.MessageType_MsgRequestVote, From: r.id, To: p, Term: r.Term, LogTerm: logTerm, Index: logIndex}
r.sendMsg(m)
}
}
func (r *Raft) tallyAndWin() bool {
countAccept := 0
//countReject := 0
for _, v := range r.votes {
if v == true {
countAccept++
}
}
if countAccept > len(r.Prs)-countAccept {
return true
} else {
return false
}
}
func (r *Raft) tallyAndLose() bool {
countReject := 0
//countReject := 0
for _, v := range r.votes {
if v == false {
countReject++
}
}
if countReject > len(r.Prs)-countReject {
return true
} else {
return false
}
}
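// Editor's note (illustrative example, not part of the original file): with
// len(r.Prs) == 5, three recorded grants give countAccept = 3 > 5 - 3 = 2, so
// tallyAndWin reports a majority; symmetrically, three recorded rejections make
// tallyAndLose true. With only two responses recorded neither condition holds
// and the election stays undecided.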
func (r *Raft) handleVotingResponse(m pb.Message) {
if !m.Reject {
r.votes[m.From] = true
} else {
r.votes[m.From] = false
if m.Term > r.Term {
r.State = StateFollower
r.Term = m.Term
r.electionElapsed = 0
}
}
// when more than half servers have voted, we tally
if len(r.votes) > len(r.Prs)-len(r.votes) {
if r.tallyAndWin() {
r.becomeLeader()
}
if r.tallyAndLose() {
// we don't specify leader here
r.becomeFollower(r.Term, 0)
}
}
}
func (r *Raft) handleMsgHeartbeat(m pb.Message) {
}
// Step is the entrance for message handling; see `MessageType`
// in `eraftpb.proto` for which messages should be handled
func (r *Raft) Step(m pb.Message) error {
// Your Code Here (2A).
switch r.State {
case StateFollower:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgHeartbeat:
r.handleMsgHeartbeat(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateCandidate:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgRequestVoteResponse:
r.handleVotingResponse(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateLeader:
switch m.MsgType {
case pb.MessageType_MsgHeartbeat:
r.handleHeartbeat(m)
case pb.MessageType_MsgHup:
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgBeat:
r.handleBeat()
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
case pb.MessageType_MsgPropose:
r.handlePropose(m)
case pb.MessageType_MsgAppendResponse:
r.handleAppendResponse(m)
}
}
return nil
}
func (r *Raft) handlePropose(m pb.Message) {
// update match & next for the leader + followers
r.RaftLog.appendLog(m.Entries)
r.Prs[r.id].Match = r.RaftLog.LastIndex()
r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//r.initializeProgressSecondTime()
// if there is only one node in the cluster
if len(r.Prs) == 1 {
r.RaftLog.committed = r.Prs[r.id].Match
}
r.broadcastAppendEntries()
}
func (r *Raft) sendAppendEntries(to uint64) {
// preparations
// note: Raft log indices are 1-based, while Go slice indices are 0-based
nextLogIndex := r.Prs[to].Next
prevLogIndex := nextLogIndex - 1
prevLogTerm, _ := r.RaftLog.Term(prevLogIndex)
//// if there is nothing to send, we append an NO-OP entry
//if nextLogIndex == r.RaftLog.LastIndex() + 1 {
// // update match & next for the leader
// r.RaftLog.appendLog([]*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}})
// r.Prs[r.id].Match = r.RaftLog.LastIndex() | pointerToEntriesToAppend := make([]*pb.Entry, 0)
for i, _ := range entriesToAppend {
pointerToEntriesToAppend = append(pointerToEntriesToAppend, &entriesToAppend[i])
}
m := pb.Message{MsgType: pb.MessageType_MsgAppend, To: to, From: r.id, Term: r.Term, Index: prevLogIndex, LogTerm: prevLogTerm, Entries: pointerToEntriesToAppend, Commit: r.RaftLog.committed}
r.sendMsg(m)
}
// helpers
func (r *Raft) broadcastAppendEntries() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendAppend(p)
}
}
func (r *Raft) rejectAppendEntries(m pb.Message) {
r.sendMsg(pb.Message{MsgType: pb.MessageType_MsgAppendResponse, From: r.id, To: m.From, Reject: true, Term: r.Term})
}
func (r *Raft) acceptAppendEntries(m pb.Message) {
r.sendMsg(pb.Message{MsgType: pb.MessageType_MsgAppendResponse, From: r.id, To: m.From, Reject: false, Term: r.Term, Index: m.Index + uint64(len(m.Entries))})
}
func (r *Raft) forceAppendEntries(m pb.Message) {
// not sure why this would work...
//if len(m.Entries) == 0 {
// return
//}
// if the len(INCOMING_LOG) >= len(CURRENT_LOG_SINCE_INDEX), simply overwrite the current with the incoming
//if uint64(len(m.Entries)) >= r.RaftLog.LastIndex() - m.Index {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index]
// r.RaftLog.appendLog(m.Entries)
//} else {
// for i, _ := range m.Entries {
// if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term{
// } else {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
// }
//}
for i, _ := range m.Entries {
if m.Index + (uint64(i)) + 1 > r.RaftLog.LastIndex() {
r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
r.RaftLog.appendLog(m.Entries[i:])
break
}
if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term {
} else {
r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
r.RaftLog.appendLog(m.Entries[i:])
r.RaftLog.stabled = m.Entries[i].Index - 1
break
}
}
//r.RaftLog.stabled = r.RaftLog.LastIndex()
//for i, _ := range m.Entries {
// if m.Index + (uint64(i)) + 1 > r.RaftLog.LastIndex() {
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
// if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term{
// } else {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
//}
}
func (r *Raft) updateCommittedIndex(m pb.Message) {
if len(m.Entries) == 0 {
r.RaftLog.committed = min(m.Commit, m.Index)
return
}
r.RaftLog.committed = min(m.Commit, r.RaftLog.LastIndex())
}
func (r *Raft) countAppendAtIndex(index uint64) int {
count := 0
for _, p := range r.Prs {
if p.Match >= index {
count += 1
}
}
return count
}
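// Editor's note (illustrative example, not part of the original file): with five
// peers whose Match values are {5, 5, 3, 2, 2}, countAppendAtIndex(3) == 3, which
// exceeds len(r.Prs) - 3 == 2, so index 3 is replicated on a majority.
// handleAppendResponse below additionally requires Term(3) == r.Term before
// advancing committed, the current-term restriction of Raft section 5.4.2.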
func (r *Raft) handleAppendResponse(m pb.Message) {
if m.Reject {
r.Prs[m.From].Next--
// resend append message
r.sendAppend(m.From)
} else {
// update the progress
//if r.Prs[m.From].Match < r.Prs[r.id].Match {
// //r.Prs[m.From].Match = r.Prs[m.From].Next
// //r.Prs[m.From].Next++
// r.Prs[m.From].Match = r.Prs[r.id].Match
// r.Prs[m.From].Next = r.Prs[m.From].Match + 1
//}
r.Prs[m.From].Match = m.Index
r.Prs[m.From].Next = r.Prs[m.From].Match + 1
// make sure the 'log to commit' is in the current term
indexToCommit := r.Prs[m.From].Match
termForIndexToCommit, _ := r.RaftLog.Term(indexToCommit)
if termForIndexToCommit == r.Term && r.RaftLog.committed < indexToCommit {
if r.countAppendAtIndex(indexToCommit) > len(r.Prs)-r.countAppendAtIndex(indexToCommit) {
r.RaftLog.committed = indexToCommit
r.broadcastAppendEntries()
}
}
}
}
// handleAppendEntries handles the AppendEntries RPC request
func (r *Raft) handleAppendEntries(m pb.Message) {
if m.Term >= r.Term {
// if not a follower
r.becomeFollower(m.Term, m.From)
// check if m.prevLog exists
targetTerm, err := r.RaftLog.Term(m.Index)
if err != nil && m.Index > 0 {
r.rejectAppendEntries(m)
return
}
if targetTerm != m.LogTerm {
r.rejectAppendEntries(m)
return
} else {
r.forceAppendEntries(m)
r.updateCommittedIndex(m)
r.acceptAppendEntries(m)
//r.RaftLog.stabled = r.RaftLog.committed
return
}
} else {
r.rejectAppendEntries(m)
return
}
}
// handleHeartbeat handles the Heartbeat RPC request
func (r *Raft) handleHeartbeat(m pb.Message) {
r.Vote = 0
}
// handleSnapshot handles the Snapshot RPC request
func (r *Raft) handleSnapshot(m pb.Message) {
// Your Code Here (2C).
}
// addNode adds a new node to the raft group
func (r *Raft) addNode(id uint64) {
// Your Code Here (3A).
}
// removeNode removes a node from the raft group
func (r *Raft) removeNode(id uint64) {
// Your Code Here (3A).
} | // r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//}
// convert array of objects to array of pointers
entriesToAppend := r.RaftLog.entries[nextLogIndex-1:] | random_line_split |
raft.go | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import (
"errors"
pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb"
"log"
"math/rand"
"time"
)
// helper func
const Debug = 0
func DPrintf(format string, a ...interface{}) (n int, err error) {
if Debug > 0 {
log.Printf(format, a...)
}
return
}
// None is a placeholder node ID used when there is no leader.
const None uint64 = 0
// StateType represents the role of a node in a cluster.
type StateType uint64
const (
StateFollower StateType = iota
StateCandidate
StateLeader
)
var stmap = [...]string{
"StateFollower",
"StateCandidate",
"StateLeader",
}
func (st StateType) String() string {
return stmap[uint64(st)]
}
// ErrProposalDropped is returned when the proposal is ignored by some cases,
// so that the proposer can be notified and fail fast.
var ErrProposalDropped = errors.New("raft proposal dropped")
// Config contains the parameters to start a raft.
type Config struct {
// ID is the identity of the local raft. ID cannot be 0.
ID uint64
// peers contains the IDs of all nodes (including self) in the raft cluster. It
// should only be set when starting a new raft cluster. Restarting raft from
// previous configuration will panic if peers is set. peer is private and only
// used for testing right now.
peers []uint64
// ElectionTick is the number of Node.Tick invocations that must pass between
// elections. That is, if a follower does not receive any message from the
// leader of current term before ElectionTick has elapsed, it will become
// candidate and start an election. ElectionTick must be greater than
// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
// unnecessary leader switching.
ElectionTick int
// HeartbeatTick is the number of Node.Tick invocations that must pass between
// heartbeats. That is, a leader sends heartbeat messages to maintain its
// leadership every HeartbeatTick ticks.
HeartbeatTick int
// Storage is the storage for raft. raft generates entries and states to be
// stored in storage. raft reads the persisted entries and states out of
// Storage when it needs. raft reads out the previous state and configuration
// out of storage when restarting.
Storage Storage
// Applied is the last applied index. It should only be set when restarting
// raft. raft will not return entries to the application smaller or equal to
// Applied. If Applied is unset when restarting, raft might return previous
// applied entries. This is a very application dependent configuration.
Applied uint64
}
func (c *Config) validate() error {
if c.ID == None {
return errors.New("cannot use none as id")
}
if c.HeartbeatTick <= 0 {
return errors.New("heartbeat tick must be greater than 0")
}
if c.ElectionTick <= c.HeartbeatTick {
return errors.New("election tick must be greater than heartbeat tick")
}
if c.Storage == nil {
return errors.New("storage cannot be nil")
}
return nil
}
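// Editor's sketch (not part of the original file): a configuration that follows the
// ElectionTick = 10 * HeartbeatTick suggestion documented above. The helper name and
// the three-peer cluster are illustrative assumptions; storage is any implementation
// of the Storage interface used by newLog below.
func exampleConfig(storage Storage) *Config {
return &Config{
ID: 1,
peers: []uint64{1, 2, 3},
ElectionTick: 10,
HeartbeatTick: 1,
Storage: storage,
Applied: 0,
}
}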
// Progress represents a follower’s progress in the view of the leader. Leader maintains
// progresses of all followers, and sends entries to the follower based on its progress.
type Progress struct {
Match, Next uint64
}
type Raft struct {
id uint64
Term uint64
Vote uint64
// the log
RaftLog *RaftLog
// log replication progress of each peers
Prs map[uint64]*Progress
// this peer's role
State StateType
// votes records
votes map[uint64]bool
// msgs need to send
msgs []pb.Message
// the leader id
Lead uint64
// heartbeat interval, should send
heartbeatTimeout int
// baseline of election interval
electionTimeout int
// number of ticks since it reached last heartbeatTimeout.
// only leader keeps heartbeatElapsed.
heartbeatElapsed int
// Ticks since it reached last electionTimeout when it is leader or candidate.
// Number of ticks since it reached last electionTimeout or received a
// valid message from current leader when it is a follower.
electionElapsed int
// leadTransferee is id of the leader transfer target when its value is not zero.
// Follow the procedure defined in section 3.10 of Raft phd thesis.
// (https://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf)
// (Used in 3A leader transfer)
leadTransferee uint64
// Only one conf change may be pending (in the log, but not yet
// applied) at a time. This is enforced via PendingConfIndex, which
// is set to a value >= the log index of the latest pending
// configuration change (if any). Config changes are only allowed to
// be proposed if the leader's applied index is greater than this
// value.
// (Used in 3A conf change)
PendingConfIndex uint64
actualElectionTimeout int
//// only will be useful if self is a leader
//appendCount map[uint64]uint64
}
func (r *Raft) generateElectionTimeout() int {
min := r.electionTimeout
max := r.electionTimeout * 2 - 1
return rand.Intn(max-min+1) + min
}
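// Editor's note (illustrative example, not part of the original file): with
// ElectionTick = 10 the draw above is uniform over [10, 19] ticks, i.e.
// rand.Intn(19-10+1) + 10. Randomizing the timeout is what makes simultaneous
// candidacies, and therefore split votes, unlikely (Raft section 5.2).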
// newRaft returns a raft peer with the given config
func newRaft(c *Config) *Raft {
if err := c.validate(); err != nil {
panic(err.Error())
}
peer2Progress := make(map[uint64]*Progress, len(c.peers))
peer2Vote := make(map[uint64]bool, len(c.peers))
for _, s := range c.peers {
peer2Vote[s] = false
peer2Progress[s] = &Progress{0, 0}
}
rand.Seed(time.Now().UnixNano())
hardState, _, _ := c.Storage.InitialState()
return &Raft{id: c.ID, Term: hardState.Term, Vote: hardState.Vote, RaftLog: newLog(c.Storage), State: StateFollower, Prs: peer2Progress, votes: peer2Vote, Lead: 0, heartbeatTimeout: c.HeartbeatTick, electionTimeout: c.ElectionTick, heartbeatElapsed: 0, electionElapsed: 0, actualElectionTimeout: 0}
}
// sendAppend sends an append RPC with new entries (if any) and the
// current commit index to the given peer. Returns true if a message was sent.
func (r *Raft) sendAppend(to uint64) bool {
r.sendAppendEntries(to)
return true
}
// sendHeartbeat sends a heartbeat RPC to the given peer.
func (r *Raft) sendHeartbeat(to uint64) {
m := pb.Message{MsgType: pb.MessageType_MsgHeartbeat, From: r.id, To: to, Term: r.Term}
r.sendMsg(m)
}
// tick advances the internal logical clock by a single tick.
func (r *Raft) tick() {
r.heartbeatElapsed++
r.electionElapsed++
if r.State == StateLeader {
if r.heartbeatElapsed == r.heartbeatTimeout {
r.broadcastMsgHeartbeat()
}
} else if r.State == StateCandidate {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
} else if r.State == StateFollower {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
}
}
// becomeFollower transforms this peer's state to Follower
func (r *Raft) becomeFollower(term uint64, lead uint64) {
r.State = StateFollower
r.Term = term
r.Lead = lead
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
}
// becomeCandidate transforms this peer's state to candidate
func (r *Raft) becomeCandidate() {
r.State = StateCandidate
r.Term++
r.Lead = 0
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
r.votes = map[uint64]bool{}
}
func (r *Raft) handleBeat() {
r.broadcastMsgHeartbeat()
}
func (r *Raft) initializeProgressFirstTime() {
for _, p := range r.Prs {
p.Match = 0
p.Next = r.RaftLog.LastIndex() + 1
}
}
func (r *Raft) initializeProgressSecondTime() {
for i, p := range r.Prs {
if i != r.id {
p.Next++
}
}
}
func (r *Raft) sendInitialAppend() {
r.initializeProgressFirstTime()
// send msgAppend(no-op); not sure if this should be here
propMsg := pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}}}
r.sendMsgLocally(propMsg)
}
// becomeLeader transforms this peer's state to leader
func (r *Raft) becomeLeader() {
// NOTE: Leader should propose a noop entry on its term
r.State = StateLeader
r.Vote = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
r.Prs[p].Match = 0
r.Prs[p].Next = r.RaftLog.LastIndex() + 1
}
//r.initializeProgress()
// send heartbeat
//m := pb.Message{MsgType: pb.MessageType_MsgBeat, From: r.id, To: r.id}
//r.sendMsgLocally(m)
// send noop message
r.sendInitialAppend()
r.electionElapsed = 0
}
// helpers
func (r *Raft) broadcastMsgHeartbeat() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendHeartbeat(p)
}
}
// the message will go over the network
func (r *Raft) sendMsg(m pb.Message) {
r.msgs = append(r.msgs, m)
}
// the message will NOT go over the network
func (r *Raft) sendMsgLocally(m pb.Message) {
r.Step(m)
}
func (r *Raft) grantVoting(m pb.Message) {
r.Vote = m.From
r.State = StateFollower
r.Term = m.Term
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: false, Term: r.Term}
r.sendMsg(newMsg)
}
func (r *Raft) rejectVoting(m pb.Message) {
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: true, Term: r.Term}
r.sendMsg(newMsg)
}
// including self
func (r *Raft) hasVotedForAnotherCandidate(m pb.Message) bool {
if !(r.Vote == 0 || r.Vote == m.From) {
return true
}
return false
}
func (r *Raft) hasVotedForSelf() bool {
if r.Vote == r.id {
return true
} else {
return false
}
}
func (r *Raft) hasMoreCommittedLogsThanCandidate(m pb.Message) bool {
lastCommittedLog, err := r.RaftLog.lastCommittedLog()
if err != nil {
return false
}
if m.LogTerm < lastCommittedLog.Term {
return true
} else if m.LogTerm == lastCommittedLog.Term {
if m.Index < lastCommittedLog.Index {
return true
}
}
return false
}
func (r *Raft) hasEarlierLogThanCandidate(m pb.Message) bool {
// determine if the candidate has later term & log than self
currentLogTerm, _ := r.RaftLog.Term(r.RaftLog.LastIndex())
if m.LogTerm < currentLogTerm {
return false
} else if m.LogTerm == currentLogTerm {
if m.Index < r.RaftLog.LastIndex() {
return false
}
}
return true
}
func (r *Raft) updateFollowerState(m pb.Message) {
r.Vote = 0
r.Lead = 0
r.Term = m.Term
r.State = StateFollower
}
func (r *Raft) handleVoting(m pb.Message) {
//DPrintf("current Term %d, current Index %d, message Term %d, message Index %d", r.Term, r.RaftLog.LastIndex(), m.LogTerm, m.Index)
// ensure the candidate has all committed logs
//if r.hasMoreCommittedLogsThanCandidate(m) {
// r.rejectVoting(m)
// return
//}
// determine if the raft has already voted
if m.Term > r.Term {
//if r.hasVotedForAnotherCandidate(m) && !r.hasVotedForSelf() {
// r.rejectVoting(m)
// return
//} else {
// r.grantVoting(m)
// return
//}
r.updateFollowerState(m)
}
if m.Term == r.Term {
if r.hasVotedForAnotherCandidate(m) {
| lse {
if r.hasEarlierLogThanCandidate(m) && !r.hasMoreCommittedLogsThanCandidate(m){
r.grantVoting(m)
return
} else {
r.rejectVoting(m)
return
}
}
} else {
r.rejectVoting(m)
return
}
}
func (r *Raft) startVoting() {
r.becomeCandidate()
r.votes[r.id] = true
r.Vote = r.id
if r.tallyAndWin() {
r.becomeLeader()
return
}
r.electionElapsed = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
// preparations
logIndex := r.RaftLog.LastIndex()
logTerm, _ := r.RaftLog.Term(logIndex)
m := pb.Message{MsgType: pb.MessageType_MsgRequestVote, From: r.id, To: p, Term: r.Term, LogTerm: logTerm, Index: logIndex}
r.sendMsg(m)
}
}
func (r *Raft) tallyAndWin() bool {
countAccept := 0
//countReject := 0
for _, v := range r.votes {
if v == true {
countAccept++
}
}
if countAccept > len(r.Prs)-countAccept {
return true
} else {
return false
}
}
func (r *Raft) tallyAndLose() bool {
countReject := 0
//countReject := 0
for _, v := range r.votes {
if v == false {
countReject++
}
}
if countReject > len(r.Prs)-countReject {
return true
} else {
return false
}
}
func (r *Raft) handleVotingResponse(m pb.Message) {
if !m.Reject {
r.votes[m.From] = true
} else {
r.votes[m.From] = false
if m.Term > r.Term {
r.State = StateFollower
r.Term = m.Term
r.electionElapsed = 0
}
}
// when more than half servers have voted, we tally
if len(r.votes) > len(r.Prs)-len(r.votes) {
if r.tallyAndWin() {
r.becomeLeader()
}
if r.tallyAndLose() {
// we don't specify leader here
r.becomeFollower(r.Term, 0)
}
}
}
func (r *Raft) handleMsgHeartbeat(m pb.Message) {
}
// Step is the entrance for message handling; see `MessageType`
// in `eraftpb.proto` for which messages should be handled
func (r *Raft) Step(m pb.Message) error {
// Your Code Here (2A).
switch r.State {
case StateFollower:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgHeartbeat:
r.handleMsgHeartbeat(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateCandidate:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgRequestVoteResponse:
r.handleVotingResponse(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateLeader:
switch m.MsgType {
case pb.MessageType_MsgHeartbeat:
r.handleHeartbeat(m)
case pb.MessageType_MsgHup:
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgBeat:
r.handleBeat()
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
case pb.MessageType_MsgPropose:
r.handlePropose(m)
case pb.MessageType_MsgAppendResponse:
r.handleAppendResponse(m)
}
}
return nil
}
func (r *Raft) handlePropose(m pb.Message) {
// update match & next for the leader + followers
r.RaftLog.appendLog(m.Entries)
r.Prs[r.id].Match = r.RaftLog.LastIndex()
r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//r.initializeProgressSecondTime()
// if there is only one node in the cluster
if len(r.Prs) == 1 {
r.RaftLog.committed = r.Prs[r.id].Match
}
r.broadcastAppendEntries()
}
func (r *Raft) sendAppendEntries(to uint64) {
// preparations
// note: Raft log indices are 1-based, while Go slice indices are 0-based
nextLogIndex := r.Prs[to].Next
prevLogIndex := nextLogIndex - 1
prevLogTerm, _ := r.RaftLog.Term(prevLogIndex)
//// if there is nothing to send, we append an NO-OP entry
//if nextLogIndex == r.RaftLog.LastIndex() + 1 {
// // update match & next for the leader
// r.RaftLog.appendLog([]*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}})
// r.Prs[r.id].Match = r.RaftLog.LastIndex()
// r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//}
// convert array of objects to array of pointers
entriesToAppend := r.RaftLog.entries[nextLogIndex-1:]
pointerToEntriesToAppend := make([]*pb.Entry, 0)
for i, _ := range entriesToAppend {
pointerToEntriesToAppend = append(pointerToEntriesToAppend, &entriesToAppend[i])
}
m := pb.Message{MsgType: pb.MessageType_MsgAppend, To: to, From: r.id, Term: r.Term, Index: prevLogIndex, LogTerm: prevLogTerm, Entries: pointerToEntriesToAppend, Commit: r.RaftLog.committed}
r.sendMsg(m)
}
// helpers
func (r *Raft) broadcastAppendEntries() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendAppend(p)
}
}
func (r *Raft) rejectAppendEntries(m pb.Message) {
r.sendMsg(pb.Message{MsgType: pb.MessageType_MsgAppendResponse, From: r.id, To: m.From, Reject: true, Term: r.Term})
}
func (r *Raft) acceptAppendEntries(m pb.Message) {
r.sendMsg(pb.Message{MsgType: pb.MessageType_MsgAppendResponse, From: r.id, To: m.From, Reject: false, Term: r.Term, Index: m.Index + uint64(len(m.Entries))})
}
func (r *Raft) forceAppendEntries(m pb.Message) {
// not sure why this would work...
//if len(m.Entries) == 0 {
// return
//}
// if the len(INCOMING_LOG) >= len(CURRENT_LOG_SINCE_INDEX), simply overwrite the current with the incoming
//if uint64(len(m.Entries)) >= r.RaftLog.LastIndex() - m.Index {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index]
// r.RaftLog.appendLog(m.Entries)
//} else {
// for i, _ := range m.Entries {
// if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term{
// } else {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
// }
//}
for i, _ := range m.Entries {
if m.Index + (uint64(i)) + 1 > r.RaftLog.LastIndex() {
r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
r.RaftLog.appendLog(m.Entries[i:])
break
}
if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term {
} else {
r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
r.RaftLog.appendLog(m.Entries[i:])
r.RaftLog.stabled = m.Entries[i].Index - 1
break
}
}
//r.RaftLog.stabled = r.RaftLog.LastIndex()
//for i, _ := range m.Entries {
// if m.Index + (uint64(i)) + 1 > r.RaftLog.LastIndex() {
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
// if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term{
// } else {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
//}
}
func (r *Raft) updateCommittedIndex(m pb.Message) {
if len(m.Entries) == 0 {
r.RaftLog.committed = min(m.Commit, m.Index)
return
}
r.RaftLog.committed = min(m.Commit, r.RaftLog.LastIndex())
}
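// Editor's note (illustrative example, not part of the original file): this mirrors
// AppendEntries receiver rule 5 of the Raft paper, committed = min(leaderCommit,
// index of last new entry). For m.Commit = 7 and a follower whose log now ends at
// index 5, committed becomes 5; a message carrying no entries instead caps the
// value at m.Index, the index preceding any new entries.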
func (r *Raft) countAppendAtIndex(index uint64) int {
count := 0
for _, p := range r.Prs {
if p.Match >= index {
count += 1
}
}
return count
}
func (r *Raft) handleAppendResponse(m pb.Message) {
if m.Reject {
r.Prs[m.From].Next--
// resend append message
r.sendAppend(m.From)
} else {
// update the progress
//if r.Prs[m.From].Match < r.Prs[r.id].Match {
// //r.Prs[m.From].Match = r.Prs[m.From].Next
// //r.Prs[m.From].Next++
// r.Prs[m.From].Match = r.Prs[r.id].Match
// r.Prs[m.From].Next = r.Prs[m.From].Match + 1
//}
r.Prs[m.From].Match = m.Index
r.Prs[m.From].Next = r.Prs[m.From].Match + 1
// make sure the 'log to commit' is in the current term
indexToCommit := r.Prs[m.From].Match
termForIndexToCommit, _ := r.RaftLog.Term(indexToCommit)
if termForIndexToCommit == r.Term && r.RaftLog.committed < indexToCommit {
if r.countAppendAtIndex(indexToCommit) > len(r.Prs)-r.countAppendAtIndex(indexToCommit) {
r.RaftLog.committed = indexToCommit
r.broadcastAppendEntries()
}
}
}
}
// handleAppendEntries handles the AppendEntries RPC request
func (r *Raft) handleAppendEntries(m pb.Message) {
if m.Term >= r.Term {
// if not a follower
r.becomeFollower(m.Term, m.From)
// check if m.prevLog exists
targetTerm, err := r.RaftLog.Term(m.Index)
if err != nil && m.Index > 0 {
r.rejectAppendEntries(m)
return
}
if targetTerm != m.LogTerm {
r.rejectAppendEntries(m)
return
} else {
r.forceAppendEntries(m)
r.updateCommittedIndex(m)
r.acceptAppendEntries(m)
//r.RaftLog.stabled = r.RaftLog.committed
return
}
} else {
r.rejectAppendEntries(m)
return
}
}
// handleHeartbeat handles the Heartbeat RPC request
func (r *Raft) handleHeartbeat(m pb.Message) {
r.Vote = 0
}
// handleSnapshot handles the Snapshot RPC request
func (r *Raft) handleSnapshot(m pb.Message) {
// Your Code Here (2C).
}
// addNode adds a new node to the raft group
func (r *Raft) addNode(id uint64) {
// Your Code Here (3A).
}
// removeNode removes a node from the raft group
func (r *Raft) removeNode(id uint64) {
// Your Code Here (3A).
}
| r.rejectVoting(m)
return
} e | conditional_block |
raft.go | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import (
"errors"
pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb"
"log"
"math/rand"
"time"
)
// helper func
const Debug = 0
func DPrintf(format string, a ...interface{}) (n int, err error) {
if Debug > 0 {
log.Printf(format, a...)
}
return
}
// None is a placeholder node ID used when there is no leader.
const None uint64 = 0
// StateType represents the role of a node in a cluster.
type StateType uint64
const (
StateFollower StateType = iota
StateCandidate
StateLeader
)
var stmap = [...]string{
"StateFollower",
"StateCandidate",
"StateLeader",
}
func (st StateType) String() string {
return stmap[uint64(st)]
}
// ErrProposalDropped is returned when the proposal is ignored by some cases,
// so that the proposer can be notified and fail fast.
var ErrProposalDropped = errors.New("raft proposal dropped")
// Config contains the parameters to start a raft.
type Config struct {
// ID is the identity of the local raft. ID cannot be 0.
ID uint64
// peers contains the IDs of all nodes (including self) in the raft cluster. It
// should only be set when starting a new raft cluster. Restarting raft from
// previous configuration will panic if peers is set. peer is private and only
// used for testing right now.
peers []uint64
// ElectionTick is the number of Node.Tick invocations that must pass between
// elections. That is, if a follower does not receive any message from the
// leader of current term before ElectionTick has elapsed, it will become
// candidate and start an election. ElectionTick must be greater than
// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
// unnecessary leader switching.
ElectionTick int
// HeartbeatTick is the number of Node.Tick invocations that must pass between
// heartbeats. That is, a leader sends heartbeat messages to maintain its
// leadership every HeartbeatTick ticks.
HeartbeatTick int
// Storage is the storage for raft. raft generates entries and states to be
// stored in storage. raft reads the persisted entries and states out of
// Storage when it needs. raft reads out the previous state and configuration
// out of storage when restarting.
Storage Storage
// Applied is the last applied index. It should only be set when restarting
// raft. raft will not return entries to the application smaller or equal to
// Applied. If Applied is unset when restarting, raft might return previous
// applied entries. This is a very application dependent configuration.
Applied uint64
}
func (c *Config) validate() error {
if c.ID == None {
return errors.New("cannot use none as id")
}
if c.HeartbeatTick <= 0 {
return errors.New("heartbeat tick must be greater than 0")
}
if c.ElectionTick <= c.HeartbeatTick {
return errors.New("election tick must be greater than heartbeat tick")
}
if c.Storage == nil {
return errors.New("storage cannot be nil")
}
return nil
}
// Progress represents a follower’s progress in the view of the leader. Leader maintains
// progresses of all followers, and sends entries to the follower based on its progress.
type Progress struct {
Match, Next uint64
}
type Raft struct {
id uint64
Term uint64
Vote uint64
// the log
RaftLog *RaftLog
// log replication progress of each peers
Prs map[uint64]*Progress
// this peer's role
State StateType
// votes records
votes map[uint64]bool
// msgs need to send
msgs []pb.Message
// the leader id
Lead uint64
// heartbeat interval, should send
heartbeatTimeout int
// baseline of election interval
electionTimeout int
// number of ticks since it reached last heartbeatTimeout.
// only leader keeps heartbeatElapsed.
heartbeatElapsed int
// Ticks since it reached last electionTimeout when it is leader or candidate.
// Number of ticks since it reached last electionTimeout or received a
// valid message from current leader when it is a follower.
electionElapsed int
// leadTransferee is id of the leader transfer target when its value is not zero.
// Follow the procedure defined in section 3.10 of Raft phd thesis.
// (https://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf)
// (Used in 3A leader transfer)
leadTransferee uint64
// Only one conf change may be pending (in the log, but not yet
// applied) at a time. This is enforced via PendingConfIndex, which
// is set to a value >= the log index of the latest pending
// configuration change (if any). Config changes are only allowed to
// be proposed if the leader's applied index is greater than this
// value.
// (Used in 3A conf change)
PendingConfIndex uint64
actualElectionTimeout int
//// only will be useful if self is a leader
//appendCount map[uint64]uint64
}
func (r *Raft) generateElectionTimeout() int {
min := r.electionTimeout
max := r.electionTimeout * 2 - 1
return rand.Intn(max-min+1) + min
}
// newRaft returns a raft peer with the given config
func newRaft(c *Config) *Raft {
if err := c.validate(); err != nil {
panic(err.Error())
}
peer2Progress := make(map[uint64]*Progress, len(c.peers))
peer2Vote := make(map[uint64]bool, len(c.peers))
for _, s := range c.peers {
peer2Vote[s] = false
peer2Progress[s] = &Progress{0, 0}
}
rand.Seed(time.Now().UnixNano())
hardState, _, _ := c.Storage.InitialState()
return &Raft{id: c.ID, Term: hardState.Term, Vote: hardState.Vote, RaftLog: newLog(c.Storage), State: StateFollower, Prs: peer2Progress, votes: peer2Vote, Lead: 0, heartbeatTimeout: c.HeartbeatTick, electionTimeout: c.ElectionTick, heartbeatElapsed: 0, electionElapsed: 0, actualElectionTimeout: 0}
}
// sendAppend sends an append RPC with new entries (if any) and the
// current commit index to the given peer. Returns true if a message was sent.
func (r *Raft) sendAppend(to uint64) bool {
r.sendAppendEntries(to)
return true
}
// sendHeartbeat sends a heartbeat RPC to the given peer.
func (r *Raft) sendHeartbeat(to uint64) {
m := pb.Message{MsgType: pb.MessageType_MsgHeartbeat, From: r.id, To: to, Term: r.Term}
r.sendMsg(m)
}
// tick advances the internal logical clock by a single tick.
func (r *Raft) tick() {
r.heartbeatElapsed++
r.electionElapsed++
if r.State == StateLeader {
if r.heartbeatElapsed == r.heartbeatTimeout {
r.broadcastMsgHeartbeat()
}
} else if r.State == StateCandidate {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
} else if r.State == StateFollower {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
}
}
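// Editor's note (illustrative example, not part of the original file): with
// HeartbeatTick = 1 and ElectionTick = 10, the leader broadcasts heartbeats when
// heartbeatElapsed equals heartbeatTimeout, while a follower or candidate whose
// electionElapsed exceeds its randomized actualElectionTimeout (drawn from [10, 19]
// here) sends itself a local MsgHup and starts a new election.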
// becomeFollower transforms this peer's state to Follower
func (r *Raft) becomeFollower(term uint64, lead uint64) {
r.State = StateFollower
r.Term = term
r.Lead = lead
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
}
// becomeCandidate transforms this peer's state to candidate
func (r *Raft) becomeCandidate() {
| func (r *Raft) handleBeat() {
r.broadcastMsgHeartbeat()
}
func (r *Raft) initializeProgressFirstTime() {
for _, p := range r.Prs {
p.Match = 0
p.Next = r.RaftLog.LastIndex() + 1
}
}
func (r *Raft) initializeProgressSecondTime() {
for i, p := range r.Prs {
if i != r.id {
p.Next++
}
}
}
func (r *Raft) sendInitialAppend() {
r.initializeProgressFirstTime()
// send msgAppend(no-op); not sure if this should be here
propMsg := pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}}}
r.sendMsgLocally(propMsg)
}
// becomeLeader transforms this peer's state to leader
func (r *Raft) becomeLeader() {
// NOTE: Leader should propose a noop entry on its term
r.State = StateLeader
r.Vote = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
r.Prs[p].Match = 0
r.Prs[p].Next = r.RaftLog.LastIndex() + 1
}
//r.initializeProgress()
// send heartbeat
//m := pb.Message{MsgType: pb.MessageType_MsgBeat, From: r.id, To: r.id}
//r.sendMsgLocally(m)
// send noop message
r.sendInitialAppend()
r.electionElapsed = 0
}
// helpers
func (r *Raft) broadcastMsgHeartbeat() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendHeartbeat(p)
}
}
// the message will go over the network
func (r *Raft) sendMsg(m pb.Message) {
r.msgs = append(r.msgs, m)
}
// the message will NOT go over the network
func (r *Raft) sendMsgLocally(m pb.Message) {
r.Step(m)
}
func (r *Raft) grantVoting(m pb.Message) {
r.Vote = m.From
r.State = StateFollower
r.Term = m.Term
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: false, Term: r.Term}
r.sendMsg(newMsg)
}
func (r *Raft) rejectVoting(m pb.Message) {
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: true, Term: r.Term}
r.sendMsg(newMsg)
}
// including self
func (r *Raft) hasVotedForAnotherCandidate(m pb.Message) bool {
if !(r.Vote == 0 || r.Vote == m.From) {
return true
}
return false
}
func (r *Raft) hasVotedForSelf() bool {
if r.Vote == r.id {
return true
} else {
return false
}
}
func (r *Raft) hasMoreCommittedLogsThanCandidate(m pb.Message) bool {
lastCommittedLog, err := r.RaftLog.lastCommittedLog()
if err != nil {
return false
}
if m.LogTerm < lastCommittedLog.Term {
return true
} else if m.LogTerm == lastCommittedLog.Term {
if m.Index < lastCommittedLog.Index {
return true
}
}
return false
}
func (r *Raft) hasEarlierLogThanCandidate(m pb.Message) bool {
// determine if the candidate has later term & log than self
currentLogTerm, _ := r.RaftLog.Term(r.RaftLog.LastIndex())
if m.LogTerm < currentLogTerm {
return false
} else if m.LogTerm == currentLogTerm {
if m.Index < r.RaftLog.LastIndex() {
return false
}
}
return true
}
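// Editor's note (illustrative example, not part of the original file): handleVoting
// below grants a vote only when this check passes and hasMoreCommittedLogsThanCandidate
// is false. For a candidate sending LogTerm = 4, Index = 12 against a local last entry
// of (term 4, index 10), the candidate's log is at least as up-to-date, so the vote is
// granted provided no vote was already given to another peer this term.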
func (r *Raft) updateFollowerState(m pb.Message) {
r.Vote = 0
r.Lead = 0
r.Term = m.Term
r.State = StateFollower
}
func (r *Raft) handleVoting(m pb.Message) {
//DPrintf("current Term %d, current Index %d, message Term %d, message Index %d", r.Term, r.RaftLog.LastIndex(), m.LogTerm, m.Index)
// ensure the candidate has all committed logs
//if r.hasMoreCommittedLogsThanCandidate(m) {
// r.rejectVoting(m)
// return
//}
// determine if the raft has already voted
if m.Term > r.Term {
//if r.hasVotedForAnotherCandidate(m) && !r.hasVotedForSelf() {
// r.rejectVoting(m)
// return
//} else {
// r.grantVoting(m)
// return
//}
r.updateFollowerState(m)
}
if m.Term == r.Term {
if r.hasVotedForAnotherCandidate(m) {
r.rejectVoting(m)
return
} else {
if r.hasEarlierLogThanCandidate(m) && !r.hasMoreCommittedLogsThanCandidate(m){
r.grantVoting(m)
return
} else {
r.rejectVoting(m)
return
}
}
} else {
r.rejectVoting(m)
return
}
}
func (r *Raft) startVoting() {
r.becomeCandidate()
r.votes[r.id] = true
r.Vote = r.id
if r.tallyAndWin() {
r.becomeLeader()
return
}
r.electionElapsed = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
// preparations
logIndex := r.RaftLog.LastIndex()
logTerm, _ := r.RaftLog.Term(logIndex)
m := pb.Message{MsgType: pb.MessageType_MsgRequestVote, From: r.id, To: p, Term: r.Term, LogTerm: logTerm, Index: logIndex}
r.sendMsg(m)
}
}
func (r *Raft) tallyAndWin() bool {
countAccept := 0
//countReject := 0
for _, v := range r.votes {
if v == true {
countAccept++
}
}
if countAccept > len(r.Prs)-countAccept {
return true
} else {
return false
}
}
func (r *Raft) tallyAndLose() bool {
countReject := 0
//countReject := 0
for _, v := range r.votes {
if v == false {
countReject++
}
}
if countReject > len(r.Prs)-countReject {
return true
} else {
return false
}
}
func (r *Raft) handleVotingResponse(m pb.Message) {
if !m.Reject {
r.votes[m.From] = true
} else {
r.votes[m.From] = false
if m.Term > r.Term {
r.State = StateFollower
r.Term = m.Term
r.electionElapsed = 0
}
}
// when more than half servers have voted, we tally
if len(r.votes) > len(r.Prs)-len(r.votes) {
if r.tallyAndWin() {
r.becomeLeader()
}
if r.tallyAndLose() {
// we don't specify leader here
r.becomeFollower(r.Term, 0)
}
}
}
func (r *Raft) handleMsgHeartbeat(m pb.Message) {
}
// Step is the entrance for message handling; see `MessageType`
// in `eraftpb.proto` for which messages should be handled
func (r *Raft) Step(m pb.Message) error {
// Your Code Here (2A).
switch r.State {
case StateFollower:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgHeartbeat:
r.handleMsgHeartbeat(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateCandidate:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgRequestVoteResponse:
r.handleVotingResponse(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateLeader:
switch m.MsgType {
case pb.MessageType_MsgHeartbeat:
r.handleHeartbeat(m)
case pb.MessageType_MsgHup:
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgBeat:
r.handleBeat()
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
case pb.MessageType_MsgPropose:
r.handlePropose(m)
case pb.MessageType_MsgAppendResponse:
r.handleAppendResponse(m)
}
}
return nil
}
func (r *Raft) handlePropose(m pb.Message) {
// update match & next for the leader + followers
r.RaftLog.appendLog(m.Entries)
r.Prs[r.id].Match = r.RaftLog.LastIndex()
r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//r.initializeProgressSecondTime()
// if there is only one node in the cluster
if len(r.Prs) == 1 {
r.RaftLog.committed = r.Prs[r.id].Match
}
r.broadcastAppendEntries()
}
func (r *Raft) sendAppendEntries(to uint64) {
// preparations
// note: Raft log indices are 1-based, while Go slice indices are 0-based
nextLogIndex := r.Prs[to].Next
prevLogIndex := nextLogIndex - 1
prevLogTerm, _ := r.RaftLog.Term(prevLogIndex)
//// if there is nothing to send, we append an NO-OP entry
//if nextLogIndex == r.RaftLog.LastIndex() + 1 {
// // update match & next for the leader
// r.RaftLog.appendLog([]*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}})
// r.Prs[r.id].Match = r.RaftLog.LastIndex()
// r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//}
// convert array of objects to array of pointers
entriesToAppend := r.RaftLog.entries[nextLogIndex-1:]
pointerToEntriesToAppend := make([]*pb.Entry, 0)
for i, _ := range entriesToAppend {
pointerToEntriesToAppend = append(pointerToEntriesToAppend, &entriesToAppend[i])
}
m := pb.Message{MsgType: pb.MessageType_MsgAppend, To: to, From: r.id, Term: r.Term, Index: prevLogIndex, LogTerm: prevLogTerm, Entries: pointerToEntriesToAppend, Commit: r.RaftLog.committed}
r.sendMsg(m)
}
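// Editor's note (illustrative example, not part of the original file): Raft log
// indices are 1-based while the entries slice is 0-based, so assuming the log starts
// at index 1 with no snapshot offset, Prs[to].Next = 4 gives prevLogIndex = 3 and
// entriesToAppend = entries[3:], whose first element is the entry with Index 4.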
// helpers
func (r *Raft) broadcastAppendEntries() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendAppend(p)
}
}
func (r *Raft) rejectAppendEntries(m pb.Message) {
r.sendMsg(pb.Message{MsgType: pb.MessageType_MsgAppendResponse, From: r.id, To: m.From, Reject: true, Term: r.Term})
}
func (r *Raft) acceptAppendEntries(m pb.Message) {
r.sendMsg(pb.Message{MsgType: pb.MessageType_MsgAppendResponse, From: r.id, To: m.From, Reject: false, Term: r.Term, Index: m.Index + uint64(len(m.Entries))})
}
func (r *Raft) forceAppendEntries(m pb.Message) {
// not sure why this would work...
//if len(m.Entries) == 0 {
// return
//}
// if the len(INCOMING_LOG) >= len(CURRENT_LOG_SINCE_INDEX), simply overwrite the current with the incoming
//if uint64(len(m.Entries)) >= r.RaftLog.LastIndex() - m.Index {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index]
// r.RaftLog.appendLog(m.Entries)
//} else {
// for i, _ := range m.Entries {
// if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term{
// } else {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
// }
//}
for i, _ := range m.Entries {
if m.Index + (uint64(i)) + 1 > r.RaftLog.LastIndex() {
r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
r.RaftLog.appendLog(m.Entries[i:])
break
}
if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term {
} else {
r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
r.RaftLog.appendLog(m.Entries[i:])
r.RaftLog.stabled = m.Entries[i].Index - 1
break
}
}
//r.RaftLog.stabled = r.RaftLog.LastIndex()
//for i, _ := range m.Entries {
// if m.Index + (uint64(i)) + 1 > r.RaftLog.LastIndex() {
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
// if r.RaftLog.entries[m.Index + (uint64(i))].Index == m.Entries[i].Index && r.RaftLog.entries[m.Index + (uint64(i))].Term == m.Entries[i].Term{
// } else {
// r.RaftLog.entries = r.RaftLog.entries[:m.Index + (uint64(i))]
// r.RaftLog.appendLog(m.Entries[i:])
// break
// }
//}
}
func (r *Raft) updateCommittedIndex(m pb.Message) {
if len(m.Entries) == 0 {
r.RaftLog.committed = min(m.Commit, m.Index)
return
}
r.RaftLog.committed = min(m.Commit, r.RaftLog.LastIndex())
}
func (r *Raft) countAppendAtIndex(index uint64) int {
count := 0
for _, p := range r.Prs {
if p.Match >= index {
count += 1
}
}
return count
}
func (r *Raft) handleAppendResponse(m pb.Message) {
if m.Reject {
r.Prs[m.From].Next--
// resend append message
r.sendAppend(m.From)
} else {
// update the progress
//if r.Prs[m.From].Match < r.Prs[r.id].Match {
// //r.Prs[m.From].Match = r.Prs[m.From].Next
// //r.Prs[m.From].Next++
// r.Prs[m.From].Match = r.Prs[r.id].Match
// r.Prs[m.From].Next = r.Prs[m.From].Match + 1
//}
r.Prs[m.From].Match = m.Index
r.Prs[m.From].Next = r.Prs[m.From].Match + 1
// make sure the 'log to commit' is in the current term
indexToCommit := r.Prs[m.From].Match
termForIndexToCommit, _ := r.RaftLog.Term(indexToCommit)
if termForIndexToCommit == r.Term && r.RaftLog.committed < indexToCommit {
if r.countAppendAtIndex(indexToCommit) > len(r.Prs)-r.countAppendAtIndex(indexToCommit) {
r.RaftLog.committed = indexToCommit
r.broadcastAppendEntries()
}
}
}
}
// handleAppendEntries handles the AppendEntries RPC request
func (r *Raft) handleAppendEntries(m pb.Message) {
if m.Term >= r.Term {
// if not a follower
r.becomeFollower(m.Term, m.From)
// check if m.prevLog exists
targetTerm, err := r.RaftLog.Term(m.Index)
if err != nil && m.Index > 0 {
r.rejectAppendEntries(m)
return
}
if targetTerm != m.LogTerm {
r.rejectAppendEntries(m)
return
} else {
r.forceAppendEntries(m)
r.updateCommittedIndex(m)
r.acceptAppendEntries(m)
//r.RaftLog.stabled = r.RaftLog.committed
return
}
} else {
r.rejectAppendEntries(m)
return
}
}
// handleHeartbeat handles the Heartbeat RPC request
func (r *Raft) handleHeartbeat(m pb.Message) {
r.Vote = 0
}
// handleSnapshot handles the Snapshot RPC request
func (r *Raft) handleSnapshot(m pb.Message) {
// Your Code Here (2C).
}
// addNode adds a new node to the raft group
func (r *Raft) addNode(id uint64) {
// Your Code Here (3A).
}
// removeNode removes a node from the raft group
func (r *Raft) removeNode(id uint64) {
// Your Code Here (3A).
}
| r.State = StateCandidate
r.Term++
r.Lead = 0
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
r.votes = map[uint64]bool{}
}
| identifier_body |
SEODWARF_S2_TURB_fMask.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on the 23 March 2017
author : olivier regniers (i-sea)
Application : reads metadata, applies a coastline (land/water) mask, performs atmospheric correction using the DOS1 approach and exports the result to outFolder, estimates turbidity with the method of Dogliotti (2015) based on red and NIR reflectance, exports the result as an image, and writes an xml file containing metadata about the image and statistics on the estimated turbidity
example : python C:\Users\ucfadko\Desktop\SEODWARF_S2_TURB_fMask.py -inFolder C:\Users\ucfadko\Desktop\S2A_MSIL1C_20170203T083141_N0204_R021_T36SWD_20170203T083527 -outFolder C:\Users\ucfadko\Desktop\S2_Results
Milto's example: python C:\Users\Milto\Documents\TEPAK\RISE\SEO_DWARF_algorithms\Code\SEODWARF_S2_TURB_fMask.py -inFolder C:\Users\Milto\Documents\TEPAK\RISE\TestData\S2A_MSIL1C_20170225T091021_N0204_R050_T35SLA_20170225T091220 -outFolder C:\Users\Milto\Documents\TEPAK\RISE\TestResults
note1 : the standard folder and file structure of raw L1C S2 data must be preserved for the code to run properly
note2 : the code needs a coastline shapefile to apply the land/water mask; check within the code that the path to the shapefile is correct (around line 170)
note 3 : the code needs fmask to perform cloud masking; check within the code that the path to the fmask python functions is correct (around line 126), or add that directory to the working path
"""
import os
import sys
import gdal, ogr
from gdalconst import *
import argparse
import subprocess
import numpy
import math
import xml.etree.ElementTree as ET
import glob
import re
# argument parser
parser = argparse.ArgumentParser()
parser.add_argument("-inFolder",
required=True, | required=True,
help="path to folder where results are stored",
metavar='<string>')
params = vars(parser.parse_args())
inFolder = params['inFolder']
outFolder = params['outFolder']
# recover list of bands to be processed
bandNum10m = [4,3,2,8]
bandNumAll = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B10', 'B11', 'B12']
bandList = []
allFiles = glob.glob(inFolder + '\*\GRANULE\*\IMG_DATA\*.jp2')
for b in range(len(bandNumAll)):
for file in allFiles:
if file.endswith('_%s.jp2' % bandNumAll[b]):
bandList.append(file)
# recover metadata file
os.listdir(inFolder + '/GRANULE/')
MTD_MSIL1C = inFolder + '/MTD_MSIL1C.xml'
MTD_TL = inFolder + '/GRANULE/' + os.listdir(inFolder + '/GRANULE/')[0] + '/MTD_TL.xml'
# --- read metadata ---
print('Read metadata files')
# search tile name in input folder
match = re.search('([A-Z][1-9][1-9][A-Z][A-Z][A-Z])', inFolder)
tile = match.group(0)
EPSG_code = 'EPSG_326' + tile[1:3]
dataset = gdal.Open('SENTINEL2_L1C:%s:10m:%s' % (MTD_MSIL1C, EPSG_code), GA_ReadOnly)
if dataset is None:
print('Unable to open image')
sys.exit(1)
MTD = dataset.GetMetadata()
wkt_projection =dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
# print MTD
# read metadata per band
bandB = dataset.GetRasterBand(3)
bandG = dataset.GetRasterBand(2)
bandR = dataset.GetRasterBand(1)
bandIR = dataset.GetRasterBand(4)
MTD_B = bandB.GetMetadata()
MTD_G = bandG.GetMetadata()
MTD_R = bandR.GetMetadata()
MTD_IR = bandIR.GetMetadata()
# --- recover values from metadata ---
ULX = geotransform[0]
ULY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
nl = dataset.RasterYSize
nc = dataset.RasterXSize
DATE = MTD['GENERATION_TIME'][0:10]
HOUR = MTD['GENERATION_TIME'][11:16]
if MTD['DATATAKE_1_SPACECRAFT_NAME'] == 'Sentinel-2A':
sensor = 'S2A'
elif MTD['DATATAKE_1_SPACECRAFT_NAME'] == 'Sentinel-2B':
sensor = 'S2B'
imageNameLong = MTD['PRODUCT_URI']
# pos_ = [pos for pos, char in enumerate(imageNameLong) if char == '_']
# tile = imageNameLong[pos_[4]+1:pos_[4]+7]
dst_earth_sun = float(MTD['REFLECTANCE_CONVERSION_U'])
QUANT = int(MTD['QUANTIFICATION_VALUE'])
ESUN = [MTD_R['SOLAR_IRRADIANCE'], MTD_G['SOLAR_IRRADIANCE'], MTD_B['SOLAR_IRRADIANCE'], MTD_IR['SOLAR_IRRADIANCE']]
# read xml file and get extra metadata
root = ET.parse(MTD_TL).getroot()
tmp = root.find('.//Mean_Sun_Angle/ZENITH_ANGLE')
thetas = 90 - float(tmp.text)
print('DONE\n')
# --- fMask cloud masking ---
# print('Cloud mask with fMask')
# create ouput image name and output folder
imageName = sensor + '_' + DATE + '_' + tile
outFolder2 = outFolder + '/' + imageName
"""
if not os.path.exists(outFolder2):
os.mkdir(outFolder2)
out_cloud_Mask = outFolder2 + '/cloud_FMask.tif'
if not os.path.isfile(out_cloud_Mask): # if mask doesn't already exist
os.chdir('C:\Users\Olivier\Anaconda2\Scripts') # change path to fmask folder
# create virtual raster
outVRT = outFolder2 + '/allbands.vrt'
cmd = 'gdalbuildvrt -resolution user -tr 20 20 -separate ' + outVRT
for b in bandList:
cmd = cmd + ' ' + b
subprocess.call(cmd, shell=True)
# create angle image
outAngles = outFolder2 + '/angles.img'
cmd = 'python fmask_sentinel2makeAnglesImage.py -i ' + MTD_TL + ' -o ' + outAngles
subprocess.call(cmd, shell=True)
# create mask
outFMASK = outFolder2 + '/cloud.img'
cmd = 'python fmask_sentinel2Stacked.py -a ' + outVRT + ' -z ' + outAngles + ' -o ' + outFMASK
subprocess.call(cmd, shell=True)
# resample mask
cmd = 'gdalwarp -tr 10 10 -ot Byte ' + outFMASK + ' ' + out_cloud_Mask
subprocess.call(cmd, shell=True)
print('DONE\n')
else:
print('Cloud masking already done\n')
"""
# --- DOS1 correction ---
# rasterize input shapefile mask
print('DOS1 atmospheric correction')
# check if DOS1 correction has already been applied
if not os.path.exists(outFolder2 + '/TOC'):
os.mkdir(outFolder2 + '/TOC')
DOS_red = outFolder2+'/TOC/' + imageName + '_B04_TOC.tif'
if not os.path.isfile(DOS_red): # if outFile does not already exist
pathMaskShp = 'C:/Users/ucfadko/Desktop/Coastline_EUROPE_UTMZ33N/Coastline_EUROPE_UTMZ33N.shp'
outLW_Mask = outFolder2 + '/LW_mask.tif'
# check if shapefile exists
if not os.path.isfile(pathMaskShp):
print('Coastline shapefile is not in the right folder')
sys.exit(1)
Xmin = ULX
Xmax = ULX + nc*pixelWidth
Ymin = ULY - nl*pixelWidth
Ymax = ULY
print ('Water/Land mask creation')
cmd = 'gdal_rasterize -a id -ot Byte -te ' + str(Xmin) + ' ' + str(Ymin) + ' ' + str(Xmax) + ' ' + str(Ymax) + ' -tr ' + str(pixelWidth)+ ' ' + str(pixelWidth) + ' ' + pathMaskShp + ' ' + outLW_Mask
# print cmd
subprocess.call(cmd, shell=True)
print ('DONE\n')
# read land/water mask
ds_LW_mask = gdal.Open(outLW_Mask, GA_ReadOnly)
if ds_LW_mask is None:
print('Unable to open land/water mask')
sys.exit(1)
LW_mask = ds_LW_mask.GetRasterBand(1).ReadAsArray(0, 0, ds_LW_mask.RasterXSize, ds_LW_mask.RasterYSize)
"""
# read cloud mask
ds_cloud_mask = gdal.Open(out_cloud_Mask, GA_ReadOnly)
if ds_cloud_mask is None:
print('Unable to open cloud mask')
sys.exit(1)
cloud_mask = ds_cloud_mask.GetRasterBand(1).ReadAsArray(0, 0, ds_cloud_mask.RasterXSize, ds_cloud_mask.RasterYSize)
"""
# loop through bands (beware: the band order is R,G,B,IR, so raster band 1 is the red band)
for b in range( dataset.RasterCount ):
# read raster band
band = dataset.GetRasterBand(b+1).ReadAsArray(0, 0, dataset.RasterXSize, dataset.RasterYSize)
# apply masks
band = numpy.where((LW_mask==0), band, 0) # we keep pixels flagged as water (cloud_mask==5) and snow (cloud_mask==4) as turbid waters can be flagged as snow
# PREVIOUS LINE WITH CLOUD MASK : band = numpy.where((( cloud_mask==5) | (cloud_mask==4)) & (LW_mask==0), band, 0) # we keep pixels flagged as water (cloud_mask==5) and snow (cloud_mask==4) as turbid waters can be flagged as snow
# band = numpy.where(LW_mask==0, band, 0)
# convert DN to TOA reflectance
band = band.astype(float)
band = band / QUANT
# convert TOA reflectance to TOA radiance
band = (band * float(ESUN[b]) * math.cos(math.radians(thetas))) / (math.pi * math.pow(dst_earth_sun,2))
# convert 2D array to 1D array, discard zeros and compute the 1st percentile
tmp = band.reshape(-1)
tmp = tmp[tmp!=0.0]
Lmin1 = numpy.percentile(tmp,1)
# calculate path radiance
Lp = Lmin1 - (0.01 * float(ESUN[b]) * math.cos(math.radians(thetas))) / (math.pi * math.pow(dst_earth_sun,2))
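# DOS1 rationale (added explanatory note): the darkest pixels of the scene (the 1st percentile
# of the non-zero radiances) are assumed to correspond to roughly 1% surface reflectance; any
# radiance above that theoretical minimum is attributed to atmospheric path radiance (haze)
# and is subtracted from every pixel of the band before converting back to reflectance.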
# calculate corrected reflectance
band_BOA = (math.pi * (band - Lp) * math.pow(dst_earth_sun,2)) / (float(ESUN[b]) * math.cos(math.radians(thetas)))
# write output data
outFile = outFolder2+'/TOC/' + imageName + '_B0' + str(bandNum10m[b]) + '_TOC.tif'
DataType = gdal.GDT_Float32
driver = gdal.GetDriverByName("GTiff")
outdata = driver.Create(outFile, nc, nl, 1, DataType) # Create expects (xsize=columns, ysize=rows)
outdata.SetGeoTransform((ULX, pixelWidth, 0, ULY, 0, pixelHeight)) # Georeference the image
outdata.SetProjection(wkt_projection) # Write projection information
outdata.GetRasterBand(1).WriteArray(band_BOA) # Write the array to the file
outdata = None
# clear datasets
dataset = None
ds_LW_mask = None
ds_cloud_mask = None
# remove tmp_mask file
os.remove(outFolder2 + '/LW_mask.tif')
print('DONE\n')
else:
print('Atmospheric correction already done\n')
# plt.imshow(bandB_mask)
# plt.show()
# --- Turbidity DOGLIOTTI 2015
print('compute turbidity')
# read red and IR bands
redBOAFile = outFolder2 + '/TOC/' + imageName + '_B04_TOC.tif'
ds_redBOA = gdal.Open(redBOAFile, GA_ReadOnly)
if ds_redBOA is None:
print('Unable to open red band')
sys.exit(1)
redBOA = ds_redBOA.GetRasterBand(1).ReadAsArray(0, 0, ds_redBOA.RasterXSize, ds_redBOA.RasterYSize)
irBOAFile = outFolder2+'/TOC/' + imageName + '_B08_TOC.tif'
ds_irBOA = gdal.Open(irBOAFile, GA_ReadOnly)
if ds_irBOA is None:
print('Unable to open ir band')
sys.exit(1)
irBOA = ds_irBOA.GetRasterBand(1).ReadAsArray(0, 0, ds_irBOA.RasterXSize, ds_irBOA.RasterYSize)
# create numpy array filled with nan for turbidity
TURB = numpy.full(redBOA.shape, numpy.nan)
# dogliotti parameters for S2 red and IR bands obtained from ACOLITE ancillary data
A_red = 366.14
C_red = 0.19563
A_ir = 1913.65
C_ir = 0.1913
# estimate turbidity with DOGLIOTTI 2015 approach
for i in range(0,nl): # loop over rows (arrays read with ReadAsArray are shaped (nl, nc))
for j in range(0,nc): # loop over columns
if redBOA[i,j] > 0 and irBOA[i,j] > 0: # avoid any pixel with negative or zero value
if redBOA[i,j] < 0.05:
TURB[i,j] = (A_red * redBOA[i,j]) / (1 - (redBOA[i,j] / C_red))
elif redBOA[i,j] > 0.07:
TURB[i,j] = (A_ir * irBOA[i,j]) / (1 - (irBOA[i,j] / C_ir))
elif redBOA[i,j] >= 0.05 and redBOA[i,j] <= 0.07:
w = (0.07 - redBOA[i,j])/(0.07 - 0.05)
TURB_red = (A_red * redBOA[i,j]) / (1 - (redBOA[i,j] / C_red))
TURB_ir = (A_ir * irBOA[i,j]) / (1 - (irBOA[i,j] / C_ir))
TURB[i,j] = w * TURB_red + (1 - w) * TURB_ir
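# Note on the blending above (added explanatory note): Dogliotti et al. (2015) use a
# single-band formula T = A * rho / (1 - rho / C); the red band saturates in very turbid
# water, so the NIR band is used when rho_red > 0.07, and the linear weight w blends the
# two estimates between 0.05 and 0.07 to avoid a discontinuity at the switch-over.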
# display progress
step = round(nl/10)
if i % step == 0 and i != 0:
print (str(int(round(i/float(nl)*100))) + '% turbidity completed')
# write output data
if not os.path.exists(outFolder2 + '/TURB'):
os.mkdir(outFolder2 + '/TURB')
outFile_TURB = outFolder2 + '/TURB/T_DOGLIOTTI2015.tif'
DataType = gdal.GDT_Float32
driver = gdal.GetDriverByName("GTiff")
outdata = driver.Create(outFile_TURB, nc, nl, 1, DataType) # Create expects (xsize=columns, ysize=rows)
outdata.SetGeoTransform((ULX, pixelWidth, 0, ULY, 0, pixelHeight)) # Georeference the image
outdata.SetProjection(wkt_projection) # Write projection information
outdata.GetRasterBand(1).WriteArray(TURB) # Write the array to the file
outdata = None
ds_redBOA = None
ds_irBOA = None
print('DONE\n') | help="path to folder containing S2 data (first folder, not IMG_DATA)",
metavar='<string>')
parser.add_argument("-outFolder", | random_line_split |
SEODWARF_S2_TURB_fMask.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on the 23 March 2017
author : olivier regniers (i-sea)
Application : reads metadata, applies a coastline mask, performs atmospheric correction using the DOS1 approach and exports the result in outFolder, estimates turbidity with the method from Dogliotti 2015 based on red and NIR reflectance and exports the result as an image, and writes an xml file containing metadata about the image and statistics on the estimated turbidity
example : python C:\Users\ucfadko\Desktop\SEODWARF_S2_TURB_fMask.py -inFolder C:\Users\ucfadko\Desktop\S2A_MSIL1C_20170203T083141_N0204_R021_T36SWD_20170203T083527 -outFolder C:\Users\ucfadko\Desktop\S2_Results
Milto's example: python C:\Users\Milto\Documents\TEPAK\RISE\SEO_DWARF_algorithms\Code\SEODWARF_S2_TURB_fMask.py -inFolder C:\Users\Milto\Documents\TEPAK\RISE\TestData\S2A_MSIL1C_20170225T091021_N0204_R050_T35SLA_20170225T091220 -outFolder C:\Users\Milto\Documents\TEPAK\RISE\TestResults
note1 : the standard folder and file structure of raw L1C S2 data has to be preserved for the code to run properly
note2 : the code needs a coastline shapefile to apply the land/water mask, check within the code that the path to the shapefile is correct (around line 170)
note3 : the code needs fmask to perform cloud masking, check within the code that the path to the fmask python functions is correct (around line 126) or add the directory to the working path
import os
import sys
import gdal, ogr
from gdalconst import *
import argparse
import subprocess
import numpy
import math
import xml.etree.ElementTree as ET
import glob
import re
# argument parser
parser = argparse.ArgumentParser()
parser.add_argument("-inFolder",
required=True,
help="path to folder containing S2 data (first folder, not IMG_DATA)",
metavar='<string>')
parser.add_argument("-outFolder",
required=True,
help="path to folder where results are stored",
metavar='<string>')
params = vars(parser.parse_args())
inFolder = params['inFolder']
outFolder = params['outFolder']
# recover list of bands to be processed
bandNum10m = [4,3,2,8]
bandNumAll = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B10', 'B11', 'B12']
bandList = []
allFiles = glob.glob(inFolder + '\*\GRANULE\*\IMG_DATA\*.jp2')
for b in range(len(bandNumAll)):
for file in allFiles:
if file.endswith('_%s.jp2' % bandNumAll[b]):
bandList.append(file)
# recover metadata file
os.listdir(inFolder + '/GRANULE/')
MTD_MSIL1C = inFolder + '/MTD_MSIL1C.xml'
MTD_TL = inFolder + '/GRANULE/' + os.listdir(inFolder + '/GRANULE/')[0] + '/MTD_TL.xml'
# --- read metadata ---
print('Read metadata files')
# search tile name in input folder
match = re.search('([A-Z][0-9][0-9][A-Z][A-Z][A-Z])', inFolder) # allow a leading zero in the UTM zone number (e.g. T09xxx)
tile = match.group(0)
EPSG_code = 'EPSG_326' + tile[1:3]
dataset = gdal.Open('SENTINEL2_L1C:%s:10m:%s' % (MTD_MSIL1C, EPSG_code), GA_ReadOnly)
if dataset is None:
print('Unable to open image')
sys.exit(1)
MTD = dataset.GetMetadata()
wkt_projection =dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
# print MTD
# read metadata per band
bandB = dataset.GetRasterBand(3)
bandG = dataset.GetRasterBand(2)
bandR = dataset.GetRasterBand(1)
bandIR = dataset.GetRasterBand(4)
MTD_B = bandB.GetMetadata()
MTD_G = bandG.GetMetadata()
MTD_R = bandR.GetMetadata()
MTD_IR = bandIR.GetMetadata()
# --- recover values from metadata ---
ULX = geotransform[0]
ULY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
nl = dataset.RasterYSize
nc = dataset.RasterXSize
DATE = MTD['GENERATION_TIME'][0:10]
HOUR = MTD['GENERATION_TIME'][11:16]
if MTD['DATATAKE_1_SPACECRAFT_NAME'] == 'Sentinel-2A':
sensor = 'S2A'
elif MTD['DATATAKE_1_SPACECRAFT_NAME'] == 'Sentinel-2B':
sensor = 'S2B'
else:
print('Unknown spacecraft name: ' + MTD['DATATAKE_1_SPACECRAFT_NAME'])
sys.exit(1)
imageNameLong = MTD['PRODUCT_URI']
# pos_ = [pos for pos, char in enumerate(imageNameLong) if char == '_']
# tile = imageNameLong[pos_[4]+1:pos_[4]+7]
dst_earth_sun = float(MTD['REFLECTANCE_CONVERSION_U'])
QUANT = int(MTD['QUANTIFICATION_VALUE'])
ESUN = [MTD_R['SOLAR_IRRADIANCE'], MTD_G['SOLAR_IRRADIANCE'], MTD_B['SOLAR_IRRADIANCE'], MTD_IR['SOLAR_IRRADIANCE']]
# read xml file and get extra metadata
root = ET.parse(MTD_TL).getroot()
tmp = root.find('.//Mean_Sun_Angle/ZENITH_ANGLE')
thetas = 90 - float(tmp.text)
print('DONE\n')
# --- fMask cloud masking ---
# print('Cloud mask with fMask')
# create output image name and output folder
imageName = sensor + '_' + DATE + '_' + tile
outFolder2 = outFolder + '/' + imageName
"""
if not os.path.exists(outFolder2):
os.mkdir(outFolder2)
out_cloud_Mask = outFolder2 + '/cloud_FMask.tif'
if not os.path.isfile(out_cloud_Mask): # if mask doesn't already exist
os.chdir('C:\Users\Olivier\Anaconda2\Scripts') # change path to fmask folder
# create virtual raster
outVRT = outFolder2 + '/allbands.vrt'
cmd = 'gdalbuildvrt -resolution user -tr 20 20 -separate ' + outVRT
for b in bandList:
cmd = cmd + ' ' + b
subprocess.call(cmd, shell=True)
# create angle image
outAngles = outFolder2 + '/angles.img'
cmd = 'python fmask_sentinel2makeAnglesImage.py -i ' + MTD_TL + ' -o ' + outAngles
subprocess.call(cmd, shell=True)
# create mask
outFMASK = outFolder2 + '/cloud.img'
cmd = 'python fmask_sentinel2Stacked.py -a ' + outVRT + ' -z ' + outAngles + ' -o ' + outFMASK
subprocess.call(cmd, shell=True)
# resample mask
cmd = 'gdalwarp -tr 10 10 -ot Byte ' + outFMASK + ' ' + out_cloud_Mask
subprocess.call(cmd, shell=True)
print('DONE\n')
else:
print('Cloud masking already done\n')
"""
# --- DOS1 correction ---
# rasterize input shapefile mask
print('DOS1 atmospheric correction')
# check if DOS1 correction has already been applied
if not os.path.exists(outFolder2 + '/TOC'):
|
DOS_red = outFolder2+'/TOC/' + imageName + '_B04_TOC.tif'
if not os.path.isfile(DOS_red): # if outFile does not already exist
pathMaskShp = 'C:/Users/ucfadko/Desktop/Coastline_EUROPE_UTMZ33N/Coastline_EUROPE_UTMZ33N.shp'
outLW_Mask = outFolder2 + '/LW_mask.tif'
# check if shapefile exists
if not os.path.isfile(pathMaskShp):
print('Coastline shapefile is not in the right folder')
sys.exit(1)
Xmin = ULX
Xmax = ULX + nc*pixelWidth
Ymin = ULY - nl*pixelWidth
Ymax = ULY
print ('Water/Land mask creation')
cmd = 'gdal_rasterize -a id -ot Byte -te ' + str(Xmin) + ' ' + str(Ymin) + ' ' + str(Xmax) + ' ' + str(Ymax) + ' -tr ' + str(pixelWidth)+ ' ' + str(pixelWidth) + ' ' + pathMaskShp + ' ' + outLW_Mask
# print cmd
subprocess.call(cmd, shell=True)
print ('DONE\n')
# read land/water mask
ds_LW_mask = gdal.Open(outLW_Mask, GA_ReadOnly)
if ds_LW_mask is None:
print('Unable to open land/water mask')
sys.exit(1)
LW_mask = ds_LW_mask.GetRasterBand(1).ReadAsArray(0, 0, ds_LW_mask.RasterXSize, ds_LW_mask.RasterYSize)
"""
# read cloud mask
ds_cloud_mask = gdal.Open(out_cloud_Mask, GA_ReadOnly)
if ds_cloud_mask is None:
print('Unable to open cloud mask')
sys.exit(1)
cloud_mask = ds_cloud_mask.GetRasterBand(1).ReadAsArray(0, 0, ds_cloud_mask.RasterXSize, ds_cloud_mask.RasterYSize)
"""
# loop through bands (beware: the band order is R,G,B,IR, so GDAL band 1 is the red band)
for b in range( dataset.RasterCount ):
# read raster band
band = dataset.GetRasterBand(b+1).ReadAsArray(0, 0, dataset.RasterXSize, dataset.RasterYSize)
# apply masks
band = numpy.where((LW_mask==0), band, 0) # we keep pixels flagged as water (cloud_mask==5) and snow (cloud_mask==4) as turbid waters can be flagged as snow
# PREVIOUS LINE WITH CLOUD MASK : band = numpy.where((( cloud_mask==5) | (cloud_mask==4)) & (LW_mask==0), band, 0) # we keep pixels flagged as water (cloud_mask==5) and snow (cloud_mask==4) as turbid waters can be flagged as snow
# band = numpy.where(LW_mask==0, band, 0)
# convert DN to TOA reflectance
band = band.astype(float)
band = band / QUANT
# convert TOA reflectance to TOA radiance
band = (band * float(ESUN[b]) * math.cos(math.radians(thetas))) / (math.pi * math.pow(dst_earth_sun,2))
# convert 2D array to 1D array, discard zeros and compute the 1st percentile
tmp = band.reshape(-1)
tmp = tmp[tmp!=0.0]
Lmin1 = numpy.percentile(tmp,1)
# calculate path radiance
Lp = Lmin1 - (0.01 * float(ESUN[b]) * math.cos(math.radians(thetas))) / (math.pi * math.pow(dst_earth_sun,2))
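# DOS1 rationale (added explanatory note): the darkest pixels of the scene (the 1st percentile
# of the non-zero radiances) are assumed to correspond to roughly 1% surface reflectance; any
# radiance above that theoretical minimum is attributed to atmospheric path radiance (haze)
# and is subtracted from every pixel of the band before converting back to reflectance.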
# calculate corrected reflectance
band_BOA = (math.pi * (band - Lp) * math.pow(dst_earth_sun,2)) / (float(ESUN[b]) * math.cos(math.radians(thetas)))
# write output data
outFile = outFolder2+'/TOC/' + imageName + '_B0' + str(bandNum10m[b]) + '_TOC.tif'
DataType = gdal.GDT_Float32
driver = gdal.GetDriverByName("GTiff")
outdata = driver.Create(outFile, nc, nl, 1, DataType) # Create expects (xsize=columns, ysize=rows)
outdata.SetGeoTransform((ULX, pixelWidth, 0, ULY, 0, pixelHeight)) # Georeference the image
outdata.SetProjection(wkt_projection) # Write projection information
outdata.GetRasterBand(1).WriteArray(band_BOA) # Write the array to the file
outdata = None
# clear datasets
dataset = None
ds_LW_mask = None
ds_cloud_mask = None
# remove tmp_mask file
os.remove(outFolder2 + '/LW_mask.tif')
print('DONE\n')
else:
print('Atmospheric correction already done\n')
# plt.imshow(bandB_mask)
# plt.show()
# --- Turbidity DOGLIOTTI 2015
print('compute turbidity')
# read red and IR bands
redBOAFile = outFolder2 + '/TOC/' + imageName + '_B04_TOC.tif'
ds_redBOA = gdal.Open(redBOAFile, GA_ReadOnly)
if ds_redBOA is None:
print('Unable to open red band')
sys.exit(1)
redBOA = ds_redBOA.GetRasterBand(1).ReadAsArray(0, 0, ds_redBOA.RasterXSize, ds_redBOA.RasterYSize)
irBOAFile = outFolder2+'/TOC/' + imageName + '_B08_TOC.tif'
ds_irBOA = gdal.Open(irBOAFile, GA_ReadOnly)
if ds_irBOA is None:
print('Unable to open ir band')
sys.exit(1)
irBOA = ds_irBOA.GetRasterBand(1).ReadAsArray(0, 0, ds_irBOA.RasterXSize, ds_irBOA.RasterYSize)
# create numpy array filled with nan for turbidity
TURB = numpy.full(redBOA.shape, numpy.nan)
# dogliotti parameters for S2 red and IR bands obtained from ACOLITE ancillary data
A_red = 366.14
C_red = 0.19563
A_ir = 1913.65
C_ir = 0.1913
# estimate turbidity with DOGLIOTTI 2015 approach
for i in range(0,nl): # loop over rows (arrays read with ReadAsArray are shaped (nl, nc))
for j in range(0,nc): # loop over columns
if redBOA[i,j] > 0 and irBOA[i,j] > 0: # avoid any pixel with negative or zero value
if redBOA[i,j] < 0.05:
TURB[i,j] = (A_red * redBOA[i,j]) / (1 - (redBOA[i,j] / C_red))
elif redBOA[i,j] > 0.07:
TURB[i,j] = (A_ir * irBOA[i,j]) / (1 - (irBOA[i,j] / C_ir))
elif redBOA[i,j] >= 0.05 and redBOA[i,j] <= 0.07:
w = (0.07 - redBOA[i,j])/(0.07 - 0.05)
TURB_red = (A_red * redBOA[i,j]) / (1 - (redBOA[i,j] / C_red))
TURB_ir = (A_ir * irBOA[i,j]) / (1 - (irBOA[i,j] / C_ir))
TURB[i,j] = w * TURB_red + (1 - w) * TURB_ir
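# Note on the blending above (added explanatory note): Dogliotti et al. (2015) use a
# single-band formula T = A * rho / (1 - rho / C); the red band saturates in very turbid
# water, so the NIR band is used when rho_red > 0.07, and the linear weight w blends the
# two estimates between 0.05 and 0.07 to avoid a discontinuity at the switch-over.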
# display progress
step = round(nl/10)
if i % step == 0 and i != 0:
print (str(int(round(i/float(nl)*100))) + '% turbidity completed')
# write output data
if not os.path.exists(outFolder2 + '/TURB'):
os.mkdir(outFolder2 + '/TURB')
outFile_TURB = outFolder2 + '/TURB/T_DOGLIOTTI2015.tif'
DataType = gdal.GDT_Float32
driver = gdal.GetDriverByName("GTiff")
outdata = driver.Create(outFile_TURB, nc, nl, 1, DataType) # Create expects (xsize=columns, ysize=rows)
outdata.SetGeoTransform((ULX, pixelWidth, 0, ULY, 0, pixelHeight)) # Georeference the image
outdata.SetProjection(wkt_projection) # Write projection information
outdata.GetRasterBand(1).WriteArray(TURB) # Write the array to the file
outdata = None
ds_redBOA = None
ds_irBOA = None
print('DONE\n')
| os.mkdir(outFolder2 + '/TOC') | conditional_block |
canvas.go | package pixelgl
import (
"fmt"
"image/color"
"github.com/faiface/glhf"
"github.com/faiface/mainthread"
"github.com/faiface/pixel"
"github.com/go-gl/mathgl/mgl32"
"github.com/pkg/errors"
)
// Canvas is an off-screen rectangular BasicTarget and Picture at the same time, that you can draw
// onto.
//
// It supports TrianglesPosition, TrianglesColor, TrianglesPicture and PictureColor.
type Canvas struct {
gf *GLFrame
shader *glhf.Shader
cmp pixel.ComposeMethod
mat mgl32.Mat3
col mgl32.Vec4
smooth bool
sprite *pixel.Sprite
}
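// Illustrative usage sketch (not part of the original source; assumes an existing
// pixelgl Window `win` and a `*pixel.Sprite` named `sprite`):
//
//	canvas := NewCanvas(pixel.R(0, 0, 320, 240))
//	canvas.Clear(color.Transparent)
//	sprite.Draw(canvas, pixel.IM.Moved(canvas.Bounds().Center()))
//	canvas.Draw(win, pixel.IM.Moved(win.Bounds().Center()))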
var _ pixel.ComposeTarget = (*Canvas)(nil)
// NewCanvas creates a new empty, fully transparent Canvas with given bounds.
func NewCanvas(bounds pixel.Rect) *Canvas {
c := &Canvas{
gf: NewGLFrame(bounds),
mat: mgl32.Ident3(),
col: mgl32.Vec4{1, 1, 1, 1},
}
c.SetBounds(bounds)
var shader *glhf.Shader
mainthread.Call(func() {
var err error
shader, err = glhf.NewShader(
canvasVertexFormat,
canvasUniformFormat,
canvasVertexShader,
canvasFragmentShader,
)
if err != nil {
panic(errors.Wrap(err, "failed to create Canvas, there's a bug in the shader"))
}
})
c.shader = shader
return c
}
// MakeTriangles creates a specialized copy of the supplied Triangles that draws onto this Canvas.
//
// TrianglesPosition, TrianglesColor and TrianglesPicture are supported.
func (c *Canvas) MakeTriangles(t pixel.Triangles) pixel.TargetTriangles {
return &canvasTriangles{
GLTriangles: NewGLTriangles(c.shader, t),
dst: c,
}
}
// MakePicture creates a specialized copy of the supplied Picture that draws onto this Canvas.
//
// PictureColor is supported.
func (c *Canvas) MakePicture(p pixel.Picture) pixel.TargetPicture {
if cp, ok := p.(*canvasPicture); ok {
return &canvasPicture{
GLPicture: cp.GLPicture,
dst: c,
}
}
if gp, ok := p.(GLPicture); ok {
return &canvasPicture{
GLPicture: gp,
dst: c,
}
}
return &canvasPicture{
GLPicture: NewGLPicture(p),
dst: c,
}
}
// SetMatrix sets a Matrix that every point will be projected by.
func (c *Canvas) SetMatrix(m pixel.Matrix) {
for i := range m {
c.mat[i] = float32(m[i])
}
}
// SetColorMask sets a color that every color in triangles or a picture will be multiplied by.
func (c *Canvas) SetColorMask(col color.Color) {
rgba := pixel.Alpha(1)
if col != nil {
rgba = pixel.ToRGBA(col)
}
c.col = mgl32.Vec4{
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
}
}
// SetComposeMethod sets a Porter-Duff composition method to be used in the following draws onto
// this Canvas.
func (c *Canvas) SetComposeMethod(cmp pixel.ComposeMethod) {
c.cmp = cmp
}
// SetBounds resizes the Canvas to the new bounds. Old content will be preserved.
func (c *Canvas) SetBounds(bounds pixel.Rect) {
c.gf.SetBounds(bounds)
if c.sprite == nil {
c.sprite = pixel.NewSprite(nil, pixel.Rect{})
}
c.sprite.Set(c, c.Bounds())
//c.sprite.SetMatrix(pixel.IM.Moved(c.Bounds().Center()))
}
// Bounds returns the rectangular bounds of the Canvas.
func (c *Canvas) Bounds() pixel.Rect {
return c.gf.Bounds()
}
// SetSmooth sets whether stretched Pictures drawn onto this Canvas should be drawn smooth or
// pixely.
func (c *Canvas) SetSmooth(smooth bool) {
c.smooth = smooth
}
// Smooth returns whether stretched Pictures drawn onto this Canvas are set to be drawn smooth or
// pixely.
func (c *Canvas) Smooth() bool {
return c.smooth
}
// must be manually called inside mainthread
func (c *Canvas) setGlhfBounds() {
_, _, bw, bh := intBounds(c.gf.Bounds())
glhf.Bounds(0, 0, bw, bh)
}
// must be manually called inside mainthread
func setBlendFunc(cmp pixel.ComposeMethod) {
switch cmp {
case pixel.ComposeOver:
glhf.BlendFunc(glhf.One, glhf.OneMinusSrcAlpha)
case pixel.ComposeIn:
glhf.BlendFunc(glhf.DstAlpha, glhf.Zero)
case pixel.ComposeOut:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.Zero)
case pixel.ComposeAtop:
glhf.BlendFunc(glhf.DstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposeRover:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.One)
case pixel.ComposeRin:
glhf.BlendFunc(glhf.Zero, glhf.SrcAlpha)
case pixel.ComposeRout:
glhf.BlendFunc(glhf.Zero, glhf.OneMinusSrcAlpha)
case pixel.ComposeRatop:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.SrcAlpha)
case pixel.ComposeXor:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposePlus:
glhf.BlendFunc(glhf.One, glhf.One)
case pixel.ComposeCopy:
glhf.BlendFunc(glhf.One, glhf.Zero)
default:
panic(errors.New("Canvas: invalid compose method"))
}
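// Note (added): the blend factors above assume alpha-premultiplied colors (as documented
// for SetPixels/Pixels below), which is why ComposeOver maps to (One, OneMinusSrcAlpha)
// rather than the non-premultiplied (SrcAlpha, OneMinusSrcAlpha) pair.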
}
// Clear fills the whole Canvas with a single color.
func (c *Canvas) Clear(color color.Color) {
c.gf.Dirty()
rgba := pixel.ToRGBA(color)
// color masking
rgba = rgba.Mul(pixel.RGBA{
R: float64(c.col[0]),
G: float64(c.col[1]),
B: float64(c.col[2]),
A: float64(c.col[3]),
})
mainthread.CallNonBlock(func() {
c.setGlhfBounds()
c.gf.Frame().Begin()
glhf.Clear(
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
)
c.gf.Frame().End()
})
}
// Color returns the color of the pixel over the given position inside the Canvas.
func (c *Canvas) Color(at pixel.Vec) pixel.RGBA {
return c.gf.Color(at)
}
// Texture returns the underlying OpenGL Texture of this Canvas.
//
// Implements GLPicture interface.
func (c *Canvas) Texture() *glhf.Texture {
return c.gf.Texture()
}
// Frame returns the underlying OpenGL Frame of this Canvas.
func (c *Canvas) Frame() *glhf.Frame {
return c.gf.frame
}
// SetPixels replaces the content of the Canvas with the provided pixels. The provided slice must be
// an alpha-premultiplied RGBA sequence of correct length (4 * width * height).
func (c *Canvas) SetPixels(pixels []uint8) {
c.gf.Dirty()
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
tex.SetPixels(0, 0, tex.Width(), tex.Height(), pixels)
tex.End()
})
}
// Pixels returns an alpha-premultiplied RGBA sequence of the content of the Canvas.
func (c *Canvas) Pixels() []uint8 {
var pixels []uint8
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
pixels = tex.Pixels(0, 0, tex.Width(), tex.Height())
tex.End()
})
return pixels
}
// Draw draws the content of the Canvas onto another Target, transformed by the given Matrix, just
// like if it was a Sprite containing the whole Canvas.
func (c *Canvas) Draw(t pixel.Target, matrix pixel.Matrix) {
c.sprite.Draw(t, matrix)
}
// DrawColorMask draws the content of the Canvas onto another Target, transformed by the given
// Matrix and multiplied by the given mask, just like if it was a Sprite containing the whole Canvas.
//
// If the color mask is nil, a fully opaque white mask will be used causing no effect.
func (c *Canvas) DrawColorMask(t pixel.Target, matrix pixel.Matrix, mask color.Color) |
type canvasTriangles struct {
*GLTriangles
dst *Canvas
}
func (ct *canvasTriangles) draw(tex *glhf.Texture, bounds pixel.Rect) {
ct.dst.gf.Dirty()
// save the current state vars to avoid race condition
cmp := ct.dst.cmp
mat := ct.dst.mat
col := ct.dst.col
smt := ct.dst.smooth
mainthread.CallNonBlock(func() {
ct.dst.setGlhfBounds()
setBlendFunc(cmp)
frame := ct.dst.gf.Frame()
shader := ct.dst.shader
frame.Begin()
shader.Begin()
dstBounds := ct.dst.Bounds()
shader.SetUniformAttr(canvasBounds, mgl32.Vec4{
float32(dstBounds.Min.X),
float32(dstBounds.Min.Y),
float32(dstBounds.W()),
float32(dstBounds.H()),
})
shader.SetUniformAttr(canvasTransform, mat)
shader.SetUniformAttr(canvasColorMask, col)
if tex == nil {
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
} else {
tex.Begin()
bx, by, bw, bh := intBounds(bounds)
shader.SetUniformAttr(canvasTexBounds, mgl32.Vec4{
float32(bx),
float32(by),
float32(bw),
float32(bh),
})
if tex.Smooth() != smt {
tex.SetSmooth(smt)
}
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
tex.End()
}
shader.End()
frame.End()
})
}
func (ct *canvasTriangles) Draw() {
ct.draw(nil, pixel.Rect{})
}
type canvasPicture struct {
GLPicture
dst *Canvas
}
func (cp *canvasPicture) Draw(t pixel.TargetTriangles) {
ct := t.(*canvasTriangles)
if cp.dst != ct.dst {
panic(fmt.Errorf("(%T).Draw: TargetTriangles generated by different Canvas", cp))
}
ct.draw(cp.GLPicture.Texture(), cp.GLPicture.Bounds())
}
const (
canvasPosition int = iota
canvasColor
canvasTexCoords
canvasIntensity
)
var canvasVertexFormat = glhf.AttrFormat{
canvasPosition: {Name: "position", Type: glhf.Vec2},
canvasColor: {Name: "color", Type: glhf.Vec4},
canvasTexCoords: {Name: "texCoords", Type: glhf.Vec2},
canvasIntensity: {Name: "intensity", Type: glhf.Float},
}
const (
canvasTransform int = iota
canvasColorMask
canvasBounds
canvasTexBounds
)
var canvasUniformFormat = glhf.AttrFormat{
canvasTransform: {Name: "transform", Type: glhf.Mat3},
canvasColorMask: {Name: "colorMask", Type: glhf.Vec4},
canvasBounds: {Name: "bounds", Type: glhf.Vec4},
canvasTexBounds: {Name: "texBounds", Type: glhf.Vec4},
}
var canvasVertexShader = `
#version 330 core
in vec2 position;
in vec4 color;
in vec2 texCoords;
in float intensity;
out vec4 Color;
out vec2 TexCoords;
out float Intensity;
uniform mat3 transform;
uniform vec4 bounds;
void main() {
vec2 transPos = (transform * vec3(position, 1.0)).xy;
vec2 normPos = (transPos - bounds.xy) / bounds.zw * 2 - vec2(1, 1);
gl_Position = vec4(normPos, 0.0, 1.0);
Color = color;
TexCoords = texCoords;
Intensity = intensity;
}
`
var canvasFragmentShader = `
#version 330 core
in vec4 Color;
in vec2 TexCoords;
in float Intensity;
out vec4 color;
uniform vec4 colorMask;
uniform vec4 texBounds;
uniform sampler2D tex;
void main() {
if (Intensity == 0) {
color = colorMask * Color;
} else {
color = vec4(0, 0, 0, 0);
color += (1 - Intensity) * Color;
vec2 t = (TexCoords - texBounds.xy) / texBounds.zw;
color += Intensity * Color * texture(tex, t);
color *= colorMask;
}
}
`
| {
c.sprite.DrawColorMask(t, matrix, mask)
} | identifier_body |
canvas.go | package pixelgl
import (
"fmt"
"image/color"
"github.com/faiface/glhf"
"github.com/faiface/mainthread"
"github.com/faiface/pixel"
"github.com/go-gl/mathgl/mgl32"
"github.com/pkg/errors"
)
// Canvas is an off-screen rectangular BasicTarget and Picture at the same time, that you can draw
// onto.
//
// It supports TrianglesPosition, TrianglesColor, TrianglesPicture and PictureColor.
type Canvas struct {
gf *GLFrame
shader *glhf.Shader
cmp pixel.ComposeMethod
mat mgl32.Mat3
col mgl32.Vec4
smooth bool
sprite *pixel.Sprite
}
var _ pixel.ComposeTarget = (*Canvas)(nil)
// NewCanvas creates a new empty, fully transparent Canvas with given bounds.
func NewCanvas(bounds pixel.Rect) *Canvas {
c := &Canvas{
gf: NewGLFrame(bounds),
mat: mgl32.Ident3(),
col: mgl32.Vec4{1, 1, 1, 1},
}
c.SetBounds(bounds)
var shader *glhf.Shader
mainthread.Call(func() {
var err error
shader, err = glhf.NewShader(
canvasVertexFormat,
canvasUniformFormat,
canvasVertexShader,
canvasFragmentShader,
)
if err != nil {
panic(errors.Wrap(err, "failed to create Canvas, there's a bug in the shader"))
}
})
c.shader = shader
return c
}
// MakeTriangles creates a specialized copy of the supplied Triangles that draws onto this Canvas.
//
// TrianglesPosition, TrianglesColor and TrianglesPicture are supported.
func (c *Canvas) MakeTriangles(t pixel.Triangles) pixel.TargetTriangles {
return &canvasTriangles{
GLTriangles: NewGLTriangles(c.shader, t),
dst: c,
}
}
// MakePicture creates a specialized copy of the supplied Picture that draws onto this Canvas.
//
// PictureColor is supported.
func (c *Canvas) MakePicture(p pixel.Picture) pixel.TargetPicture {
if cp, ok := p.(*canvasPicture); ok {
return &canvasPicture{
GLPicture: cp.GLPicture,
dst: c,
}
}
if gp, ok := p.(GLPicture); ok {
return &canvasPicture{
GLPicture: gp,
dst: c,
}
}
return &canvasPicture{
GLPicture: NewGLPicture(p),
dst: c,
}
}
// SetMatrix sets a Matrix that every point will be projected by.
func (c *Canvas) SetMatrix(m pixel.Matrix) {
for i := range m {
c.mat[i] = float32(m[i])
}
}
// SetColorMask sets a color that every color in triangles or a picture will be multiplied by.
func (c *Canvas) SetColorMask(col color.Color) {
rgba := pixel.Alpha(1)
if col != nil {
rgba = pixel.ToRGBA(col)
}
c.col = mgl32.Vec4{
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
}
}
// SetComposeMethod sets a Porter-Duff composition method to be used in the following draws onto
// this Canvas.
func (c *Canvas) SetComposeMethod(cmp pixel.ComposeMethod) {
c.cmp = cmp
}
// SetBounds resizes the Canvas to the new bounds. Old content will be preserved.
func (c *Canvas) SetBounds(bounds pixel.Rect) {
c.gf.SetBounds(bounds)
if c.sprite == nil {
c.sprite = pixel.NewSprite(nil, pixel.Rect{})
}
c.sprite.Set(c, c.Bounds())
//c.sprite.SetMatrix(pixel.IM.Moved(c.Bounds().Center()))
}
// Bounds returns the rectangular bounds of the Canvas.
func (c *Canvas) Bounds() pixel.Rect {
return c.gf.Bounds()
}
// SetSmooth sets whether stretched Pictures drawn onto this Canvas should be drawn smooth or
// pixely.
func (c *Canvas) SetSmooth(smooth bool) {
c.smooth = smooth
}
// Smooth returns whether stretched Pictures drawn onto this Canvas are set to be drawn smooth or
// pixely.
func (c *Canvas) Smooth() bool {
return c.smooth
}
// must be manually called inside mainthread
func (c *Canvas) setGlhfBounds() {
_, _, bw, bh := intBounds(c.gf.Bounds())
glhf.Bounds(0, 0, bw, bh)
}
// must be manually called inside mainthread
func setBlendFunc(cmp pixel.ComposeMethod) {
switch cmp {
case pixel.ComposeOver:
glhf.BlendFunc(glhf.One, glhf.OneMinusSrcAlpha)
case pixel.ComposeIn:
glhf.BlendFunc(glhf.DstAlpha, glhf.Zero)
case pixel.ComposeOut:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.Zero)
case pixel.ComposeAtop:
glhf.BlendFunc(glhf.DstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposeRover:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.One)
case pixel.ComposeRin:
glhf.BlendFunc(glhf.Zero, glhf.SrcAlpha)
case pixel.ComposeRout:
glhf.BlendFunc(glhf.Zero, glhf.OneMinusSrcAlpha)
case pixel.ComposeRatop:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.SrcAlpha)
case pixel.ComposeXor:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposePlus:
glhf.BlendFunc(glhf.One, glhf.One)
case pixel.ComposeCopy:
glhf.BlendFunc(glhf.One, glhf.Zero)
default:
panic(errors.New("Canvas: invalid compose method"))
}
}
// Clear fills the whole Canvas with a single color.
func (c *Canvas) Clear(color color.Color) {
c.gf.Dirty()
rgba := pixel.ToRGBA(color)
// color masking
rgba = rgba.Mul(pixel.RGBA{
R: float64(c.col[0]),
G: float64(c.col[1]),
B: float64(c.col[2]),
A: float64(c.col[3]),
})
mainthread.CallNonBlock(func() {
c.setGlhfBounds()
c.gf.Frame().Begin()
glhf.Clear(
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
)
c.gf.Frame().End() | func (c *Canvas) Color(at pixel.Vec) pixel.RGBA {
return c.gf.Color(at)
}
// Texture returns the underlying OpenGL Texture of this Canvas.
//
// Implements GLPicture interface.
func (c *Canvas) Texture() *glhf.Texture {
return c.gf.Texture()
}
// Frame returns the underlying OpenGL Frame of this Canvas.
func (c *Canvas) Frame() *glhf.Frame {
return c.gf.frame
}
// SetPixels replaces the content of the Canvas with the provided pixels. The provided slice must be
// an alpha-premultiplied RGBA sequence of correct length (4 * width * height).
func (c *Canvas) SetPixels(pixels []uint8) {
c.gf.Dirty()
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
tex.SetPixels(0, 0, tex.Width(), tex.Height(), pixels)
tex.End()
})
}
// Pixels returns an alpha-premultiplied RGBA sequence of the content of the Canvas.
func (c *Canvas) Pixels() []uint8 {
var pixels []uint8
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
pixels = tex.Pixels(0, 0, tex.Width(), tex.Height())
tex.End()
})
return pixels
}
// Draw draws the content of the Canvas onto another Target, transformed by the given Matrix, just
// like if it was a Sprite containing the whole Canvas.
func (c *Canvas) Draw(t pixel.Target, matrix pixel.Matrix) {
c.sprite.Draw(t, matrix)
}
// DrawColorMask draws the content of the Canvas onto another Target, transformed by the given
// Matrix and multiplied by the given mask, just like if it was a Sprite containing the whole Canvas.
//
// If the color mask is nil, a fully opaque white mask will be used causing no effect.
func (c *Canvas) DrawColorMask(t pixel.Target, matrix pixel.Matrix, mask color.Color) {
c.sprite.DrawColorMask(t, matrix, mask)
}
type canvasTriangles struct {
*GLTriangles
dst *Canvas
}
func (ct *canvasTriangles) draw(tex *glhf.Texture, bounds pixel.Rect) {
ct.dst.gf.Dirty()
// save the current state vars to avoid race condition
cmp := ct.dst.cmp
mat := ct.dst.mat
col := ct.dst.col
smt := ct.dst.smooth
mainthread.CallNonBlock(func() {
ct.dst.setGlhfBounds()
setBlendFunc(cmp)
frame := ct.dst.gf.Frame()
shader := ct.dst.shader
frame.Begin()
shader.Begin()
dstBounds := ct.dst.Bounds()
shader.SetUniformAttr(canvasBounds, mgl32.Vec4{
float32(dstBounds.Min.X),
float32(dstBounds.Min.Y),
float32(dstBounds.W()),
float32(dstBounds.H()),
})
shader.SetUniformAttr(canvasTransform, mat)
shader.SetUniformAttr(canvasColorMask, col)
if tex == nil {
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
} else {
tex.Begin()
bx, by, bw, bh := intBounds(bounds)
shader.SetUniformAttr(canvasTexBounds, mgl32.Vec4{
float32(bx),
float32(by),
float32(bw),
float32(bh),
})
if tex.Smooth() != smt {
tex.SetSmooth(smt)
}
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
tex.End()
}
shader.End()
frame.End()
})
}
func (ct *canvasTriangles) Draw() {
ct.draw(nil, pixel.Rect{})
}
type canvasPicture struct {
GLPicture
dst *Canvas
}
func (cp *canvasPicture) Draw(t pixel.TargetTriangles) {
ct := t.(*canvasTriangles)
if cp.dst != ct.dst {
panic(fmt.Errorf("(%T).Draw: TargetTriangles generated by different Canvas", cp))
}
ct.draw(cp.GLPicture.Texture(), cp.GLPicture.Bounds())
}
const (
canvasPosition int = iota
canvasColor
canvasTexCoords
canvasIntensity
)
var canvasVertexFormat = glhf.AttrFormat{
canvasPosition: {Name: "position", Type: glhf.Vec2},
canvasColor: {Name: "color", Type: glhf.Vec4},
canvasTexCoords: {Name: "texCoords", Type: glhf.Vec2},
canvasIntensity: {Name: "intensity", Type: glhf.Float},
}
const (
canvasTransform int = iota
canvasColorMask
canvasBounds
canvasTexBounds
)
var canvasUniformFormat = glhf.AttrFormat{
canvasTransform: {Name: "transform", Type: glhf.Mat3},
canvasColorMask: {Name: "colorMask", Type: glhf.Vec4},
canvasBounds: {Name: "bounds", Type: glhf.Vec4},
canvasTexBounds: {Name: "texBounds", Type: glhf.Vec4},
}
var canvasVertexShader = `
#version 330 core
in vec2 position;
in vec4 color;
in vec2 texCoords;
in float intensity;
out vec4 Color;
out vec2 TexCoords;
out float Intensity;
uniform mat3 transform;
uniform vec4 bounds;
void main() {
vec2 transPos = (transform * vec3(position, 1.0)).xy;
vec2 normPos = (transPos - bounds.xy) / bounds.zw * 2 - vec2(1, 1);
gl_Position = vec4(normPos, 0.0, 1.0);
Color = color;
TexCoords = texCoords;
Intensity = intensity;
}
`
var canvasFragmentShader = `
#version 330 core
in vec4 Color;
in vec2 TexCoords;
in float Intensity;
out vec4 color;
uniform vec4 colorMask;
uniform vec4 texBounds;
uniform sampler2D tex;
void main() {
if (Intensity == 0) {
color = colorMask * Color;
} else {
color = vec4(0, 0, 0, 0);
color += (1 - Intensity) * Color;
vec2 t = (TexCoords - texBounds.xy) / texBounds.zw;
color += Intensity * Color * texture(tex, t);
color *= colorMask;
}
}
` | })
}
// Color returns the color of the pixel over the given position inside the Canvas. | random_line_split |
canvas.go | package pixelgl
import (
"fmt"
"image/color"
"github.com/faiface/glhf"
"github.com/faiface/mainthread"
"github.com/faiface/pixel"
"github.com/go-gl/mathgl/mgl32"
"github.com/pkg/errors"
)
// Canvas is an off-screen rectangular BasicTarget and Picture at the same time, that you can draw
// onto.
//
// It supports TrianglesPosition, TrianglesColor, TrianglesPicture and PictureColor.
type Canvas struct {
gf *GLFrame
shader *glhf.Shader
cmp pixel.ComposeMethod
mat mgl32.Mat3
col mgl32.Vec4
smooth bool
sprite *pixel.Sprite
}
var _ pixel.ComposeTarget = (*Canvas)(nil)
// NewCanvas creates a new empty, fully transparent Canvas with given bounds.
func NewCanvas(bounds pixel.Rect) *Canvas {
c := &Canvas{
gf: NewGLFrame(bounds),
mat: mgl32.Ident3(),
col: mgl32.Vec4{1, 1, 1, 1},
}
c.SetBounds(bounds)
var shader *glhf.Shader
mainthread.Call(func() {
var err error
shader, err = glhf.NewShader(
canvasVertexFormat,
canvasUniformFormat,
canvasVertexShader,
canvasFragmentShader,
)
if err != nil {
panic(errors.Wrap(err, "failed to create Canvas, there's a bug in the shader"))
}
})
c.shader = shader
return c
}
// MakeTriangles creates a specialized copy of the supplied Triangles that draws onto this Canvas.
//
// TrianglesPosition, TrianglesColor and TrianglesPicture are supported.
func (c *Canvas) MakeTriangles(t pixel.Triangles) pixel.TargetTriangles {
return &canvasTriangles{
GLTriangles: NewGLTriangles(c.shader, t),
dst: c,
}
}
// MakePicture creates a specialized copy of the supplied Picture that draws onto this Canvas.
//
// PictureColor is supported.
func (c *Canvas) MakePicture(p pixel.Picture) pixel.TargetPicture {
if cp, ok := p.(*canvasPicture); ok {
return &canvasPicture{
GLPicture: cp.GLPicture,
dst: c,
}
}
if gp, ok := p.(GLPicture); ok {
return &canvasPicture{
GLPicture: gp,
dst: c,
}
}
return &canvasPicture{
GLPicture: NewGLPicture(p),
dst: c,
}
}
// SetMatrix sets a Matrix that every point will be projected by.
func (c *Canvas) SetMatrix(m pixel.Matrix) {
for i := range m {
c.mat[i] = float32(m[i])
}
}
// SetColorMask sets a color that every color in triangles or a picture will be multiplied by.
func (c *Canvas) SetColorMask(col color.Color) {
rgba := pixel.Alpha(1)
if col != nil {
rgba = pixel.ToRGBA(col)
}
c.col = mgl32.Vec4{
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
}
}
// SetComposeMethod sets a Porter-Duff composition method to be used in the following draws onto
// this Canvas.
func (c *Canvas) SetComposeMethod(cmp pixel.ComposeMethod) {
c.cmp = cmp
}
// SetBounds resizes the Canvas to the new bounds. Old content will be preserved.
func (c *Canvas) SetBounds(bounds pixel.Rect) {
c.gf.SetBounds(bounds)
if c.sprite == nil {
c.sprite = pixel.NewSprite(nil, pixel.Rect{})
}
c.sprite.Set(c, c.Bounds())
//c.sprite.SetMatrix(pixel.IM.Moved(c.Bounds().Center()))
}
// Bounds returns the rectangular bounds of the Canvas.
func (c *Canvas) Bounds() pixel.Rect {
return c.gf.Bounds()
}
// SetSmooth sets whether stretched Pictures drawn onto this Canvas should be drawn smooth or
// pixely.
func (c *Canvas) SetSmooth(smooth bool) {
c.smooth = smooth
}
// Smooth returns whether stretched Pictures drawn onto this Canvas are set to be drawn smooth or
// pixely.
func (c *Canvas) Smooth() bool {
return c.smooth
}
// must be manually called inside mainthread
func (c *Canvas) setGlhfBounds() {
_, _, bw, bh := intBounds(c.gf.Bounds())
glhf.Bounds(0, 0, bw, bh)
}
// must be manually called inside mainthread
func setBlendFunc(cmp pixel.ComposeMethod) {
switch cmp {
case pixel.ComposeOver:
glhf.BlendFunc(glhf.One, glhf.OneMinusSrcAlpha)
case pixel.ComposeIn:
glhf.BlendFunc(glhf.DstAlpha, glhf.Zero)
case pixel.ComposeOut:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.Zero)
case pixel.ComposeAtop:
glhf.BlendFunc(glhf.DstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposeRover:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.One)
case pixel.ComposeRin:
glhf.BlendFunc(glhf.Zero, glhf.SrcAlpha)
case pixel.ComposeRout:
glhf.BlendFunc(glhf.Zero, glhf.OneMinusSrcAlpha)
case pixel.ComposeRatop:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.SrcAlpha)
case pixel.ComposeXor:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposePlus:
glhf.BlendFunc(glhf.One, glhf.One)
case pixel.ComposeCopy:
glhf.BlendFunc(glhf.One, glhf.Zero)
default:
panic(errors.New("Canvas: invalid compose method"))
}
}
// Clear fills the whole Canvas with a single color.
func (c *Canvas) Clear(color color.Color) {
c.gf.Dirty()
rgba := pixel.ToRGBA(color)
// color masking
rgba = rgba.Mul(pixel.RGBA{
R: float64(c.col[0]),
G: float64(c.col[1]),
B: float64(c.col[2]),
A: float64(c.col[3]),
})
mainthread.CallNonBlock(func() {
c.setGlhfBounds()
c.gf.Frame().Begin()
glhf.Clear(
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
)
c.gf.Frame().End()
})
}
// Color returns the color of the pixel over the given position inside the Canvas.
func (c *Canvas) Color(at pixel.Vec) pixel.RGBA {
return c.gf.Color(at)
}
// Texture returns the underlying OpenGL Texture of this Canvas.
//
// Implements GLPicture interface.
func (c *Canvas) Texture() *glhf.Texture {
return c.gf.Texture()
}
// Frame returns the underlying OpenGL Frame of this Canvas.
func (c *Canvas) Frame() *glhf.Frame {
return c.gf.frame
}
// SetPixels replaces the content of the Canvas with the provided pixels. The provided slice must be
// an alpha-premultiplied RGBA sequence of correct length (4 * width * height).
func (c *Canvas) SetPixels(pixels []uint8) {
c.gf.Dirty()
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
tex.SetPixels(0, 0, tex.Width(), tex.Height(), pixels)
tex.End()
})
}
// Pixels returns an alpha-premultiplied RGBA sequence of the content of the Canvas.
func (c *Canvas) Pixels() []uint8 {
var pixels []uint8
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
pixels = tex.Pixels(0, 0, tex.Width(), tex.Height())
tex.End()
})
return pixels
}
// Draw draws the content of the Canvas onto another Target, transformed by the given Matrix, just
// like if it was a Sprite containing the whole Canvas.
func (c *Canvas) Draw(t pixel.Target, matrix pixel.Matrix) {
c.sprite.Draw(t, matrix)
}
// DrawColorMask draws the content of the Canvas onto another Target, transformed by the given
// Matrix and multiplied by the given mask, just like if it was a Sprite containing the whole Canvas.
//
// If the color mask is nil, a fully opaque white mask will be used causing no effect.
func (c *Canvas) | (t pixel.Target, matrix pixel.Matrix, mask color.Color) {
c.sprite.DrawColorMask(t, matrix, mask)
}
type canvasTriangles struct {
*GLTriangles
dst *Canvas
}
func (ct *canvasTriangles) draw(tex *glhf.Texture, bounds pixel.Rect) {
ct.dst.gf.Dirty()
// save the current state vars to avoid race condition
cmp := ct.dst.cmp
mat := ct.dst.mat
col := ct.dst.col
smt := ct.dst.smooth
mainthread.CallNonBlock(func() {
ct.dst.setGlhfBounds()
setBlendFunc(cmp)
frame := ct.dst.gf.Frame()
shader := ct.dst.shader
frame.Begin()
shader.Begin()
dstBounds := ct.dst.Bounds()
shader.SetUniformAttr(canvasBounds, mgl32.Vec4{
float32(dstBounds.Min.X),
float32(dstBounds.Min.Y),
float32(dstBounds.W()),
float32(dstBounds.H()),
})
shader.SetUniformAttr(canvasTransform, mat)
shader.SetUniformAttr(canvasColorMask, col)
if tex == nil {
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
} else {
tex.Begin()
bx, by, bw, bh := intBounds(bounds)
shader.SetUniformAttr(canvasTexBounds, mgl32.Vec4{
float32(bx),
float32(by),
float32(bw),
float32(bh),
})
if tex.Smooth() != smt {
tex.SetSmooth(smt)
}
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
tex.End()
}
shader.End()
frame.End()
})
}
func (ct *canvasTriangles) Draw() {
ct.draw(nil, pixel.Rect{})
}
type canvasPicture struct {
GLPicture
dst *Canvas
}
func (cp *canvasPicture) Draw(t pixel.TargetTriangles) {
ct := t.(*canvasTriangles)
if cp.dst != ct.dst {
panic(fmt.Errorf("(%T).Draw: TargetTriangles generated by different Canvas", cp))
}
ct.draw(cp.GLPicture.Texture(), cp.GLPicture.Bounds())
}
const (
canvasPosition int = iota
canvasColor
canvasTexCoords
canvasIntensity
)
var canvasVertexFormat = glhf.AttrFormat{
canvasPosition: {Name: "position", Type: glhf.Vec2},
canvasColor: {Name: "color", Type: glhf.Vec4},
canvasTexCoords: {Name: "texCoords", Type: glhf.Vec2},
canvasIntensity: {Name: "intensity", Type: glhf.Float},
}
const (
canvasTransform int = iota
canvasColorMask
canvasBounds
canvasTexBounds
)
var canvasUniformFormat = glhf.AttrFormat{
canvasTransform: {Name: "transform", Type: glhf.Mat3},
canvasColorMask: {Name: "colorMask", Type: glhf.Vec4},
canvasBounds: {Name: "bounds", Type: glhf.Vec4},
canvasTexBounds: {Name: "texBounds", Type: glhf.Vec4},
}
var canvasVertexShader = `
#version 330 core
in vec2 position;
in vec4 color;
in vec2 texCoords;
in float intensity;
out vec4 Color;
out vec2 TexCoords;
out float Intensity;
uniform mat3 transform;
uniform vec4 bounds;
void main() {
vec2 transPos = (transform * vec3(position, 1.0)).xy;
vec2 normPos = (transPos - bounds.xy) / bounds.zw * 2 - vec2(1, 1);
gl_Position = vec4(normPos, 0.0, 1.0);
Color = color;
TexCoords = texCoords;
Intensity = intensity;
}
`
var canvasFragmentShader = `
#version 330 core
in vec4 Color;
in vec2 TexCoords;
in float Intensity;
out vec4 color;
uniform vec4 colorMask;
uniform vec4 texBounds;
uniform sampler2D tex;
void main() {
if (Intensity == 0) {
color = colorMask * Color;
} else {
color = vec4(0, 0, 0, 0);
color += (1 - Intensity) * Color;
vec2 t = (TexCoords - texBounds.xy) / texBounds.zw;
color += Intensity * Color * texture(tex, t);
color *= colorMask;
}
}
`
| DrawColorMask | identifier_name |
canvas.go | package pixelgl
import (
"fmt"
"image/color"
"github.com/faiface/glhf"
"github.com/faiface/mainthread"
"github.com/faiface/pixel"
"github.com/go-gl/mathgl/mgl32"
"github.com/pkg/errors"
)
// Canvas is an off-screen rectangular BasicTarget and Picture at the same time, that you can draw
// onto.
//
// It supports TrianglesPosition, TrianglesColor, TrianglesPicture and PictureColor.
type Canvas struct {
gf *GLFrame
shader *glhf.Shader
cmp pixel.ComposeMethod
mat mgl32.Mat3
col mgl32.Vec4
smooth bool
sprite *pixel.Sprite
}
var _ pixel.ComposeTarget = (*Canvas)(nil)
// NewCanvas creates a new empty, fully transparent Canvas with given bounds.
func NewCanvas(bounds pixel.Rect) *Canvas {
c := &Canvas{
gf: NewGLFrame(bounds),
mat: mgl32.Ident3(),
col: mgl32.Vec4{1, 1, 1, 1},
}
c.SetBounds(bounds)
var shader *glhf.Shader
mainthread.Call(func() {
var err error
shader, err = glhf.NewShader(
canvasVertexFormat,
canvasUniformFormat,
canvasVertexShader,
canvasFragmentShader,
)
if err != nil {
panic(errors.Wrap(err, "failed to create Canvas, there's a bug in the shader"))
}
})
c.shader = shader
return c
}
// MakeTriangles creates a specialized copy of the supplied Triangles that draws onto this Canvas.
//
// TrianglesPosition, TrianglesColor and TrianglesPicture are supported.
func (c *Canvas) MakeTriangles(t pixel.Triangles) pixel.TargetTriangles {
return &canvasTriangles{
GLTriangles: NewGLTriangles(c.shader, t),
dst: c,
}
}
// MakePicture creates a specialized copy of the supplied Picture that draws onto this Canvas.
//
// PictureColor is supported.
func (c *Canvas) MakePicture(p pixel.Picture) pixel.TargetPicture {
if cp, ok := p.(*canvasPicture); ok {
return &canvasPicture{
GLPicture: cp.GLPicture,
dst: c,
}
}
if gp, ok := p.(GLPicture); ok {
return &canvasPicture{
GLPicture: gp,
dst: c,
}
}
return &canvasPicture{
GLPicture: NewGLPicture(p),
dst: c,
}
}
// SetMatrix sets a Matrix that every point will be projected by.
func (c *Canvas) SetMatrix(m pixel.Matrix) {
for i := range m |
}
// SetColorMask sets a color that every color in triangles or a picture will be multiplied by.
func (c *Canvas) SetColorMask(col color.Color) {
rgba := pixel.Alpha(1)
if col != nil {
rgba = pixel.ToRGBA(col)
}
c.col = mgl32.Vec4{
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
}
}
// SetComposeMethod sets a Porter-Duff composition method to be used in the following draws onto
// this Canvas.
func (c *Canvas) SetComposeMethod(cmp pixel.ComposeMethod) {
c.cmp = cmp
}
// SetBounds resizes the Canvas to the new bounds. Old content will be preserved.
func (c *Canvas) SetBounds(bounds pixel.Rect) {
c.gf.SetBounds(bounds)
if c.sprite == nil {
c.sprite = pixel.NewSprite(nil, pixel.Rect{})
}
c.sprite.Set(c, c.Bounds())
//c.sprite.SetMatrix(pixel.IM.Moved(c.Bounds().Center()))
}
// Bounds returns the rectangular bounds of the Canvas.
func (c *Canvas) Bounds() pixel.Rect {
return c.gf.Bounds()
}
// SetSmooth sets whether stretched Pictures drawn onto this Canvas should be drawn smooth or
// pixely.
func (c *Canvas) SetSmooth(smooth bool) {
c.smooth = smooth
}
// Smooth returns whether stretched Pictures drawn onto this Canvas are set to be drawn smooth or
// pixely.
func (c *Canvas) Smooth() bool {
return c.smooth
}
// must be manually called inside mainthread
func (c *Canvas) setGlhfBounds() {
_, _, bw, bh := intBounds(c.gf.Bounds())
glhf.Bounds(0, 0, bw, bh)
}
// must be manually called inside mainthread
func setBlendFunc(cmp pixel.ComposeMethod) {
switch cmp {
case pixel.ComposeOver:
glhf.BlendFunc(glhf.One, glhf.OneMinusSrcAlpha)
case pixel.ComposeIn:
glhf.BlendFunc(glhf.DstAlpha, glhf.Zero)
case pixel.ComposeOut:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.Zero)
case pixel.ComposeAtop:
glhf.BlendFunc(glhf.DstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposeRover:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.One)
case pixel.ComposeRin:
glhf.BlendFunc(glhf.Zero, glhf.SrcAlpha)
case pixel.ComposeRout:
glhf.BlendFunc(glhf.Zero, glhf.OneMinusSrcAlpha)
case pixel.ComposeRatop:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.SrcAlpha)
case pixel.ComposeXor:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposePlus:
glhf.BlendFunc(glhf.One, glhf.One)
case pixel.ComposeCopy:
glhf.BlendFunc(glhf.One, glhf.Zero)
default:
panic(errors.New("Canvas: invalid compose method"))
}
}
// Clear fills the whole Canvas with a single color.
func (c *Canvas) Clear(color color.Color) {
c.gf.Dirty()
rgba := pixel.ToRGBA(color)
// color masking
rgba = rgba.Mul(pixel.RGBA{
R: float64(c.col[0]),
G: float64(c.col[1]),
B: float64(c.col[2]),
A: float64(c.col[3]),
})
mainthread.CallNonBlock(func() {
c.setGlhfBounds()
c.gf.Frame().Begin()
glhf.Clear(
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
)
c.gf.Frame().End()
})
}
// Color returns the color of the pixel over the given position inside the Canvas.
func (c *Canvas) Color(at pixel.Vec) pixel.RGBA {
return c.gf.Color(at)
}
// Texture returns the underlying OpenGL Texture of this Canvas.
//
// Implements GLPicture interface.
func (c *Canvas) Texture() *glhf.Texture {
return c.gf.Texture()
}
// Frame returns the underlying OpenGL Frame of this Canvas.
func (c *Canvas) Frame() *glhf.Frame {
return c.gf.frame
}
// SetPixels replaces the content of the Canvas with the provided pixels. The provided slice must be
// an alpha-premultiplied RGBA sequence of correct length (4 * width * height).
func (c *Canvas) SetPixels(pixels []uint8) {
c.gf.Dirty()
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
tex.SetPixels(0, 0, tex.Width(), tex.Height(), pixels)
tex.End()
})
}
// Pixels returns an alpha-premultiplied RGBA sequence of the content of the Canvas.
func (c *Canvas) Pixels() []uint8 {
var pixels []uint8
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
pixels = tex.Pixels(0, 0, tex.Width(), tex.Height())
tex.End()
})
return pixels
}
// Draw draws the content of the Canvas onto another Target, transformed by the given Matrix, just
// like if it was a Sprite containing the whole Canvas.
func (c *Canvas) Draw(t pixel.Target, matrix pixel.Matrix) {
c.sprite.Draw(t, matrix)
}
// DrawColorMask draws the content of the Canvas onto another Target, transformed by the given
// Matrix and multiplied by the given mask, just like if it was a Sprite containing the whole Canvas.
//
// If the color mask is nil, a fully opaque white mask will be used causing no effect.
func (c *Canvas) DrawColorMask(t pixel.Target, matrix pixel.Matrix, mask color.Color) {
c.sprite.DrawColorMask(t, matrix, mask)
}
type canvasTriangles struct {
*GLTriangles
dst *Canvas
}
func (ct *canvasTriangles) draw(tex *glhf.Texture, bounds pixel.Rect) {
ct.dst.gf.Dirty()
// save the current state vars to avoid race condition
cmp := ct.dst.cmp
mat := ct.dst.mat
col := ct.dst.col
smt := ct.dst.smooth
mainthread.CallNonBlock(func() {
ct.dst.setGlhfBounds()
setBlendFunc(cmp)
frame := ct.dst.gf.Frame()
shader := ct.dst.shader
frame.Begin()
shader.Begin()
dstBounds := ct.dst.Bounds()
shader.SetUniformAttr(canvasBounds, mgl32.Vec4{
float32(dstBounds.Min.X),
float32(dstBounds.Min.Y),
float32(dstBounds.W()),
float32(dstBounds.H()),
})
shader.SetUniformAttr(canvasTransform, mat)
shader.SetUniformAttr(canvasColorMask, col)
if tex == nil {
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
} else {
tex.Begin()
bx, by, bw, bh := intBounds(bounds)
shader.SetUniformAttr(canvasTexBounds, mgl32.Vec4{
float32(bx),
float32(by),
float32(bw),
float32(bh),
})
if tex.Smooth() != smt {
tex.SetSmooth(smt)
}
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
tex.End()
}
shader.End()
frame.End()
})
}
func (ct *canvasTriangles) Draw() {
ct.draw(nil, pixel.Rect{})
}
type canvasPicture struct {
GLPicture
dst *Canvas
}
func (cp *canvasPicture) Draw(t pixel.TargetTriangles) {
ct := t.(*canvasTriangles)
if cp.dst != ct.dst {
panic(fmt.Errorf("(%T).Draw: TargetTriangles generated by different Canvas", cp))
}
ct.draw(cp.GLPicture.Texture(), cp.GLPicture.Bounds())
}
const (
canvasPosition int = iota
canvasColor
canvasTexCoords
canvasIntensity
)
var canvasVertexFormat = glhf.AttrFormat{
canvasPosition: {Name: "position", Type: glhf.Vec2},
canvasColor: {Name: "color", Type: glhf.Vec4},
canvasTexCoords: {Name: "texCoords", Type: glhf.Vec2},
canvasIntensity: {Name: "intensity", Type: glhf.Float},
}
const (
canvasTransform int = iota
canvasColorMask
canvasBounds
canvasTexBounds
)
var canvasUniformFormat = glhf.AttrFormat{
canvasTransform: {Name: "transform", Type: glhf.Mat3},
canvasColorMask: {Name: "colorMask", Type: glhf.Vec4},
canvasBounds: {Name: "bounds", Type: glhf.Vec4},
canvasTexBounds: {Name: "texBounds", Type: glhf.Vec4},
}
var canvasVertexShader = `
#version 330 core
in vec2 position;
in vec4 color;
in vec2 texCoords;
in float intensity;
out vec4 Color;
out vec2 TexCoords;
out float Intensity;
uniform mat3 transform;
uniform vec4 bounds;
void main() {
vec2 transPos = (transform * vec3(position, 1.0)).xy;
vec2 normPos = (transPos - bounds.xy) / bounds.zw * 2 - vec2(1, 1);
gl_Position = vec4(normPos, 0.0, 1.0);
Color = color;
TexCoords = texCoords;
Intensity = intensity;
}
`
var canvasFragmentShader = `
#version 330 core
in vec4 Color;
in vec2 TexCoords;
in float Intensity;
out vec4 color;
uniform vec4 colorMask;
uniform vec4 texBounds;
uniform sampler2D tex;
void main() {
if (Intensity == 0) {
color = colorMask * Color;
} else {
color = vec4(0, 0, 0, 0);
color += (1 - Intensity) * Color;
vec2 t = (TexCoords - texBounds.xy) / texBounds.zw;
color += Intensity * Color * texture(tex, t);
color *= colorMask;
}
}
`
| {
c.mat[i] = float32(m[i])
} | conditional_block |
mod.rs | //! A builder for a Fletcher-like algorithm.
//!
//! The basic functionality of this algorithm is:
//! * there is a sum which is just the bytes summed modulo some number
//! * there is also a second sum which the sum of all of the normal sums (modulo the same number)
//!
//! Note that the text word size is currently restricted to `u8`.
//!
//! It works roughly like this:
//! ```
//! # fn check(file: &[u8]) -> u32 {
//! # let module = 0xfff1u32;
//! # let init = 1;
//! # let (addout1, addout2) = (0, 0);
//! # let hwidth = 16;
//! let mut sum1 = init;
//! let mut sum2 = 0;
//! for byte in file {
//! sum1 = (sum1 + *byte as u32) % module;
//! sum2 = (sum2 + sum1) % module;
//! }
//! return (sum2 + addout2) % module << hwidth | (sum1 + addout1) % module;
//! # }
//! ```
//! Normally, the sum is represented as the cumulative sum bitshifted to be above the regular sum.
//! This representation will be referred to as "compact".
//!
//! These are the parameters:
//! * width: Total number of bits of the checksum (twice the amount of bits of the individual sums)
//! * module: The number by which both sums get reduced
//! * init: The initial value of the regular sum
//! * addout: The value that gets added at the end, compact
//! * swap: Whether to swap the values in the compact representation, i.e. put the regular sum above the cumulative sum
//! * check: The checksum of the bytes "123456789", checked to be correct on build
//! * name: The name to be used when displaying the algorithm (optional)
//!
//! Note that the `init` parameter, unlike the `addout` parameter, is not compact and is only added to the regular sum,
//! as for the cumulative sum, it is equivalent to the addout (so you can just add the cumulative `init` to the cumulative `addout`).
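//!
//! For illustration (using the Adler-32 parameters from the builder example below): the compact
//! checksum `0x091e01de` with `hwidth = 16` and `swap = false` splits into the cumulative sum
//! `0x091e` in the upper half and the regular sum `0x01de` in the lower half.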
mod rev;
use crate::bitnum::{BitNum, Modnum};
use crate::checksum::{CheckBuilderErr, Digest, LinearCheck};
use crate::endian::{Endian, WordSpec};
use crate::keyval::KeyValIter;
use num_traits::{One, Zero};
pub use rev::reverse_fletcher;
#[cfg(feature = "parallel")]
pub use rev::reverse_fletcher_para;
use std::fmt::Display;
use std::str::FromStr;
/// A builder for a fletcher.
///
/// One can use it for specifying a fletcher algorithm, which can be used for checksumming.
///
/// Example:
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// let adler32 = Fletcher::<u32>::with_options()
/// .width(32)
/// .init(1)
/// .module(65521)
/// .check(0x091e01de)
/// .name("adler32")
/// .build()
/// .is_ok();
/// ```
#[derive(Clone, Debug)]
pub struct FletcherBuilder<Sum: Modnum> {
width: Option<usize>,
module: Option<Sum>,
init: Option<Sum>,
addout: Option<Sum::Double>,
swap: Option<bool>,
input_endian: Option<Endian>,
output_endian: Option<Endian>,
wordsize: Option<usize>,
check: Option<Sum::Double>,
name: Option<String>,
}
impl<S: Modnum> FletcherBuilder<S> {
/// Sets the width of the type (both sums included, must be even, mandatory)
pub fn width(&mut self, w: usize) -> &mut Self {
self.width = Some(w);
self
}
/// Sets the module of both sums (mandatory)
pub fn module(&mut self, m: S) -> &mut Self {
self.module = Some(m);
self
}
/// Sets the initial value
///
/// Contains one value for the regular sum.
pub fn init(&mut self, i: S) -> &mut Self {
self.init = Some(i);
self
}
/// Sets a value that gets added after the checksum is finished
///
/// Contains separate values for both sums, the cumulative one is bitshifted
pub fn addout(&mut self, o: S::Double) -> &mut Self {
self.addout = Some(o);
self
}
/// Normally, the cumulative sum is saved on the higher bits and the normal sum in the lower bits.
/// Setting this option to true swaps the positions.
pub fn swap(&mut self, s: bool) -> &mut Self |
/// The endian of the words of the input file
pub fn inendian(&mut self, e: Endian) -> &mut Self {
self.input_endian = Some(e);
self
}
/// The number of bits in a word of the input file
pub fn wordsize(&mut self, n: usize) -> &mut Self {
self.wordsize = Some(n);
self
}
/// The endian of the checksum
pub fn outendian(&mut self, e: Endian) -> &mut Self {
self.output_endian = Some(e);
self
}
/// Checks whether c is the same as the checksum of "123456789" on creation
pub fn check(&mut self, c: S::Double) -> &mut Self {
self.check = Some(c);
self
}
/// A name to be displayed
pub fn name(&mut self, n: &str) -> &mut Self {
self.name = Some(String::from(n));
self
}
/// Returns the Fletcher object after verifying correctness
pub fn build(&self) -> Result<Fletcher<S>, CheckBuilderErr> {
let init = self.init.unwrap_or_else(S::zero);
let addout = self.addout.unwrap_or_else(S::Double::zero);
// note: we only store the half width because it is more useful to us
let hwidth = match self.width {
None => return Err(CheckBuilderErr::MissingParameter("width")),
Some(w) => {
if w % 2 != 0 || w > addout.bits() {
return Err(CheckBuilderErr::ValueOutOfRange("width"));
} else {
w / 2
}
}
};
let mask = (S::Double::one() << hwidth) - S::Double::one();
let module = self.module.unwrap_or_else(S::zero);
let wordsize = self.wordsize.unwrap_or(8);
if wordsize == 0 || wordsize % 8 != 0 || wordsize > 64 {
return Err(CheckBuilderErr::ValueOutOfRange("wordsize"));
}
let wordspec = WordSpec {
input_endian: self.input_endian.unwrap_or(Endian::Big),
wordsize,
output_endian: self.output_endian.unwrap_or(Endian::Big),
};
let mut fletch = Fletcher {
hwidth,
module,
init,
addout,
swap: self.swap.unwrap_or(false),
wordspec,
mask,
name: self.name.clone(),
};
let (mut s, mut c) = fletch.from_compact(addout);
if !module.is_zero() {
s = s % module;
c = c % module;
fletch.init = init % module;
} else {
fletch.init = init;
};
fletch.addout = fletch.to_compact((s, c));
match self.check {
Some(chk) => {
if fletch.digest(&b"123456789"[..]).unwrap() != chk {
println!("{:x?}", fletch.digest(&b"123456789"[..]).unwrap());
Err(CheckBuilderErr::CheckFail)
} else {
Ok(fletch)
}
}
None => Ok(fletch),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Fletcher<Sum: Modnum> {
hwidth: usize,
module: Sum,
init: Sum,
addout: Sum::Double,
swap: bool,
wordspec: WordSpec,
mask: Sum::Double,
name: Option<String>,
}
impl<Sum: Modnum> Display for Fletcher<Sum> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match &self.name {
Some(n) => write!(f, "{}", n),
None => {
write!(
f,
"fletcher width={} module={:#x} init={:#x} addout={:#x} swap={}",
2 * self.hwidth,
self.module,
self.init,
self.addout,
self.swap
)?;
if self.wordspec.word_bytes() != 1 {
write!(
f,
" in_endian={} wordsize={}",
self.wordspec.input_endian, self.wordspec.wordsize
)?;
};
if self.hwidth * 2 > 8 {
write!(f, " out_endian={}", self.wordspec.output_endian)?;
};
Ok(())
}
}
}
}
impl<Sum: Modnum> Fletcher<Sum> {
/// Creates a `FletcherBuilder`, see `FletcherBuilder` documentation for more details.
pub fn with_options() -> FletcherBuilder<Sum> {
FletcherBuilder {
width: None,
module: None,
init: None,
addout: None,
swap: None,
input_endian: None,
output_endian: None,
wordsize: None,
check: None,
name: None,
}
}
fn from_compact(&self, x: Sum::Double) -> (Sum, Sum) {
let l = Sum::from_double(x & self.mask);
let h = Sum::from_double((x >> self.hwidth) & self.mask);
if self.swap {
(h, l)
} else {
(l, h)
}
}
fn to_compact(&self, (s, c): (Sum, Sum)) -> Sum::Double {
let (l, h) = if self.swap { (c, s) } else { (s, c) };
(Sum::Double::from(l) & self.mask) ^ (Sum::Double::from(h) & self.mask) << self.hwidth
}
}
impl<Sum: Modnum> FromStr for FletcherBuilder<Sum> {
/// See documentation of FromStr on Fletcher<Sum>
fn from_str(s: &str) -> Result<FletcherBuilder<Sum>, CheckBuilderErr> {
let mut fletch = Fletcher::<Sum>::with_options();
for x in KeyValIter::new(s) {
let (current_key, current_val) = match x {
Err(key) => return Err(CheckBuilderErr::MalformedString(key)),
Ok(s) => s,
};
let fletch_op = match current_key.as_str() {
"width" => usize::from_str(&current_val).ok().map(|x| fletch.width(x)),
"module" => Sum::from_hex(&current_val).ok().map(|x| fletch.module(x)),
"init" => Sum::from_hex(&current_val).ok().map(|x| fletch.init(x)),
"addout" => Sum::Double::from_hex(&current_val)
.ok()
.map(|x| fletch.addout(x)),
"swap" => bool::from_str(&current_val).ok().map(|x| fletch.swap(x)),
"in_endian" => Endian::from_str(&current_val)
.ok()
.map(|x| fletch.inendian(x)),
"wordsize" => usize::from_str(&current_val)
.ok()
.map(|x| fletch.wordsize(x)),
"out_endian" => Endian::from_str(&current_val)
.ok()
.map(|x| fletch.outendian(x)),
"check" => Sum::Double::from_hex(&current_val)
.ok()
.map(|x| fletch.check(x)),
"name" => Some(fletch.name(&current_val)),
_ => return Err(CheckBuilderErr::UnknownKey(current_key)),
};
match fletch_op {
Some(f) => fletch = f.clone(),
None => return Err(CheckBuilderErr::MalformedString(current_key)),
}
}
Ok(fletch)
}
type Err = CheckBuilderErr;
}
impl<Sum: Modnum> FromStr for Fletcher<Sum> {
/// Construct a new fletcher sum algorithm from a string.
/// Note that all parameters except width are in hexadecimal.
///
/// Example:
///
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// # use std::str::FromStr;
/// Fletcher::<u32>::from_str("width=32 init=1 module=0xfff1 name=\"adler-32\"").is_ok();
/// ```
fn from_str(s: &str) -> Result<Fletcher<Sum>, CheckBuilderErr> {
FletcherBuilder::<Sum>::from_str(s)?.build()
}
type Err = CheckBuilderErr;
}
impl<S: Modnum> Digest for Fletcher<S> {
type Sum = S::Double;
fn init(&self) -> Self::Sum {
self.to_compact((self.init, S::zero()))
}
fn dig_word(&self, sum: Self::Sum, word: u64) -> Self::Sum {
let (mut s, mut c) = self.from_compact(sum);
let modword = S::mod_from(word, &self.module);
s = S::add_mod(s, &modword, &self.module);
c = S::add_mod(c, &s, &self.module);
self.to_compact((s, c))
}
fn finalize(&self, sum: Self::Sum) -> Self::Sum {
self.add(sum, &self.addout)
}
fn to_bytes(&self, s: Self::Sum) -> Vec<u8> {
self.wordspec.output_to_bytes(s, 2 * self.hwidth)
}
fn wordspec(&self) -> WordSpec {
self.wordspec
}
}
impl<S: Modnum> LinearCheck for Fletcher<S> {
type Shift = S;
fn init_shift(&self) -> Self::Shift {
S::zero()
}
fn inc_shift(&self, shift: Self::Shift) -> Self::Shift {
S::add_mod(shift, &S::one(), &self.module)
}
fn shift(&self, sum: Self::Sum, shift: &Self::Shift) -> Self::Sum {
let (s, mut c) = self.from_compact(sum);
let shift_diff = S::mul_mod(s, shift, &self.module);
c = S::add_mod(c, &shift_diff, &self.module);
self.to_compact((s, c))
}
fn add(&self, sum_a: Self::Sum, sum_b: &Self::Sum) -> Self::Sum {
let (sa, ca) = self.from_compact(sum_a);
let (sb, cb) = self.from_compact(*sum_b);
let sum_s = sa.add_mod(&sb, &self.module);
let sum_c = ca.add_mod(&cb, &self.module);
self.to_compact((sum_s, sum_c))
}
fn negate(&self, sum: Self::Sum) -> Self::Sum {
let (s, c) = self.from_compact(sum);
self.to_compact((s.neg_mod(&self.module), c.neg_mod(&self.module)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::checksum::tests::{check_example, test_find, test_prop, test_shifts};
use std::str::FromStr;
#[test]
fn adler32() {
let adel = Fletcher::<u16>::with_options()
.width(32)
.init(1)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&adel);
test_find(&adel);
test_prop(&adel);
check_example(&adel, 0x81bfd25f);
let nobel = Fletcher::with_options()
.width(32)
.init(1u32)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&nobel);
test_find(&nobel);
test_prop(&adel);
check_example(&nobel, 0x81bfd25f);
}
#[test]
fn fletcher16() {
let f16 = Fletcher::with_options()
.width(16)
.module(0xffu8)
.check(0x1ede)
.build()
.unwrap();
test_shifts(&f16);
test_find(&f16);
test_prop(&f16);
check_example(&f16, 0x7815);
}
#[test]
fn fletcher8() {
let f8 = Fletcher::<u8>::from_str("width=8 module=f init=0 addout=0 swap=false check=0xc")
.unwrap();
test_shifts(&f8);
test_prop(&f8);
check_example(&f8, 0x6);
}
}
| {
self.swap = Some(s);
self
} | identifier_body |
mod.rs | //! A builder for a Fletcher-like algorithm.
//!
//! The basic functionality of this algorithm is:
//! * there is a sum which is just the bytes summed modulo some number
//! * there is also a second sum which is the sum of all of the normal sums (modulo the same number)
//!
//! Note that the word sizes are currently only `u8`.
//!
//! It works roughly like this:
//! ```
//! # fn check(file: &[u8]) -> u32 {
//! # let module = 0xfff1u32;
//! # let init = 1;
//! # let (addout1, addout2) = (0, 0);
//! # let hwidth = 16;
//! let mut sum1 = init;
//! let mut sum2 = 0;
//! for byte in file {
//! sum1 = (sum1 + *byte as u32) % module;
//! sum2 = (sum2 + sum1) % module;
//! }
//! return (sum2 + addout2) % module << hwidth | (sum1 + addout1) % module;
//! # }
//! ```
//! Normally, the sum is represented as the cumulative sum bitshifted to be above the regular sum.
//! This representation will be referred to as "compact".
//!
//! These are the parameters:
//! * width: Total number of bits of the checksum (twice the amount of bits of the individual sums)
//! * module: The number by which both sums get reduced
//! * init: The initial value of the regular sum
//! * addout: The value that gets added at the end, compact
//! * swap: Whether to swap the values in the compact representation, i.e. put the regular sum above the cumulative sum
//! * check: The checksum of the bytes "123456789", checked to be correct on build
//! * name: The name to be used when displaying the algorithm (optional)
//!
//! Note that the `init` parameter, unlike the `addout` parameter, is not compact and is only added to the regular sum,
//! as for the cumulative sum, it is equivalent to the addout (so you can just add the cumulative `init` to the cumulative `addout`).
mod rev;
use crate::bitnum::{BitNum, Modnum};
use crate::checksum::{CheckBuilderErr, Digest, LinearCheck};
use crate::endian::{Endian, WordSpec};
use crate::keyval::KeyValIter;
use num_traits::{One, Zero};
pub use rev::reverse_fletcher;
#[cfg(feature = "parallel")]
pub use rev::reverse_fletcher_para;
use std::fmt::Display;
use std::str::FromStr;
/// A builder for a fletcher.
///
/// One can use it for specifying a fletcher algorithm, which can be used for checksumming.
///
/// Example:
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// let adler32 = Fletcher::<u32>::with_options()
/// .width(32)
/// .init(1)
/// .module(65521)
/// .check(0x091e01de)
/// .name("adler32")
/// .build()
/// .is_ok();
/// ```
#[derive(Clone, Debug)]
pub struct FletcherBuilder<Sum: Modnum> {
width: Option<usize>,
module: Option<Sum>,
init: Option<Sum>,
addout: Option<Sum::Double>,
swap: Option<bool>,
input_endian: Option<Endian>,
output_endian: Option<Endian>,
wordsize: Option<usize>,
check: Option<Sum::Double>,
name: Option<String>,
}
impl<S: Modnum> FletcherBuilder<S> {
/// Sets the width of the type (both sums included, must be even, mandatory)
pub fn width(&mut self, w: usize) -> &mut Self {
self.width = Some(w);
self
}
/// Sets the module of both sums (mandatory)
pub fn module(&mut self, m: S) -> &mut Self {
self.module = Some(m);
self
}
/// Sets the initial value
///
/// Contains one value for the regular sum.
pub fn init(&mut self, i: S) -> &mut Self {
self.init = Some(i);
self
}
/// Sets a value that gets added after the checksum is finished
///
/// Contains separate values for both sums, the cumulative one is bitshifted
pub fn addout(&mut self, o: S::Double) -> &mut Self {
self.addout = Some(o);
self
}
/// Normally, the cumulative sum is saved on the higher bits and the normal sum in the lower bits.
/// Setting this option to true swaps the positions.
pub fn swap(&mut self, s: bool) -> &mut Self {
self.swap = Some(s);
self
}
/// The endian of the words of the input file
pub fn | (&mut self, e: Endian) -> &mut Self {
self.input_endian = Some(e);
self
}
/// The number of bits in a word of the input file
pub fn wordsize(&mut self, n: usize) -> &mut Self {
self.wordsize = Some(n);
self
}
/// The endian of the checksum
pub fn outendian(&mut self, e: Endian) -> &mut Self {
self.output_endian = Some(e);
self
}
/// Checks whether c is the same as the checksum of "123456789" on creation
pub fn check(&mut self, c: S::Double) -> &mut Self {
self.check = Some(c);
self
}
/// A name to be displayed
pub fn name(&mut self, n: &str) -> &mut Self {
self.name = Some(String::from(n));
self
}
/// Returns the Fletcher object after verifying correctness
pub fn build(&self) -> Result<Fletcher<S>, CheckBuilderErr> {
let init = self.init.unwrap_or_else(S::zero);
let addout = self.addout.unwrap_or_else(S::Double::zero);
// note: we only store the half width because it is more useful to us
let hwidth = match self.width {
None => return Err(CheckBuilderErr::MissingParameter("width")),
Some(w) => {
if w % 2 != 0 || w > addout.bits() {
return Err(CheckBuilderErr::ValueOutOfRange("width"));
} else {
w / 2
}
}
};
let mask = (S::Double::one() << hwidth) - S::Double::one();
let module = self.module.unwrap_or_else(S::zero);
let wordsize = self.wordsize.unwrap_or(8);
if wordsize == 0 || wordsize % 8 != 0 || wordsize > 64 {
return Err(CheckBuilderErr::ValueOutOfRange("wordsize"));
}
let wordspec = WordSpec {
input_endian: self.input_endian.unwrap_or(Endian::Big),
wordsize,
output_endian: self.output_endian.unwrap_or(Endian::Big),
};
let mut fletch = Fletcher {
hwidth,
module,
init,
addout,
swap: self.swap.unwrap_or(false),
wordspec,
mask,
name: self.name.clone(),
};
let (mut s, mut c) = fletch.from_compact(addout);
if !module.is_zero() {
s = s % module;
c = c % module;
fletch.init = init % module;
} else {
fletch.init = init;
};
fletch.addout = fletch.to_compact((s, c));
match self.check {
Some(chk) => {
if fletch.digest(&b"123456789"[..]).unwrap() != chk {
println!("{:x?}", fletch.digest(&b"123456789"[..]).unwrap());
Err(CheckBuilderErr::CheckFail)
} else {
Ok(fletch)
}
}
None => Ok(fletch),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Fletcher<Sum: Modnum> {
hwidth: usize,
module: Sum,
init: Sum,
addout: Sum::Double,
swap: bool,
wordspec: WordSpec,
mask: Sum::Double,
name: Option<String>,
}
impl<Sum: Modnum> Display for Fletcher<Sum> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match &self.name {
Some(n) => write!(f, "{}", n),
None => {
write!(
f,
"fletcher width={} module={:#x} init={:#x} addout={:#x} swap={}",
2 * self.hwidth,
self.module,
self.init,
self.addout,
self.swap
)?;
if self.wordspec.word_bytes() != 1 {
write!(
f,
" in_endian={} wordsize={}",
self.wordspec.input_endian, self.wordspec.wordsize
)?;
};
if self.hwidth * 2 > 8 {
write!(f, " out_endian={}", self.wordspec.output_endian)?;
};
Ok(())
}
}
}
}
impl<Sum: Modnum> Fletcher<Sum> {
/// Creates a `FletcherBuilder`, see `FletcherBuilder` documentation for more details.
pub fn with_options() -> FletcherBuilder<Sum> {
FletcherBuilder {
width: None,
module: None,
init: None,
addout: None,
swap: None,
input_endian: None,
output_endian: None,
wordsize: None,
check: None,
name: None,
}
}
fn from_compact(&self, x: Sum::Double) -> (Sum, Sum) {
let l = Sum::from_double(x & self.mask);
let h = Sum::from_double((x >> self.hwidth) & self.mask);
if self.swap {
(h, l)
} else {
(l, h)
}
}
fn to_compact(&self, (s, c): (Sum, Sum)) -> Sum::Double {
let (l, h) = if self.swap { (c, s) } else { (s, c) };
(Sum::Double::from(l) & self.mask) ^ (Sum::Double::from(h) & self.mask) << self.hwidth
}
}
impl<Sum: Modnum> FromStr for FletcherBuilder<Sum> {
/// See documentation of FromStr on Fletcher<Sum>
fn from_str(s: &str) -> Result<FletcherBuilder<Sum>, CheckBuilderErr> {
let mut fletch = Fletcher::<Sum>::with_options();
for x in KeyValIter::new(s) {
let (current_key, current_val) = match x {
Err(key) => return Err(CheckBuilderErr::MalformedString(key)),
Ok(s) => s,
};
let fletch_op = match current_key.as_str() {
"width" => usize::from_str(&current_val).ok().map(|x| fletch.width(x)),
"module" => Sum::from_hex(&current_val).ok().map(|x| fletch.module(x)),
"init" => Sum::from_hex(&current_val).ok().map(|x| fletch.init(x)),
"addout" => Sum::Double::from_hex(&current_val)
.ok()
.map(|x| fletch.addout(x)),
"swap" => bool::from_str(&current_val).ok().map(|x| fletch.swap(x)),
"in_endian" => Endian::from_str(&current_val)
.ok()
.map(|x| fletch.inendian(x)),
"wordsize" => usize::from_str(&current_val)
.ok()
.map(|x| fletch.wordsize(x)),
"out_endian" => Endian::from_str(&current_val)
.ok()
.map(|x| fletch.outendian(x)),
"check" => Sum::Double::from_hex(&current_val)
.ok()
.map(|x| fletch.check(x)),
"name" => Some(fletch.name(&current_val)),
_ => return Err(CheckBuilderErr::UnknownKey(current_key)),
};
match fletch_op {
Some(f) => fletch = f.clone(),
None => return Err(CheckBuilderErr::MalformedString(current_key)),
}
}
Ok(fletch)
}
type Err = CheckBuilderErr;
}
impl<Sum: Modnum> FromStr for Fletcher<Sum> {
/// Construct a new fletcher sum algorithm from a string.
/// Note that all parameters except width are in hexadecimal.
///
/// Example:
///
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// # use std::str::FromStr;
/// Fletcher::<u32>::from_str("width=32 init=1 module=0xfff1 name=\"adler-32\"").is_ok();
/// ```
fn from_str(s: &str) -> Result<Fletcher<Sum>, CheckBuilderErr> {
FletcherBuilder::<Sum>::from_str(s)?.build()
}
type Err = CheckBuilderErr;
}
impl<S: Modnum> Digest for Fletcher<S> {
type Sum = S::Double;
fn init(&self) -> Self::Sum {
self.to_compact((self.init, S::zero()))
}
fn dig_word(&self, sum: Self::Sum, word: u64) -> Self::Sum {
let (mut s, mut c) = self.from_compact(sum);
let modword = S::mod_from(word, &self.module);
s = S::add_mod(s, &modword, &self.module);
c = S::add_mod(c, &s, &self.module);
self.to_compact((s, c))
}
fn finalize(&self, sum: Self::Sum) -> Self::Sum {
self.add(sum, &self.addout)
}
fn to_bytes(&self, s: Self::Sum) -> Vec<u8> {
self.wordspec.output_to_bytes(s, 2 * self.hwidth)
}
fn wordspec(&self) -> WordSpec {
self.wordspec
}
}
impl<S: Modnum> LinearCheck for Fletcher<S> {
type Shift = S;
fn init_shift(&self) -> Self::Shift {
S::zero()
}
fn inc_shift(&self, shift: Self::Shift) -> Self::Shift {
S::add_mod(shift, &S::one(), &self.module)
}
fn shift(&self, sum: Self::Sum, shift: &Self::Shift) -> Self::Sum {
let (s, mut c) = self.from_compact(sum);
let shift_diff = S::mul_mod(s, shift, &self.module);
c = S::add_mod(c, &shift_diff, &self.module);
self.to_compact((s, c))
}
fn add(&self, sum_a: Self::Sum, sum_b: &Self::Sum) -> Self::Sum {
let (sa, ca) = self.from_compact(sum_a);
let (sb, cb) = self.from_compact(*sum_b);
let sum_s = sa.add_mod(&sb, &self.module);
let sum_c = ca.add_mod(&cb, &self.module);
self.to_compact((sum_s, sum_c))
}
fn negate(&self, sum: Self::Sum) -> Self::Sum {
let (s, c) = self.from_compact(sum);
self.to_compact((s.neg_mod(&self.module), c.neg_mod(&self.module)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::checksum::tests::{check_example, test_find, test_prop, test_shifts};
use std::str::FromStr;
#[test]
fn adler32() {
let adel = Fletcher::<u16>::with_options()
.width(32)
.init(1)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&adel);
test_find(&adel);
test_prop(&adel);
check_example(&adel, 0x81bfd25f);
let nobel = Fletcher::with_options()
.width(32)
.init(1u32)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&nobel);
test_find(&nobel);
test_prop(&adel);
check_example(&nobel, 0x81bfd25f);
}
#[test]
fn fletcher16() {
let f16 = Fletcher::with_options()
.width(16)
.module(0xffu8)
.check(0x1ede)
.build()
.unwrap();
test_shifts(&f16);
test_find(&f16);
test_prop(&f16);
check_example(&f16, 0x7815);
}
#[test]
fn fletcher8() {
let f8 = Fletcher::<u8>::from_str("width=8 module=f init=0 addout=0 swap=false check=0xc")
.unwrap();
test_shifts(&f8);
test_prop(&f8);
check_example(&f8, 0x6);
}
}
| inendian | identifier_name |
mod.rs | //! A builder for a Fletcher-like algorithm.
//!
//! The basic functionality of this algorithm is:
//! * there is a sum which is just the bytes summed modulo some number
//! * there is also a second sum which is the sum of all of the normal sums (modulo the same number)
//!
//! Note that the word sizes are currently only `u8`.
//!
//! It works roughly like this:
//! ```
//! # fn check(file: &[u8]) -> u32 {
//! # let module = 0xfff1u32;
//! # let init = 1;
//! # let (addout1, addout2) = (0, 0);
//! # let hwidth = 16;
//! let mut sum1 = init;
//! let mut sum2 = 0;
//! for byte in file {
//! sum1 = (sum1 + *byte as u32) % module;
//! sum2 = (sum2 + sum1) % module;
//! }
//! return (sum2 + addout2) % module << hwidth | (sum1 + addout1) % module;
//! # }
//! ```
//! Normally, the sum is represented as the cumulative sum bitshifted to be above the regular sum.
//! This representation will be referred to as "compact".
//!
//! These are the parameters:
//! * width: Total number of bits of the checksum (twice the amount of bits of the individual sums)
//! * module: The number by which both sums get reduced
//! * init: The initial value of the regular sum
//! * addout: The value that gets added at the end, compact
//! * swap: Whether to swap the values in the compact representation, i.e. put the regular sum above the cumulative sum
//! * check: The checksum of the bytes "123456789", checked to be correct on build
//! * name: The name to be used when displaying the algorithm (optional)
//!
//! Note that the `init` parameter, unlike the `addout` parameter, is not compact and is only added to the regular sum,
//! as for the cumulative sum, it is equivalent to the addout (so you can just add the cumulative `init` to the cumulative `addout`).
mod rev;
use crate::bitnum::{BitNum, Modnum};
use crate::checksum::{CheckBuilderErr, Digest, LinearCheck};
use crate::endian::{Endian, WordSpec};
use crate::keyval::KeyValIter;
use num_traits::{One, Zero};
pub use rev::reverse_fletcher;
#[cfg(feature = "parallel")]
pub use rev::reverse_fletcher_para;
use std::fmt::Display;
use std::str::FromStr;
/// A builder for a fletcher.
///
/// One can use it for specifying a fletcher algorithm, which can be used for checksumming.
///
/// Example:
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// let adler32 = Fletcher::<u32>::with_options()
/// .width(32)
/// .init(1)
/// .module(65521)
/// .check(0x091e01de)
/// .name("adler32")
/// .build()
/// .is_ok();
/// ```
#[derive(Clone, Debug)]
pub struct FletcherBuilder<Sum: Modnum> {
width: Option<usize>,
module: Option<Sum>,
init: Option<Sum>,
addout: Option<Sum::Double>,
swap: Option<bool>,
input_endian: Option<Endian>,
output_endian: Option<Endian>,
wordsize: Option<usize>,
check: Option<Sum::Double>,
name: Option<String>,
}
impl<S: Modnum> FletcherBuilder<S> {
/// Sets the width of the type (both sums included, must be even, mandatory)
pub fn width(&mut self, w: usize) -> &mut Self {
self.width = Some(w);
self
}
/// Sets the module of both sums (mandatory)
pub fn module(&mut self, m: S) -> &mut Self {
self.module = Some(m);
self
}
/// Sets the initial value
///
/// Contains one value for the regular sum.
pub fn init(&mut self, i: S) -> &mut Self {
self.init = Some(i);
self
}
/// Sets a value that gets added after the checksum is finished
///
/// Contains separate values for both sums, the cumulative one is bitshifted
pub fn addout(&mut self, o: S::Double) -> &mut Self {
self.addout = Some(o);
self
}
/// Normally, the cumulative sum is saved on the higher bits and the normal sum in the lower bits.
/// Setting this option to true swaps the positions.
pub fn swap(&mut self, s: bool) -> &mut Self {
self.swap = Some(s);
self
}
/// The endian of the words of the input file
pub fn inendian(&mut self, e: Endian) -> &mut Self {
self.input_endian = Some(e);
self
}
/// The number of bits in a word of the input file
pub fn wordsize(&mut self, n: usize) -> &mut Self {
self.wordsize = Some(n);
self
}
/// The endian of the checksum
pub fn outendian(&mut self, e: Endian) -> &mut Self {
self.output_endian = Some(e);
self
}
/// Checks whether c is the same as the checksum of "123456789" on creation
pub fn check(&mut self, c: S::Double) -> &mut Self {
self.check = Some(c);
self
}
/// A name to be displayed
pub fn name(&mut self, n: &str) -> &mut Self {
self.name = Some(String::from(n));
self
}
/// Returns the Fletcher object after verifying correctness
pub fn build(&self) -> Result<Fletcher<S>, CheckBuilderErr> {
let init = self.init.unwrap_or_else(S::zero);
let addout = self.addout.unwrap_or_else(S::Double::zero);
// note: we only store the half width because it is more useful to us
let hwidth = match self.width {
None => return Err(CheckBuilderErr::MissingParameter("width")),
Some(w) => {
if w % 2 != 0 || w > addout.bits() {
return Err(CheckBuilderErr::ValueOutOfRange("width"));
} else {
w / 2
}
}
};
let mask = (S::Double::one() << hwidth) - S::Double::one();
let module = self.module.unwrap_or_else(S::zero);
let wordsize = self.wordsize.unwrap_or(8);
if wordsize == 0 || wordsize % 8 != 0 || wordsize > 64 {
return Err(CheckBuilderErr::ValueOutOfRange("wordsize"));
}
let wordspec = WordSpec {
input_endian: self.input_endian.unwrap_or(Endian::Big),
wordsize,
output_endian: self.output_endian.unwrap_or(Endian::Big),
};
let mut fletch = Fletcher {
hwidth,
module,
init,
addout,
swap: self.swap.unwrap_or(false),
wordspec,
mask,
name: self.name.clone(),
};
let (mut s, mut c) = fletch.from_compact(addout);
if !module.is_zero() {
s = s % module;
c = c % module;
fletch.init = init % module;
} else {
fletch.init = init;
};
fletch.addout = fletch.to_compact((s, c));
match self.check {
Some(chk) => {
if fletch.digest(&b"123456789"[..]).unwrap() != chk {
println!("{:x?}", fletch.digest(&b"123456789"[..]).unwrap());
Err(CheckBuilderErr::CheckFail)
} else {
Ok(fletch)
}
}
None => Ok(fletch),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Fletcher<Sum: Modnum> {
hwidth: usize,
module: Sum,
init: Sum,
addout: Sum::Double,
swap: bool,
wordspec: WordSpec,
mask: Sum::Double,
name: Option<String>,
}
impl<Sum: Modnum> Display for Fletcher<Sum> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match &self.name {
Some(n) => write!(f, "{}", n),
None => {
write!(
f,
"fletcher width={} module={:#x} init={:#x} addout={:#x} swap={}",
2 * self.hwidth,
self.module,
self.init,
self.addout,
self.swap
)?;
if self.wordspec.word_bytes() != 1 {
write!(
f,
" in_endian={} wordsize={}",
self.wordspec.input_endian, self.wordspec.wordsize
)?;
};
if self.hwidth * 2 > 8 {
write!(f, " out_endian={}", self.wordspec.output_endian)?;
};
Ok(())
}
}
}
}
impl<Sum: Modnum> Fletcher<Sum> {
/// Creates a `FletcherBuilder`, see `FletcherBuilder` documentation for more details.
pub fn with_options() -> FletcherBuilder<Sum> {
FletcherBuilder {
width: None,
module: None,
init: None,
addout: None,
swap: None,
input_endian: None,
output_endian: None,
wordsize: None,
check: None,
name: None,
}
}
fn from_compact(&self, x: Sum::Double) -> (Sum, Sum) {
let l = Sum::from_double(x & self.mask);
let h = Sum::from_double((x >> self.hwidth) & self.mask);
if self.swap {
(h, l)
} else {
(l, h)
}
}
fn to_compact(&self, (s, c): (Sum, Sum)) -> Sum::Double {
let (l, h) = if self.swap { (c, s) } else { (s, c) };
(Sum::Double::from(l) & self.mask) ^ (Sum::Double::from(h) & self.mask) << self.hwidth
}
}
impl<Sum: Modnum> FromStr for FletcherBuilder<Sum> {
/// See documentation of FromStr on Fletcher<Sum>
fn from_str(s: &str) -> Result<FletcherBuilder<Sum>, CheckBuilderErr> {
let mut fletch = Fletcher::<Sum>::with_options();
for x in KeyValIter::new(s) {
let (current_key, current_val) = match x {
Err(key) => return Err(CheckBuilderErr::MalformedString(key)),
Ok(s) => s,
};
let fletch_op = match current_key.as_str() {
"width" => usize::from_str(&current_val).ok().map(|x| fletch.width(x)),
"module" => Sum::from_hex(&current_val).ok().map(|x| fletch.module(x)),
"init" => Sum::from_hex(&current_val).ok().map(|x| fletch.init(x)),
"addout" => Sum::Double::from_hex(&current_val)
.ok()
.map(|x| fletch.addout(x)),
"swap" => bool::from_str(&current_val).ok().map(|x| fletch.swap(x)),
"in_endian" => Endian::from_str(&current_val)
.ok()
.map(|x| fletch.inendian(x)),
"wordsize" => usize::from_str(&current_val)
.ok()
.map(|x| fletch.wordsize(x)),
"out_endian" => Endian::from_str(&current_val)
.ok()
.map(|x| fletch.outendian(x)),
"check" => Sum::Double::from_hex(&current_val)
.ok()
.map(|x| fletch.check(x)),
"name" => Some(fletch.name(&current_val)),
_ => return Err(CheckBuilderErr::UnknownKey(current_key)),
};
match fletch_op {
Some(f) => fletch = f.clone(),
None => return Err(CheckBuilderErr::MalformedString(current_key)),
}
}
Ok(fletch)
}
type Err = CheckBuilderErr;
}
impl<Sum: Modnum> FromStr for Fletcher<Sum> {
/// Construct a new fletcher sum algorithm from a string.
/// Note that all parameters except width are in hexadecimal.
///
/// Example:
///
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// # use std::str::FromStr;
/// Fletcher::<u32>::from_str("width=32 init=1 module=0xfff1 name=\"adler-32\"").is_ok();
/// ```
fn from_str(s: &str) -> Result<Fletcher<Sum>, CheckBuilderErr> {
FletcherBuilder::<Sum>::from_str(s)?.build()
}
type Err = CheckBuilderErr;
}
impl<S: Modnum> Digest for Fletcher<S> {
type Sum = S::Double;
fn init(&self) -> Self::Sum {
self.to_compact((self.init, S::zero()))
}
fn dig_word(&self, sum: Self::Sum, word: u64) -> Self::Sum {
let (mut s, mut c) = self.from_compact(sum);
let modword = S::mod_from(word, &self.module);
s = S::add_mod(s, &modword, &self.module);
c = S::add_mod(c, &s, &self.module);
self.to_compact((s, c))
}
fn finalize(&self, sum: Self::Sum) -> Self::Sum {
self.add(sum, &self.addout)
}
fn to_bytes(&self, s: Self::Sum) -> Vec<u8> {
self.wordspec.output_to_bytes(s, 2 * self.hwidth)
}
fn wordspec(&self) -> WordSpec {
self.wordspec
}
}
impl<S: Modnum> LinearCheck for Fletcher<S> {
type Shift = S;
fn init_shift(&self) -> Self::Shift {
S::zero()
}
fn inc_shift(&self, shift: Self::Shift) -> Self::Shift {
S::add_mod(shift, &S::one(), &self.module)
}
fn shift(&self, sum: Self::Sum, shift: &Self::Shift) -> Self::Sum {
let (s, mut c) = self.from_compact(sum);
let shift_diff = S::mul_mod(s, shift, &self.module);
c = S::add_mod(c, &shift_diff, &self.module);
self.to_compact((s, c))
}
fn add(&self, sum_a: Self::Sum, sum_b: &Self::Sum) -> Self::Sum {
let (sa, ca) = self.from_compact(sum_a);
let (sb, cb) = self.from_compact(*sum_b);
let sum_s = sa.add_mod(&sb, &self.module);
let sum_c = ca.add_mod(&cb, &self.module);
self.to_compact((sum_s, sum_c))
}
fn negate(&self, sum: Self::Sum) -> Self::Sum {
let (s, c) = self.from_compact(sum);
self.to_compact((s.neg_mod(&self.module), c.neg_mod(&self.module)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::checksum::tests::{check_example, test_find, test_prop, test_shifts};
use std::str::FromStr;
#[test]
fn adler32() {
let adel = Fletcher::<u16>::with_options()
.width(32)
.init(1)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&adel);
test_find(&adel);
test_prop(&adel);
check_example(&adel, 0x81bfd25f);
let nobel = Fletcher::with_options()
.width(32)
.init(1u32)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&nobel);
test_find(&nobel);
test_prop(&adel);
check_example(&nobel, 0x81bfd25f);
}
#[test]
fn fletcher16() {
let f16 = Fletcher::with_options()
.width(16)
.module(0xffu8)
.check(0x1ede)
.build()
.unwrap();
test_shifts(&f16);
test_find(&f16);
test_prop(&f16);
check_example(&f16, 0x7815);
}
#[test]
fn fletcher8() {
let f8 = Fletcher::<u8>::from_str("width=8 module=f init=0 addout=0 swap=false check=0xc")
.unwrap();
test_shifts(&f8);
test_prop(&f8);
check_example(&f8, 0x6); | }
} | random_line_split |
|
mod.rs | //! A builder for a Fletcher-like algorithm.
//!
//! The basic functionality of this algorithm is:
//! * there is a sum which is just the bytes summed modulo some number
//! * there is also a second sum which is the sum of all of the normal sums (modulo the same number)
//!
//! Note that the word sizes are currently only `u8`.
//!
//! It works roughly like this:
//! ```
//! # fn check(file: &[u8]) -> u32 {
//! # let module = 0xfff1u32;
//! # let init = 1;
//! # let (addout1, addout2) = (0, 0);
//! # let hwidth = 16;
//! let mut sum1 = init;
//! let mut sum2 = 0;
//! for byte in file {
//! sum1 = (sum1 + *byte as u32) % module;
//! sum2 = (sum2 + sum1) % module;
//! }
//! return (sum2 + addout2) % module << hwidth | (sum1 + addout1) % module;
//! # }
//! ```
//! Normally, the sum is represented as the cumulative sum bitshifted to be above the regular sum.
//! This representation will be referred to as "compact".
//!
//! These are the parameters:
//! * width: Total number of bits of the checksum (twice the amount of bits of the individual sums)
//! * module: The number by which both sums get reduced
//! * init: The initial value of the regular sum
//! * addout: The value that gets added at the end, compact
//! * swap: Whether to swap the values in the compact representation, i.e. put the regular sum above the cumulative sum
//! * check: The checksum of the bytes "123456789", checked to be correct on build
//! * name: The name to be used when displaying the algorithm (optional)
//!
//! Note that the `init` parameter, unlike the `addout` parameter, is not compact and is only added to the regular sum,
//! as for the cumulative sum, it is equivalent to the addout (so you can just add the cumulative `init` to the cumulative `addout`).
mod rev;
use crate::bitnum::{BitNum, Modnum};
use crate::checksum::{CheckBuilderErr, Digest, LinearCheck};
use crate::endian::{Endian, WordSpec};
use crate::keyval::KeyValIter;
use num_traits::{One, Zero};
pub use rev::reverse_fletcher;
#[cfg(feature = "parallel")]
pub use rev::reverse_fletcher_para;
use std::fmt::Display;
use std::str::FromStr;
/// A builder for a fletcher.
///
/// One can use it for specifying a fletcher algorithm, which can be used for checksumming.
///
/// Example:
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// let adler32 = Fletcher::<u32>::with_options()
/// .width(32)
/// .init(1)
/// .module(65521)
/// .check(0x091e01de)
/// .name("adler32")
/// .build()
/// .is_ok();
/// ```
#[derive(Clone, Debug)]
pub struct FletcherBuilder<Sum: Modnum> {
width: Option<usize>,
module: Option<Sum>,
init: Option<Sum>,
addout: Option<Sum::Double>,
swap: Option<bool>,
input_endian: Option<Endian>,
output_endian: Option<Endian>,
wordsize: Option<usize>,
check: Option<Sum::Double>,
name: Option<String>,
}
impl<S: Modnum> FletcherBuilder<S> {
/// Sets the width of the type (both sums included, must be even, mandatory)
pub fn width(&mut self, w: usize) -> &mut Self {
self.width = Some(w);
self
}
/// Sets the module of both sums (mandatory)
pub fn module(&mut self, m: S) -> &mut Self {
self.module = Some(m);
self
}
/// Sets the initial value
///
/// Contains one value for the regular sum.
pub fn init(&mut self, i: S) -> &mut Self {
self.init = Some(i);
self
}
/// Sets a value that gets added after the checksum is finished
///
/// Contains separate values for both sums, the cumulative one is bitshifted
pub fn addout(&mut self, o: S::Double) -> &mut Self {
self.addout = Some(o);
self
}
/// Normally, the cumulative sum is saved on the higher bits and the normal sum in the lower bits.
/// Setting this option to true swaps the positions.
pub fn swap(&mut self, s: bool) -> &mut Self {
self.swap = Some(s);
self
}
/// The endian of the words of the input file
pub fn inendian(&mut self, e: Endian) -> &mut Self {
self.input_endian = Some(e);
self
}
/// The number of bits in a word of the input file
pub fn wordsize(&mut self, n: usize) -> &mut Self {
self.wordsize = Some(n);
self
}
/// The endian of the checksum
pub fn outendian(&mut self, e: Endian) -> &mut Self {
self.output_endian = Some(e);
self
}
/// Checks whether c is the same as the checksum of "123456789" on creation
pub fn check(&mut self, c: S::Double) -> &mut Self {
self.check = Some(c);
self
}
/// A name to be displayed
pub fn name(&mut self, n: &str) -> &mut Self {
self.name = Some(String::from(n));
self
}
/// Returns the Fletcher object after verifying correctness
pub fn build(&self) -> Result<Fletcher<S>, CheckBuilderErr> {
let init = self.init.unwrap_or_else(S::zero);
let addout = self.addout.unwrap_or_else(S::Double::zero);
// note: we only store the half width because it is more useful to us
let hwidth = match self.width {
None => return Err(CheckBuilderErr::MissingParameter("width")),
Some(w) => {
if w % 2 != 0 || w > addout.bits() {
return Err(CheckBuilderErr::ValueOutOfRange("width"));
} else {
w / 2
}
}
};
let mask = (S::Double::one() << hwidth) - S::Double::one();
let module = self.module.unwrap_or_else(S::zero);
let wordsize = self.wordsize.unwrap_or(8);
if wordsize == 0 || wordsize % 8 != 0 || wordsize > 64 {
return Err(CheckBuilderErr::ValueOutOfRange("wordsize"));
}
let wordspec = WordSpec {
input_endian: self.input_endian.unwrap_or(Endian::Big),
wordsize,
output_endian: self.output_endian.unwrap_or(Endian::Big),
};
let mut fletch = Fletcher {
hwidth,
module,
init,
addout,
swap: self.swap.unwrap_or(false),
wordspec,
mask,
name: self.name.clone(),
};
let (mut s, mut c) = fletch.from_compact(addout);
if !module.is_zero() {
s = s % module;
c = c % module;
fletch.init = init % module;
} else | ;
fletch.addout = fletch.to_compact((s, c));
match self.check {
Some(chk) => {
if fletch.digest(&b"123456789"[..]).unwrap() != chk {
println!("{:x?}", fletch.digest(&b"123456789"[..]).unwrap());
Err(CheckBuilderErr::CheckFail)
} else {
Ok(fletch)
}
}
None => Ok(fletch),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Fletcher<Sum: Modnum> {
hwidth: usize,
module: Sum,
init: Sum,
addout: Sum::Double,
swap: bool,
wordspec: WordSpec,
mask: Sum::Double,
name: Option<String>,
}
impl<Sum: Modnum> Display for Fletcher<Sum> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match &self.name {
Some(n) => write!(f, "{}", n),
None => {
write!(
f,
"fletcher width={} module={:#x} init={:#x} addout={:#x} swap={}",
2 * self.hwidth,
self.module,
self.init,
self.addout,
self.swap
)?;
if self.wordspec.word_bytes() != 1 {
write!(
f,
" in_endian={} wordsize={}",
self.wordspec.input_endian, self.wordspec.wordsize
)?;
};
if self.hwidth * 2 > 8 {
write!(f, " out_endian={}", self.wordspec.output_endian)?;
};
Ok(())
}
}
}
}
impl<Sum: Modnum> Fletcher<Sum> {
/// Creates a `FletcherBuilder`, see `FletcherBuilder` documentation for more details.
pub fn with_options() -> FletcherBuilder<Sum> {
FletcherBuilder {
width: None,
module: None,
init: None,
addout: None,
swap: None,
input_endian: None,
output_endian: None,
wordsize: None,
check: None,
name: None,
}
}
fn from_compact(&self, x: Sum::Double) -> (Sum, Sum) {
let l = Sum::from_double(x & self.mask);
let h = Sum::from_double((x >> self.hwidth) & self.mask);
if self.swap {
(h, l)
} else {
(l, h)
}
}
fn to_compact(&self, (s, c): (Sum, Sum)) -> Sum::Double {
let (l, h) = if self.swap { (c, s) } else { (s, c) };
(Sum::Double::from(l) & self.mask) ^ (Sum::Double::from(h) & self.mask) << self.hwidth
}
}
impl<Sum: Modnum> FromStr for FletcherBuilder<Sum> {
/// See documentation of FromStr on Fletcher<Sum>
fn from_str(s: &str) -> Result<FletcherBuilder<Sum>, CheckBuilderErr> {
let mut fletch = Fletcher::<Sum>::with_options();
for x in KeyValIter::new(s) {
let (current_key, current_val) = match x {
Err(key) => return Err(CheckBuilderErr::MalformedString(key)),
Ok(s) => s,
};
let fletch_op = match current_key.as_str() {
"width" => usize::from_str(&current_val).ok().map(|x| fletch.width(x)),
"module" => Sum::from_hex(&current_val).ok().map(|x| fletch.module(x)),
"init" => Sum::from_hex(&current_val).ok().map(|x| fletch.init(x)),
"addout" => Sum::Double::from_hex(&current_val)
.ok()
.map(|x| fletch.addout(x)),
"swap" => bool::from_str(&current_val).ok().map(|x| fletch.swap(x)),
"in_endian" => Endian::from_str(&current_val)
.ok()
.map(|x| fletch.inendian(x)),
"wordsize" => usize::from_str(&current_val)
.ok()
.map(|x| fletch.wordsize(x)),
"out_endian" => Endian::from_str(&current_val)
.ok()
.map(|x| fletch.outendian(x)),
"check" => Sum::Double::from_hex(&current_val)
.ok()
.map(|x| fletch.check(x)),
"name" => Some(fletch.name(&current_val)),
_ => return Err(CheckBuilderErr::UnknownKey(current_key)),
};
match fletch_op {
Some(f) => fletch = f.clone(),
None => return Err(CheckBuilderErr::MalformedString(current_key)),
}
}
Ok(fletch)
}
type Err = CheckBuilderErr;
}
impl<Sum: Modnum> FromStr for Fletcher<Sum> {
/// Construct a new fletcher sum algorithm from a string.
/// Note that all parameters except width are in hexadecimal.
///
/// Example:
///
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// # use std::str::FromStr;
/// Fletcher::<u32>::from_str("width=32 init=1 module=0xfff1 name=\"adler-32\"").is_ok();
/// ```
fn from_str(s: &str) -> Result<Fletcher<Sum>, CheckBuilderErr> {
FletcherBuilder::<Sum>::from_str(s)?.build()
}
type Err = CheckBuilderErr;
}
impl<S: Modnum> Digest for Fletcher<S> {
type Sum = S::Double;
fn init(&self) -> Self::Sum {
self.to_compact((self.init, S::zero()))
}
fn dig_word(&self, sum: Self::Sum, word: u64) -> Self::Sum {
let (mut s, mut c) = self.from_compact(sum);
let modword = S::mod_from(word, &self.module);
s = S::add_mod(s, &modword, &self.module);
c = S::add_mod(c, &s, &self.module);
self.to_compact((s, c))
}
fn finalize(&self, sum: Self::Sum) -> Self::Sum {
self.add(sum, &self.addout)
}
fn to_bytes(&self, s: Self::Sum) -> Vec<u8> {
self.wordspec.output_to_bytes(s, 2 * self.hwidth)
}
fn wordspec(&self) -> WordSpec {
self.wordspec
}
}
impl<S: Modnum> LinearCheck for Fletcher<S> {
type Shift = S;
fn init_shift(&self) -> Self::Shift {
S::zero()
}
fn inc_shift(&self, shift: Self::Shift) -> Self::Shift {
S::add_mod(shift, &S::one(), &self.module)
}
fn shift(&self, sum: Self::Sum, shift: &Self::Shift) -> Self::Sum {
let (s, mut c) = self.from_compact(sum);
let shift_diff = S::mul_mod(s, shift, &self.module);
c = S::add_mod(c, &shift_diff, &self.module);
self.to_compact((s, c))
}
fn add(&self, sum_a: Self::Sum, sum_b: &Self::Sum) -> Self::Sum {
let (sa, ca) = self.from_compact(sum_a);
let (sb, cb) = self.from_compact(*sum_b);
let sum_s = sa.add_mod(&sb, &self.module);
let sum_c = ca.add_mod(&cb, &self.module);
self.to_compact((sum_s, sum_c))
}
fn negate(&self, sum: Self::Sum) -> Self::Sum {
let (s, c) = self.from_compact(sum);
self.to_compact((s.neg_mod(&self.module), c.neg_mod(&self.module)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::checksum::tests::{check_example, test_find, test_prop, test_shifts};
use std::str::FromStr;
#[test]
fn adler32() {
let adel = Fletcher::<u16>::with_options()
.width(32)
.init(1)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&adel);
test_find(&adel);
test_prop(&adel);
check_example(&adel, 0x81bfd25f);
let nobel = Fletcher::with_options()
.width(32)
.init(1u32)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&nobel);
test_find(&nobel);
test_prop(&adel);
check_example(&nobel, 0x81bfd25f);
}
#[test]
fn fletcher16() {
let f16 = Fletcher::with_options()
.width(16)
.module(0xffu8)
.check(0x1ede)
.build()
.unwrap();
test_shifts(&f16);
test_find(&f16);
test_prop(&f16);
check_example(&f16, 0x7815);
}
#[test]
fn fletcher8() {
let f8 = Fletcher::<u8>::from_str("width=8 module=f init=0 addout=0 swap=false check=0xc")
.unwrap();
test_shifts(&f8);
test_prop(&f8);
check_example(&f8, 0x6);
}
}
| {
fletch.init = init;
} | conditional_block |
RF.py | # -*- coding: utf-8 -*-
"""RandomForest.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fLF-UltTb6lDISECxqmX_PMPXqH2B7sl
"""
## IMPORTS
import numpy as np
import pandas as pd
import scipy
from scipy import signal
from scipy.io import loadmat
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils import shuffle
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load in full data set from MATLAB
data = loadmat('cis519project_data.mat')
# Store data into arrays
full_dg_p1 = data['full_dg_p1']; full_dg_p2 = data['full_dg_p2']; full_dg_p3 = data['full_dg_p3']
full_ecog_p1 = data['full_ecog_p1']; full_ecog_p2 = data['full_ecog_p2']; full_ecog_p3 = data['full_ecog_p3']
"""Prepare Labels """
# make individual finger flexion arrays
dg_finger1 = []; dg_finger2 = []; dg_finger3 = []; dg_finger4 = []; dg_finger5 = []
for array in full_dg_p1:
dg_finger1.append(array[0])
dg_finger2.append(array[1])
dg_finger3.append(array[2])
dg_finger4.append(array[3])
dg_finger5.append(array[4])
# make arrays 500 samples (500 ms) long with a 250-sample (250 ms) sliding window
# time windows should be: (xLen/fs - winLen + winDisp)/winDisp --> (300000/1000 - 0.5 + 0.25)/0.25 = 1199
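# quick sanity check of the window-count formula above (a sketch; assumes fs = 1000 Hz,
# 500 ms windows and 250 ms displacement as used below)
fs, win_len, win_disp = 1000, 0.5, 0.25
print(int((len(dg_finger1) / fs - win_len + win_disp) / win_disp))  # expect 1199 windows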
pop1 = dg_finger1; pop2 = dg_finger2; pop3 = dg_finger3; pop4 = dg_finger4; pop5 = dg_finger5
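# note: pop1..pop5 are aliases of (not copies of) the dg_finger lists, so the pops below consume
# the original traces; use list(dg_finger1) etc. if the full-length signals are needed again later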
windows1 = []; windows2 = []; windows3 = []; windows4 = []; windows5 = []
while len(pop1) >= 500:
temp1 = pop1[0:500]; temp2 = pop2[0:500]; temp3 = pop3[0:500]
temp4 = pop4[0:500]; temp5 = pop5[0:500]
windows1.append(temp1); windows2.append(temp2); windows3.append(temp3)
windows4.append(temp4); windows5.append(temp5)
for pop_amount in range(250):
pop1.pop(0); pop2.pop(0); pop3.pop(0); pop4.pop(0); pop5.pop(0)
# make arrays to track how much each finger changes in each time window
change1 = []; change2 = []; change3 =[]; change4 = []; change5 =[]
for window in windows1:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change1.append(temp_change)
for window in windows2:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change2.append(temp_change)
for window in windows3:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change3.append(temp_change)
for window in windows4:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change4.append(temp_change)
for window in windows5:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change5.append(temp_change)
# find where there are changes over a time frame for at least one of the fingers
nonzero_indicies = []
for index in range(len(change1)):
if (change1[index] + change2[index] + change3[index] + change4[index] + change5[index]) != 0:
nonzero_indicies.append(index)
# every time interval has some change in at least one finger --> no need to reduce
# make labels by finding highest change in each interval
list_of_changes = []
for i in range(len(change1)):
temp = []
temp.append(change1[i])
temp.append(change2[i])
temp.append(change3[i])
temp.append(change4[i])
temp.append(change5[i])
list_of_changes.append(temp)
labels_list = []
for five_set in list_of_changes:
labels_list.append(five_set.index(max(five_set)))
# one hot encode these labels
labels_df = pd.DataFrame(data = labels_list)
labels = np.array(labels_df)
labels = labels.reshape(len(labels_list),)
labels_one_hot = to_categorical(labels)
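# e.g. a label of 2 (finger 3 changed most) becomes [0., 0., 1., 0., 0.]; labels_one_hot is only
# used below to size ECOG_windows, the random forest itself is trained on the integer labels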
"""Prepare Inputs"""
# find which channels are the most variant over the entire time scale
full_ecog_p1_list = full_ecog_p1.tolist()
ecog_df = pd.DataFrame(full_ecog_p1_list)
big_list_of_channels = []
for i in range(ecog_df.shape[1]):
big_list_of_channels.append(ecog_df[i].tolist())
channel_changes = []
for channel in big_list_of_channels:
temp = 0
for i in range(len(channel)-1):
temp += abs(channel[i+1] - channel[i])
channel_changes.append(temp)
# all channels have similar change over entire time scale
# filter the data
numerator, denominator = scipy.signal.butter(5,(2*200)/1000)
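# 5th-order low-pass Butterworth; Wn = (2*200)/1000 = 0.4 of Nyquist (fs/2 = 500 Hz), i.e. a 200 Hz cutoff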
for i in range(62):
ecog_df[i] = scipy.signal.lfilter(numerator,denominator,ecog_df[i].tolist())
# get into arrays consistent with outputs
for i in range(len(ecog_df)):
full_ecog_p1_list[i] = ecog_df.loc[i].tolist()
np.shape(full_ecog_p1_list)
ECOG_windows = np.zeros((len(labels_one_hot),500,62))
count = 0
while len(full_ecog_p1_list) >= 500:
ECOG_windows[count,:,:] = full_ecog_p1_list[0:500]
for pop_amount in range(250):
full_ecog_p1_list.pop(0)
count += 1
np.shape(ECOG_windows)
## CALCULATE FEATURES
def bandpower(x, fmin, fmax):
f, Pxx = scipy.signal.periodogram(x, fs=1000)
ind_min = np.argmax(f > fmin) - 1
ind_max = np.argmax(f > fmax) - 1
return np.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
def line_length(x):
return(sum(abs(np.diff(x))))
def area(x):
return(sum(abs(x)))
def energy(x):
return(sum(np.square(x)))
def dc_gain(x):
return(np.mean(x))
def zero_crossings(x):
return(sum(x > np.mean(x)))
def peak_volt(x):
return(np.max(x))
def variance(x):
return(np.std(x)**2)
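# illustrative spot-check of the feature helpers on one window/channel (a sketch; assumes
# ECOG_windows was populated as above, with channel 0 / window 0 chosen arbitrarily)
example = ECOG_windows[0, :, 0]
print(bandpower(example, 8, 12), line_length(example), energy(example), peak_volt(example))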
feat_names = ['BP 8-12', 'BP 18-24', 'BP 75-115', 'BP 125-159', 'BP 160-180', 'Line Length', 'Area', 'Energy', 'DC Gain', 'Zero Crossings', 'Peak Voltage', 'Variance']
n_feats = 12
n_channels = 62
batch_size = 40
batch_ct = len(change1);
features = np.zeros((batch_ct, n_channels, n_feats))
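# features has shape (n_windows, n_channels, n_feats) = (1199, 62, 12) for the 300000-sample recording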
for chan in range(n_channels):
for idx in range(batch_ct):
x = ECOG_windows[idx,:,chan]
features[idx,chan,0] = bandpower(x, 8, 12)
features[idx,chan,1] = bandpower(x, 18, 24)
features[idx,chan,2] = bandpower(x, 75, 115)
features[idx,chan,3] = bandpower(x, 125, 159)
features[idx,chan,4] = bandpower(x, 160, 180)
features[idx,chan,5] = line_length(x)
features[idx,chan,6] = area(x)
features[idx,chan,7] = energy(x)
features[idx,chan,8] = dc_gain(x)
features[idx,chan,9] = zero_crossings(x)
features[idx,chan,10] = peak_volt(x)
features[idx,chan,11] = variance(x)
pd.DataFrame(features[:,chan,:],columns=feat_names).to_csv('Features/feats_500ms_'+str(chan),index=False)
# reduce dimensionality of feature matrix | for chan in range(n_channels):
for feat in range(n_feats):
features_2d[idx, chan + feat*n_channels] = features[idx,chan,feat]
# scale
scl = MinMaxScaler()
features_2d = scl.fit_transform(pd.DataFrame(features_2d))
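# note: the scaler is fit on all windows before the train/test split below, so test-set statistics
# leak into the scaling; fitting on X_train only and then transforming X_test would be stricter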
"""Final Preprocessing and Train Test Split """
X_train = features_2d[0:960]
Y_train = labels_list[0:960]
X_test = features_2d[960:]
Y_test = labels_list[960:]
# shuffle data
X_train_shuff, Y_train_shuff = shuffle(X_train,Y_train)
X_test_shuff, Y_test_shuff = shuffle(X_test,Y_test)
print(np.shape(X_train))
print(np.shape(Y_train))
print(np.shape(X_test))
print(np.shape(Y_test))
"""Train Model
"""
# train model
rf_model = RandomForestClassifier(max_leaf_nodes=70)
rf_model.fit(X_train_shuff, Y_train_shuff)
# get predictions
train_predictions = rf_model.predict(X_train_shuff)
test_predictions = rf_model.predict(X_test_shuff)
# get accuracy
train_score = rf_model.score(X_train_shuff, Y_train_shuff)
print('Train Accuracy:')
print(train_score)
test_score = rf_model.score(X_test_shuff, Y_test_shuff)
print('Test Accuracy:')
print(test_score)
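# the per-window predictions above are not used further; a minimal sketch of a per-class breakdown
# (assumes sklearn's confusion_matrix; rows = true finger class, columns = predicted class)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(Y_test_shuff, test_predictions))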
# Optimize Max Leaf Nodes
max_leaves = [2,3,4,5,6,7,8,10,12,15,20,25,30,40,50,75,100,150,200,250]
train_scores = []
test_scores = []
for max_leaf_nodes in max_leaves:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=max_leaf_nodes)
model.fit(X_train_shuff, Y_train_shuff)
train_score = model.score(X_train_shuff, Y_train_shuff)
test_score = model.score(X_test_shuff, Y_test_shuff)
train_scores.append(train_score)
test_scores.append(test_score)
print(str(max_leaf_nodes)+' leaves, train:'+str(train_score)+', test:'+str(test_score))
# plot
plt.plot(train_scores)
plt.plot(test_scores)
plt.xticks(range(len(max_leaves)), max_leaves)
plt.legend(['Train Acc','Test Acc'])
plt.xlabel('Max Leaf nodes')
plt.ylabel('Accuracy')
print(max(test_scores))
## since fingers 3 and 4 are highly correlated, maybe change the class_weight parameter
# Optimize Max Depth
max_depths = [2,3,4,5,6,7,8,10,12,15,20,25,30,40,50,75,100]
train_scores = []
test_scores = []
for max_depth in max_depths:
# shuffle train test set each time???
model = RandomForestClassifier(max_depth=max_depth)
model.fit(X_train_shuff, Y_train_shuff)
train_score = model.score(X_train_shuff, Y_train_shuff)
test_score = model.score(X_test_shuff, Y_test_shuff)
train_scores.append(train_score)
test_scores.append(test_score)
print(str(max_depth)+' splits, train:'+str(train_score)+', test:'+str(test_score))
# plot
plt.plot(train_scores)
plt.plot(test_scores)
plt.xticks(range(len(max_depths)), max_depths)
plt.legend(['Train Acc','Test Acc'])
plt.xlabel('Max Tree Depth')
plt.ylabel('Accuracy')
# Optimize N Estimators
n_ests = [2,4,6,8,10,50,100,150,200,250,300,400,500,750,1000,2000]
train_scores = []
test_scores = []
for n_est in n_ests:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=20,n_estimators=n_est)
model.fit(X_train_shuff, Y_train_shuff)
train_score = model.score(X_train_shuff, Y_train_shuff)
test_score = model.score(X_test_shuff, Y_test_shuff)
train_scores.append(train_score)
test_scores.append(test_score)
print(str(max_leaf_nodes)+' leaf nodes, '+str(n_est)+' trees, train:'+str(train_score)+', test:'+str(test_score))
# plot
plt.plot(train_scores)
plt.plot(test_scores)
plt.xticks(range(len(n_ests)), n_ests)
plt.legend(['Train Acc','Test Acc'])
plt.xlabel('Number of Trees')
plt.ylabel('Accuracy')
# Optimize N Estimators and Max Leaf Nodes
n_ests = [10,50,100,150,200,250,300,400,500,750,1000]
train_scores = []
test_scores = []
for max_leaf_nodes in max_leaves:
for n_est in n_ests:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=max_leaf_nodes,n_estimators=n_est)
model.fit(X_train_shuff, Y_train_shuff)
train_score = model.score(X_train_shuff, Y_train_shuff)
test_score = model.score(X_test_shuff, Y_test_shuff)
train_scores.append(train_score)
test_scores.append(test_score)
print(str(max_leaf_nodes)+' leaf nodes, '+str(n_est)+' trees, train:'+str(train_score)+', test:'+str(test_score))
print(max(test_scores))
# plot
plt.plot(train_scores)
plt.plot(test_scores)
#plt.xticks(range(len(n_ests)), n_ests)
plt.legend(['Train Acc','Test Acc'])
plt.xlabel('Number of Trees')
plt.ylabel('Accuracy')
# FINAL OPTIMIZED MODEL
final_model = RandomForestClassifier(max_leaf_nodes=50,n_estimators=250)
final_model.fit(X_train_shuff, Y_train_shuff)
# get accuracy
train_score = final_model.score(X_train_shuff, Y_train_shuff)
print('Train Accuracy:')
print(train_score)
test_score = final_model.score(X_test_shuff, Y_test_shuff)
print('Test Accuracy:')
print(test_score) | features_2d = np.zeros((batch_ct, n_channels*n_feats))
for idx in range(batch_ct): | random_line_split |
RF.py | # -*- coding: utf-8 -*-
"""RandomForest.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fLF-UltTb6lDISECxqmX_PMPXqH2B7sl
"""
## IMPORTS
import numpy as np
import pandas as pd
import scipy
from scipy import signal
from scipy.io import loadmat
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils import shuffle
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load in full data set from MATLAB
data = loadmat('cis519project_data.mat')
# Store data into arrays
full_dg_p1 = data['full_dg_p1']; full_dg_p2 = data['full_dg_p2']; full_dg_p3 = data['full_dg_p3']
full_ecog_p1 = data['full_ecog_p1']; full_ecog_p2 = data['full_ecog_p2']; full_ecog_p3 = data['full_ecog_p3']
"""Prepare Labels """
# make individual finger flexion arrays
dg_finger1 = []; dg_finger2 = []; dg_finger3 = []; dg_finger4 = []; dg_finger5 = []
for array in full_dg_p1:
dg_finger1.append(array[0])
dg_finger2.append(array[1])
dg_finger3.append(array[2])
dg_finger4.append(array[3])
dg_finger5.append(array[4])
# make arrays 500 samples (500 ms) long with a 250-sample (250 ms) sliding window
# time windows should be: (xLen/fs - winLen + winDisp)/winDisp --> (300000/1000 - 0.5 + 0.25)/0.25 = 1199
pop1 = dg_finger1; pop2 = dg_finger2; pop3 = dg_finger3; pop4 = dg_finger4; pop5 = dg_finger5
windows1 = []; windows2 = []; windows3 = []; windows4 = []; windows5 = []
while len(pop1) >= 500:
temp1 = pop1[0:500]; temp2 = pop2[0:500]; temp3 = pop3[0:500]
temp4 = pop4[0:500]; temp5 = pop5[0:500]
windows1.append(temp1); windows2.append(temp2); windows3.append(temp3)
windows4.append(temp4); windows5.append(temp5)
for pop_amount in range(250):
pop1.pop(0); pop2.pop(0); pop3.pop(0); pop4.pop(0); pop5.pop(0)
# make arrays to track how much each finger changes in each time window
change1 = []; change2 = []; change3 =[]; change4 = []; change5 =[]
for window in windows1:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change1.append(temp_change)
for window in windows2:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change2.append(temp_change)
for window in windows3:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change3.append(temp_change)
for window in windows4:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change4.append(temp_change)
for window in windows5:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change5.append(temp_change)
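# --- editor's sketch (hedged): the five loops above compute, per window, the summed
# absolute sample-to-sample change of each finger. The same quantity can be obtained
# in one vectorised expression; `change1_np` is an illustrative, unused name.
change1_np = np.abs(np.diff(np.asarray(windows1), axis=1)).sum(axis=1)
# change1_np matches change1 entry for entry; the analogous call on windows2..windows5
# reproduces change2..change5.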
# find where there are changes over a time frame for at least one of the fingers
nonzero_indices = []
for index in range(len(change1)):
    if (change1[index] + change2[index] + change3[index] + change4[index] + change5[index]) != 0:
        nonzero_indices.append(index)
# every time interval has some change in at least one finger --> no need to reduce
# make labels by finding highest change in each interval
list_of_changes = []
for i in range(len(change1)):
temp = []
temp.append(change1[i])
temp.append(change2[i])
temp.append(change3[i])
temp.append(change4[i])
temp.append(change5[i])
list_of_changes.append(temp)
labels_list = []
for five_set in list_of_changes:
labels_list.append(five_set.index(max(five_set)))
# one hot encode these labels
labels_df = pd.DataFrame(data = labels_list)
labels = np.array(labels_df)
labels = labels.reshape(len(labels_list),)
labels_one_hot = to_categorical(labels)
"""Prepare Inputs"""
# find which channels are the most variant over the entire time scale
full_ecog_p1_list = full_ecog_p1.tolist()
ecog_df = pd.DataFrame(full_ecog_p1_list)
big_list_of_channels = []
for i in range(ecog_df.shape[1]):
big_list_of_channels.append(ecog_df[i].tolist())
channel_changes = []
for channel in big_list_of_channels:
temp = 0
for i in range(len(channel)-1):
temp += abs(channel[i+1] - channel[i])
channel_changes.append(temp)
# all channels have similar change over entire time scale
# filter the data
numerator, denominator = scipy.signal.butter(5,(2*200)/1000)
for i in range(62):
ecog_df[i] = scipy.signal.lfilter(numerator,denominator,ecog_df[i].tolist())
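# --- editor's note (hedged): butter(5, 0.4) above is a 5th-order low-pass whose
# cutoff is 0.4 x Nyquist = 200 Hz at this recording's 1000 Hz sampling rate.
# On scipy versions that accept an fs argument, an equivalent, more explicit call
# would be (sketch only, not used below):
# b, a = scipy.signal.butter(5, 200, btype='low', fs=1000)
# scipy.signal.filtfilt(b, a, ...) would additionally give zero-phase filtering;
# the original pipeline keeps the causal lfilter call.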
# get into arrays consistent with outputs
for i in range(len(ecog_df)):
full_ecog_p1_list[i] = ecog_df.loc[i].tolist()
np.shape(full_ecog_p1_list)
ECOG_windows = np.zeros((len(labels_one_hot),500,62))
count = 0
while len(full_ecog_p1_list) >= 500:
ECOG_windows[count,:,:] = full_ecog_p1_list[0:500]
for pop_amount in range(250):
full_ecog_p1_list.pop(0)
count += 1
np.shape(ECOG_windows)
## CALCULATE FEATURES
def bandpower(x, fmin, fmax):
f, Pxx = scipy.signal.periodogram(x, fs=1000)
ind_min = np.argmax(f > fmin) - 1
ind_max = np.argmax(f > fmax) - 1
return np.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
def line_length(x):
return(sum(abs(np.diff(x))))
def area(x):
return(sum(abs(x)))
def energy(x):
return(sum(np.square(x)))
def dc_gain(x):
return(np.mean(x))
def zero_crossings(x):
    # count sign changes about the mean (the original counted samples above the mean)
    return(sum(np.diff(np.sign(x - np.mean(x))) != 0))
def peak_volt(x):
    return(np.max(x))
def variance(x):
return(np.std(x)**2)
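# --- editor's sanity check (hedged): quick illustration of the helpers on a synthetic
# 10 Hz unit sine sampled at 1000 Hz; `_t` and `_demo` are editor-invented names and
# the values are not used anywhere in the pipeline.
_t = np.linspace(0, 1, 1000, endpoint=False)
_demo = np.sin(2 * np.pi * 10 * _t)
assert abs(peak_volt(_demo) - 1.0) < 1e-6   # unit amplitude
assert abs(dc_gain(_demo)) < 1e-9           # zero-mean signal
# bandpower(_demo, 8, 12) concentrates essentially all of the signal power,
# while bandpower(_demo, 75, 115) is close to zero.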
feat_names = ['BP 8-12', 'BP 18-24', 'BP 75-115', 'BP 125-159', 'BP 160-180', 'Line Length', 'Area', 'Energy', 'DC Gain', 'Zero Crossings', 'Peak Voltage', 'Variance']
n_feats = 12
n_channels = 62
batch_size = 40
batch_ct = len(change1)
features = np.zeros((batch_ct, n_channels, n_feats))
for chan in range(n_channels):
for idx in range(batch_ct):
x = ECOG_windows[idx,:,chan]
features[idx,chan,0] = bandpower(x, 8, 12)
features[idx,chan,1] = bandpower(x, 18, 24)
features[idx,chan,2] = bandpower(x, 75, 115)
features[idx,chan,3] = bandpower(x, 125, 159)
features[idx,chan,4] = bandpower(x, 160, 180)
features[idx,chan,5] = line_length(x)
features[idx,chan,6] = area(x)
features[idx,chan,7] = energy(x)
features[idx,chan,8] = dc_gain(x)
features[idx,chan,9] = zero_crossings(x)
features[idx,chan,10] = peak_volt(x)
features[idx,chan,11] = variance(x)
pd.DataFrame(features[:,chan,:],columns=feat_names).to_csv('Features/feats_500ms_'+str(chan),index=False)
# reduce dimensionality of feature matrix
features_2d = np.zeros((batch_ct, n_channels*n_feats))
for idx in range(batch_ct):
for chan in range(n_channels):
for feat in range(n_feats):
features_2d[idx, chan + feat*n_channels] = features[idx,chan,feat]
# scale
scl = MinMaxScaler()
features_2d = scl.fit_transform(pd.DataFrame(features_2d))
"""Final Preprocessing and Train Test Split """
X_train = features_2d[0:960]
Y_train = labels_list[0:960]
X_test = features_2d[960:]
Y_test = labels_list[960:]
# shuffle data
X_train_shuff, Y_train_shuff = shuffle(X_train,Y_train)
X_test_shuff, Y_test_shuff = shuffle(X_test,Y_test)
print(np.shape(X_train))
print(np.shape(Y_train))
print(np.shape(X_test))
print(np.shape(Y_test))
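# --- editor's note (hedged): MinMaxScaler above was fitted on the full feature matrix
# before this chronological split, so test-set ranges leak into the scaling. A stricter
# variant would fit on the training rows only, e.g.
# scl = MinMaxScaler().fit(features_2d_raw[0:960])
# X_train = scl.transform(features_2d_raw[0:960]); X_test = scl.transform(features_2d_raw[960:])
# where features_2d_raw stands for the unscaled matrix (editor-invented name); the
# results in this notebook were produced with the original, globally fitted scaler.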
"""Train Model
"""
# train model
rf_model = RandomForestClassifier(max_leaf_nodes=70)
rf_model.fit(X_train_shuff, Y_train_shuff)
# get predictions
train_predictions = rf_model.predict(X_train_shuff)
test_predictions = rf_model.predict(X_test_shuff)
# get accuracy
train_score = rf_model.score(X_train_shuff, Y_train_shuff)
print('Train Accuracy:')
print(train_score)
test_score = rf_model.score(X_test_shuff, Y_test_shuff)
print('Test Accuracy:')
print(test_score)
# Optimize Max Leaf Nodes
max_leaves = [2,3,4,5,6,7,8,10,12,15,20,25,30,40,50,75,100,150,200,250]
train_scores = []
test_scores = []
for max_leaf_nodes in max_leaves:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=max_leaf_nodes)
model.fit(X_train_shuff, Y_train_shuff)
train_score = model.score(X_train_shuff, Y_train_shuff)
test_score = model.score(X_test_shuff, Y_test_shuff)
train_scores.append(train_score)
test_scores.append(test_score)
print(str(max_leaf_nodes)+' leaves, train:'+str(train_score)+', test:'+str(test_score))
# plot
plt.plot(train_scores)
plt.plot(test_scores)
plt.xticks(range(len(max_leaves)), max_leaves)
plt.legend(['Train Acc','Test Acc'])
plt.xlabel('Max Leaf nodes')
plt.ylabel('Accuracy')
print(max(test_scores))
## since fingers 3 and 4 are highly correlated, maybe change the class_weight parameter
# Optimize Max Depth
max_depths = [2,3,4,5,6,7,8,10,12,15,20,25,30,40,50,75,100]
train_scores = []
test_scores = []
for max_depth in max_depths:
# shuffle train test set each time???
model = RandomForestClassifier(max_depth=max_depth)
model.fit(X_train_shuff, Y_train_shuff)
train_score = model.score(X_train_shuff, Y_train_shuff)
test_score = model.score(X_test_shuff, Y_test_shuff)
train_scores.append(train_score)
test_scores.append(test_score)
print(str(max_depth)+' max depth, train:'+str(train_score)+', test:'+str(test_score))
# plot
plt.plot(train_scores)
plt.plot(test_scores)
plt.xticks(range(len(max_depths)), max_depths)
plt.legend(['Train Acc','Test Acc'])
plt.xlabel('Max Tree Depth')
plt.ylabel('Accuracy')
# Optimize N Estimators
n_ests = [2,4,6,8,10,50,100,150,200,250,300,400,500,750,1000,2000]
train_scores = []
test_scores = []
for n_est in n_ests:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=20,n_estimators=n_est)
model.fit(X_train_shuff, Y_train_shuff)
train_score = model.score(X_train_shuff, Y_train_shuff)
test_score = model.score(X_test_shuff, Y_test_shuff)
train_scores.append(train_score)
test_scores.append(test_score)
print(str(n_est)+' trees (max_leaf_nodes fixed at 20), train:'+str(train_score)+', test:'+str(test_score))
# plot
plt.plot(train_scores)
plt.plot(test_scores)
plt.xticks(range(len(n_ests)), n_ests)
plt.legend(['Train Acc','Test Acc'])
plt.xlabel('Number of Trees')
plt.ylabel('Accuracy')
# Optimize N Estimators and Max Leaf Nodes
n_ests = [10,50,100,150,200,250,300,400,500,750,1000]
train_scores = []
test_scores = []
for max_leaf_nodes in max_leaves:
for n_est in n_ests:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=max_leaf_nodes,n_estimators=n_est)
model.fit(X_train_shuff, Y_train_shuff)
train_score = model.score(X_train_shuff, Y_train_shuff)
test_score = model.score(X_test_shuff, Y_test_shuff)
train_scores.append(train_score)
test_scores.append(test_score)
print(str(max_leaf_nodes)+' leaf nodes, '+str(n_est)+' trees, train:'+str(train_score)+', test:'+str(test_score))
print(max(test_scores))
# plot
plt.plot(train_scores)
plt.plot(test_scores)
#plt.xticks(range(len(n_ests)), n_ests)
plt.legend(['Train Acc','Test Acc'])
plt.xlabel('Number of Trees')
plt.ylabel('Accuracy')
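# --- editor's sketch (hedged): the nested loops above amount to a manual grid search
# scored on a single held-out split. A cross-validated equivalent with scikit-learn
# (illustrative parameter grid, not run here):
# from sklearn.model_selection import GridSearchCV
# grid = GridSearchCV(RandomForestClassifier(),
#                     {'max_leaf_nodes': max_leaves, 'n_estimators': n_ests},
#                     cv=5, n_jobs=-1)
# grid.fit(X_train_shuff, Y_train_shuff)
# grid.best_params_, grid.best_score_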
# FINAL OPTIMIZED MODEL
final_model = RandomForestClassifier(max_leaf_nodes=50,n_estimators=250)
final_model.fit(X_train_shuff, Y_train_shuff)
# get accuracy
train_score = final_model.score(X_train_shuff, Y_train_shuff)
print('Train Accuracy:')
print(train_score)
test_score = final_model.score(X_test_shuff, Y_test_shuff)
print('Test Accuracy:')
print(test_score)
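# --- editor's note (hedged): per-finger behaviour is easier to judge from a confusion
# matrix than from accuracy alone, especially since fingers 3 and 4 are correlated.
# Optional follow-up (sketch only):
# from sklearn.metrics import confusion_matrix, classification_report
# test_pred = final_model.predict(X_test_shuff)
# print(confusion_matrix(Y_test_shuff, test_pred))
# print(classification_report(Y_test_shuff, test_pred))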
# cmake_templates.py
from string import Template
import os
#-----template objects-----
#for putting a template inside an ifdef guard
TIfGuard = Template("""if(${condition})
${innerbody}
endif()\n""")
#For minimum cmake version and project name
TProjectSettings = Template("""cmake_minimum_required (VERSION ${MinCmakeVer})
project(${Name})
set_property(GLOBAL PROPERTY USE_FOLDERS ${UseFolders})
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n""")
#for including a definition
TDefinition = Template("add_definitions(-D${definition})")
#include directories
TIncludeDirectory = Template('include_directories("${dir}")')
#for globbing source files in a dir
TSourceGlob = Template('FILE(GLOB ${source_id} "${dir}/*.c*")')
#for globbing header files in a dir
THeaderGlob = Template('FILE(GLOB ${header_id} "${dir}/*.h*")')
#template for source group (so they appear in VS filters etc.
TSourceGroup = Template('source_group("${folder}" FILES $${${files}})\n')
#for outputting an executable
TExecutable = Template("add_executable(${project} $${SOURCES} $${HEADERS})\n")
#for outputting a shared library
TSharedLib = Template("add_library(${project} SHARED $${SOURCES} $${HEADERS})\n")
#for outputting a static library
TStaticLib = Template("add_library(${project} STATIC $${SOURCES} $${HEADERS})\n")
#for outputting a collection of code files to an object file
TObjectLib = Template("add_library(${project} OBJECT $${SOURCES})\n")
#template for appending a cmake variable to another cmake variable
TAppendVariable = Template("set( ${var} $${${var}} $${${appendedval}})\n")
#template for appending a python variable to a cmake variable
TAppendPythonVariable = Template("set( ${var} $${${var}} ${appendedval})\n")
#template for setting cmake variable
TMakeVariable = Template('set (${var} ${value})\n')
#template for adding a link directory
TLinkDirectory = Template('link_directories("${dir}")')
#template for targeting link libs
TTargetLinkLibs = Template("""if(NOT LIBS STREQUAL "")
target_link_libraries(${name} $${LIBS})
endif()
""")
#for linking a framework on the mac
TLinkFramework = Template("""find_library(${framework}_LIB ${framework})
MARK_AS_ADVANCED(${framework}_LIB)
set(LIBS $${LIBS} $${${framework}_LIB})""")
#for linking a system library
TLinkSystemLib = Template("""find_package(${framework} REQUIRED)
include_directories($${${framework_upper}_INCLUDE_DIRS})
set(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})""")
#for linking objects into this module
TLinkObject = Template("set(LIBS $${LIBS} $$<TARGET_OBJECTS:${object}>)")
#template for executable output path
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
#template for runtime output directory
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
#template for library output
TLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")\nset(LIBRARY_OUTPUT_PATH "${dir}")\n')
#template for including a submodule
TSubmoduleInclude = Template('add_subdirectory(${dir})')
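# --- editor's note (hedged): in the templates above "${name}" is a Python
# string.Template placeholder filled by substitute(), while "$${VAR}" escapes to a
# literal "${VAR}" so it survives into the generated CMake. For example ("demo" is an
# illustrative project name):
# TExecutable.substitute(dict(project="demo"))
# # -> 'add_executable(demo ${SOURCES} ${HEADERS})\n'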
#-----Helper Functions----
def WriteToFile(f, output, condition = False, conditionID = ""):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return ("$" in s)
#removes all characters that may cause issues with cmake
#such as ${} characters for environment variables
def Strip(s):
    chars = "${}"
    for i in range(0,len(chars)):
        s=s.replace(chars[i],"")
    return s
#-----Write Functions-----
#Puts innerbody into TIfGuard template with the given condition
#then returns the string
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
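# --- editor's example (hedged): WrapInGuard simply nests a body inside an if()/endif()
# pair; "WIN32" and the definition below are illustrative values only.
# WrapInGuard("WIN32", "add_definitions(-DNOMINMAX)")
# produces roughly:
# if(WIN32)
#     add_definitions(-DNOMINMAX)
# endif()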
def WriteProjectSettings(f, section):
#defaults
if "UseFolders" not in section.data: section.data["UseFolders"] = "OFF"
#output
output = TProjectSettings.substitute(section.data)
f.write(output)
#writes required CMAKE variables to the file
def WriteRequiredVariables(f):
#all required variables go here to initialise
variables = [
dict(var="INCLUDES", value='""'),
dict(var="SOURCES", value='""'),
dict(var="LIBS", value='""')
]
#write them to file
for v in variables:
f.write(TMakeVariable.substitute(v))
#definitions such as #defines
def WriteDefinitions(f, sections):
#first write the one which is not platform specific
for s in sections:
defs = s.data[":"]
#gather definitions to be output
output = ""
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#project include directories
def WriteIncludeDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
#gather definitions to be output
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
headerID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#add include directory
output = TIncludeDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#glob all header files
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append to HEADERS variable
output = TAppendVariable.substitute(dict(var="HEADERS", appendedval=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Header Files" + localDir, files=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#project source directories
def WriteSourceDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
sourceID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#glob all source files
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append globbed source files to SOURCES cmake variable
output = TAppendVariable.substitute(dict(var="SOURCES", appendedval=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Source Files" + localDir, files=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#includes local library directories
def WriteProjectLibDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else "/"+d
d = rootDir + d
#include lib directory
output = TLinkDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#adds all libs to the LIBS cmake var
def WriteLinkLibs(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
libs = s.data[":"]
output = ""
for l in libs:
if "-framework" in l:
frameworkName = l.replace("-framework ", "")
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=frameworkName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-system" in l:
systemLibName = l.replace("-system ", "")
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-object" in l:
objectLibName = l.replace("-object ", "")
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
else:
#add to LIBS cmake var
output = TAppendPythonVariable.substitute(dict(var="LIBS", appendedval=l))
WriteToFile(f,output, s.HasCondition(), s.condition)
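# --- editor's note (hedged): WriteLinkLibs recognises three prefixed forms besides
# plain library names; the entries below are illustrative only.
# "-framework Cocoa" -> emits a find_library() block (macOS frameworks)
# "-system OpenGL"   -> emits a find_package(OpenGL REQUIRED) block
# "-object mycore"   -> links the object library via $<TARGET_OBJECTS:mycore>
# "z"                -> appended verbatim to the LIBS list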
#Writes the cmake runtime/lib etc. outputs
def WriteOutputs(f, rootDir, sections):
for s in sections:
if "Executable" in s.data:
runtime = s.data["Executable"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TRuntimeOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Runtime" in s.data:
runtime = s.data["Runtime"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TExecutableOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Libs" in s.data:
print("LIBS OUTPUT BEING SET")
statics = s.data["Libs"]
#insert any environment variables
if ContainsEnvVariable(statics):
statics = InsertEnvVariable(statics)
else:
statics = statics if statics.startswith('/') else "/"+statics
statics = rootDir + statics
output = TLibraryoutput.substitute(dict(dir=statics))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the module output section of the CmakeLists file
def WriteModuleOutput(f, rootDir, m):
name = m.settings.data["Name"] #name of lib/exe
t = m.settings.data["Type"] #build type (lib/exe)
if "exe" in t:
f.write(TExecutable.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "shared" in t:
f.write(TSharedLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "static" in t:
f.write(TStaticLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "object" in t:
f.write(TObjectLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
return None
#writes the include for a submodule
def WriteSubmoduleIncludes(f, rootDir, sections):
for s in sections:
submods = s.data[":"]
for sm in submods:
sm = sm if sm.startswith('/') else "/"+sm
output = TSubmoduleInclude.substitute(dict(dir=rootDir+sm)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
# cmake_templates.py
from string import Template
import os
#-----template objects-----
#for putting a template inside an ifdef guard
TIfGuard = Template("""if(${condition})
${innerbody}
endif()\n""")
#For minimum cmake version and project name
TProjectSettings = Template("""cmake_minimum_required (VERSION ${MinCmakeVer})
project(${Name})
set_property(GLOBAL PROPERTY USE_FOLDERS ${UseFolders})
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n""")
#for including a definition
TDefinition = Template("add_definitions(-D${definition})")
#include directories
TIncludeDirectory = Template('include_directories("${dir}")')
#for globbing source files in a dir
TSourceGlob = Template('FILE(GLOB ${source_id} "${dir}/*.c*")')
#for globbing header files in a dir
THeaderGlob = Template('FILE(GLOB ${header_id} "${dir}/*.h*")')
#template for source group (so they appear in VS filters etc.
TSourceGroup = Template('source_group("${folder}" FILES $${${files}})\n')
#for outputting an executable
TExecutable = Template("add_executable(${project} $${SOURCES} $${HEADERS})\n")
#for outputting a shared library
TSharedLib = Template("add_library(${project} SHARED $${SOURCES} $${HEADERS})\n")
#for outputting a static library
TStaticLib = Template("add_library(${project} STATIC $${SOURCES} $${HEADERS})\n")
#for outputting a collection of code files to an object file
TObjectLib = Template("add_library(${project} OBJECT $${SOURCES})\n")
#template for appending a cmake variable to another cmake variable
TAppendVariable = Template("set( ${var} $${${var}} $${${appendedval}})\n")
#template for appending a python variable to a cmake variable
TAppendPythonVariable = Template("set( ${var} $${${var}} ${appendedval})\n")
#template for setting cmake variable
TMakeVariable = Template('set (${var} ${value})\n')
#template for adding a link directory
TLinkDirectory = Template('link_directories("${dir}")')
#template for targeting link libs
TTargetLinkLibs = Template("""if(NOT LIBS STREQUAL "")
target_link_libraries(${name} $${LIBS})
endif()
""")
#for linking a framework on the mac
TLinkFramework = Template("""find_library(${framework}_LIB ${framework})
MARK_AS_ADVANCED(${framework}_LIB)
set(LIBS $${LIBS} $${${framework}_LIB})""")
#for linking a system library
TLinkSystemLib = Template("""find_package(${framework} REQUIRED)
include_directories($${${framework_upper}_INCLUDE_DIRS})
set(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})""")
#for linking objects into this module
TLinkObject = Template("set(LIBS $${LIBS} $$<TARGET_OBJECTS:${object}>)")
#template for executable output path
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
#template for runtime output directory
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
#template for library output
TLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")\nset(LIBRARY_OUTPUT_PATH "${dir}")\n')
#template for including a submodule
TSubmoduleInclude = Template('add_subdirectory(${dir})')
#-----Helper Functions----
def WriteToFile(f, output, condition = False, conditionID = ""):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return ("$" in s)
#removes all characters that may cause issues with cmake
#such as ${} characters for environment variables
def Strip(s):
chars = "${}"
for i in range(0,len(chars)):
s=s.replace(chars[i],"")
return s
#-----Write Functions-----
#Puts innerbody into TIfGuard template with the given condition
#then returns the string
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
def WriteProjectSettings(f, section):
#defaults
if "UseFolders" not in section.data: section.data["UseFolders"] = "OFF"
#output
output = TProjectSettings.substitute(section.data)
f.write(output)
#writes required CMAKE variables to the file
def WriteRequiredVariables(f):
#all required variables go here to initialise
variables = [
dict(var="INCLUDES", value='""'),
dict(var="SOURCES", value='""'),
dict(var="LIBS", value='""')
]
#write them to file
for v in variables:
f.write(TMakeVariable.substitute(v))
#definitions such as #defines
def WriteDefinitions(f, sections):
#first write the one which is not platform specific
for s in sections:
defs = s.data[":"]
#gather definitions to be output
output = ""
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#project include directories
def WriteIncludeDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
#gather definitions to be output
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
headerID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#add include directory
output = TIncludeDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#glob all header files
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append to HEADERS variable
output = TAppendVariable.substitute(dict(var="HEADERS", appendedval=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Header Files" + localDir, files=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#project source directories
def WriteSourceDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
sourceID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#glob all source files
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append globbed source files to SOURCES cmake variable
output = TAppendVariable.substitute(dict(var="SOURCES", appendedval=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Source Files" + localDir, files=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#includes local library directories
def WriteProjectLibDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else "/"+d
d = rootDir + d
#include lib directory
output = TLinkDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#adds all libs to the LIBS cmake var
def WriteLinkLibs(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
libs = s.data[":"]
output = ""
for l in libs:
if "-framework" in l:
frameworkName = l.replace("-framework ", "")
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=frameworkName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-system" in l:
systemLibName = l.replace("-system ", "")
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-object" in l:
objectLibName = l.replace("-object ", "")
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
else:
#add to LIBS cmake var
output = TAppendPythonVariable.substitute(dict(var="LIBS", appendedval=l))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the cmake runtime/lib etc. outputs
def WriteOutputs(f, rootDir, sections):
for s in sections:
if "Executable" in s.data:
runtime = s.data["Executable"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TRuntimeOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Runtime" in s.data:
runtime = s.data["Runtime"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TExecutableOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Libs" in s.data:
print("LIBS OUTPUT BEING SET")
statics = s.data["Libs"]
#insert any environment variables
if ContainsEnvVariable(statics):
statics = InsertEnvVariable(statics)
else:
statics = statics if statics.startswith('/') else "/"+statics
statics = rootDir + statics
output = TLibraryoutput.substitute(dict(dir=statics))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the module output section of the CmakeLists file
def WriteModuleOutput(f, rootDir, m):
name = m.settings.data["Name"] #name of lib/exe
t = m.settings.data["Type"] #build type (lib/exe)
if "exe" in t:
f.write(TExecutable.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "shared" in t:
f.write(TSharedLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "static" in t:
f.write(TStaticLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "object" in t:
|
return None
#writes the include for a submodule
def WriteSubmoduleIncludes(f, rootDir, sections):
for s in sections:
submods = s.data[":"]
for sm in submods:
sm = sm if sm.startswith('/') else "/"+sm
output = TSubmoduleInclude.substitute(dict(dir=rootDir+sm)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition) | f.write(TObjectLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name))) | conditional_block |
cmake_templates.py | from string import Template
import os
#-----template objects-----
#for putting a template inside an ifdef guard
TIfGuard = Template("""if(${condition})
${innerbody}
endif()\n""")
#For minimum cmake version and project name
TProjectSettings = Template("""cmake_minimum_required (VERSION ${MinCmakeVer})
project(${Name})
set_property(GLOBAL PROPERTY USE_FOLDERS ${UseFolders})
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n""")
#for including a definition
TDefinition = Template("add_definitions(-D${definition})")
#include directories
TIncludeDirectory = Template('include_directories("${dir}")')
#for globbing source files in a dir
TSourceGlob = Template('FILE(GLOB ${source_id} "${dir}/*.c*")')
#for globbing header files in a dir
THeaderGlob = Template('FILE(GLOB ${header_id} "${dir}/*.h*")')
#template for source group (so they appear in VS filters etc.
TSourceGroup = Template('source_group("${folder}" FILES $${${files}})\n')
#for outputting an executable
TExecutable = Template("add_executable(${project} $${SOURCES} $${HEADERS})\n")
#for outputting a shared library
TSharedLib = Template("add_library(${project} SHARED $${SOURCES} $${HEADERS})\n")
#for outputting a static library
TStaticLib = Template("add_library(${project} STATIC $${SOURCES} $${HEADERS})\n")
#for outputting a collection of code files to an object file
TObjectLib = Template("add_library(${project} OBJECT $${SOURCES})\n")
#template for appending a cmake variable to another cmake variable
TAppendVariable = Template("set( ${var} $${${var}} $${${appendedval}})\n")
#template for appending a python variable to a cmake variable
TAppendPythonVariable = Template("set( ${var} $${${var}} ${appendedval})\n")
#template for setting cmake variable
TMakeVariable = Template('set (${var} ${value})\n')
#template for adding a link directory
TLinkDirectory = Template('link_directories("${dir}")')
#template for targeting link libs
TTargetLinkLibs = Template("""if(NOT LIBS STREQUAL "")
target_link_libraries(${name} $${LIBS})
endif()
""")
#for linking a framework on the mac
TLinkFramework = Template("""find_library(${framework}_LIB ${framework})
MARK_AS_ADVANCED(${framework}_LIB)
set(LIBS $${LIBS} $${${framework}_LIB})""")
#for linking a system library
TLinkSystemLib = Template("""find_package(${framework} REQUIRED)
include_directories($${${framework_upper}_INCLUDE_DIRS})
set(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})""")
#for linking objects into this module
TLinkObject = Template("set(LIBS $${LIBS} $$<TARGET_OBJECTS:${object}>)")
#template for executable output path
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
#template for runtime output directory
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
#template for library output
TLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")\nset(LIBRARY_OUTPUT_PATH "${dir}")\n')
#template for including a submodule
TSubmoduleInclude = Template('add_subdirectory(${dir})')
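#Editor's sketch (not in the original): string.Template treats "$$" as an escaped literal "$",
#which is how the templates above emit CMake variable references such as ${LIBS} without
#Python substituting them. For example, with hypothetical values:
#  TAppendVariable.substitute(dict(var="SOURCES", appendedval="src_main"))
#renders to:
#  set( SOURCES ${SOURCES} ${src_main})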
#-----Helper Functions----
def WriteToFile(f, output, condition = False, conditionID = ""):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return ("$" in s)
#removes all characters that may cause issues with cmake
#such as ${} characters for environment variables
def Strip(s):
chars = "${}"
for i in range(0,len(chars)):
s=s.replace(chars[i],"")
return s
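#e.g. (hypothetical input): Strip("${MY_SDK_DIR}/include") returns "MY_SDK_DIR/include"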
#-----Write Functions-----
#Puts innerbody into TIfGuard template with the given condition
#then returns the string
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
def WriteProjectSettings(f, section):
#defaults
if "UseFolders" not in section.data: section.data["UseFolders"] = "OFF"
#output
output = TProjectSettings.substitute(section.data)
f.write(output)
#writes required CMAKE variables to the file
def WriteRequiredVariables(f):
#all required variables go here to initialise
variables = [
dict(var="INCLUDES", value='""'),
dict(var="SOURCES", value='""'),
dict(var="LIBS", value='""')
]
#write them to file
for v in variables:
f.write(TMakeVariable.substitute(v))
#definitions such as #defines
def WriteDefinitions(f, sections):
#first write the one which is not platform specific
for s in sections:
defs = s.data[":"]
#gather definitions to be output
output = ""
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#project include directories
def WriteIncludeDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
#gather definitions to be output
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
headerID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#add include directory
output = TIncludeDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#glob all header files
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append to HEADERS variable
output = TAppendVariable.substitute(dict(var="HEADERS", appendedval=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Header Files" + localDir, files=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#project source directories
def WriteSourceDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
sourceID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#glob all source files
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append globbed source files to SOURCES cmake variable
output = TAppendVariable.substitute(dict(var="SOURCES", appendedval=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Source Files" + localDir, files=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#includes local library directories
def WriteProjectLibDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else "/"+d
d = rootDir + d
#include lib directory
output = TLinkDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#adds all libs to the LIBS cmake var
def WriteLinkLibs(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
libs = s.data[":"]
output = ""
for l in libs:
if "-framework" in l:
frameworkName = l.replace("-framework ", "")
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=frameworkName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-system" in l:
systemLibName = l.replace("-system ", "")
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-object" in l:
objectLibName = l.replace("-object ", "")
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
else:
#add to LIBS cmake var
output = TAppendPythonVariable.substitute(dict(var="LIBS", appendedval=l))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the cmake runtime/lib etc. outputs
def WriteOutputs(f, rootDir, sections):
for s in sections:
if "Executable" in s.data:
runtime = s.data["Executable"]
#insert any environment variables
if ContainsEnvVariable(runtime): | else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TRuntimeOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Runtime" in s.data:
runtime = s.data["Runtime"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TExecutableOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Libs" in s.data:
print("LIBS OUTPUT BEING SET")
statics = s.data["Libs"]
#insert any environment variables
if ContainsEnvVariable(statics):
statics = InsertEnvVariable(statics)
else:
statics = statics if statics.startswith('/') else "/"+statics
statics = rootDir + statics
output = TLibraryoutput.substitute(dict(dir=statics))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the module output section of the CmakeLists file
def WriteModuleOutput(f, rootDir, m):
name = m.settings.data["Name"] #name of lib/exe
t = m.settings.data["Type"] #build type (lib/exe)
if "exe" in t:
f.write(TExecutable.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "shared" in t:
f.write(TSharedLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "static" in t:
f.write(TStaticLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "object" in t:
f.write(TObjectLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
return None
#writes the include for a submodule
def WriteSubmoduleIncludes(f, rootDir, sections):
for s in sections:
submods = s.data[":"]
for sm in submods:
sm = sm if sm.startswith('/') else "/"+sm
output = TSubmoduleInclude.substitute(dict(dir=rootDir+sm)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition) | runtime = InsertEnvVariable(runtime) | random_line_split |
__init__.py | """@package XLM
Top level Excel XLM macro emulator interface.
"""
from __future__ import print_function
import subprocess
import sys
import re
import os
import string
# https://github.com/kirk-sayre-work/office_dumper.git
import excel
from XLM import color_print
import XLM.stack_transformer
from XLM import XLM_Object
import XLM.xlm_library
import XLM.utils
import XLM.ms_stack_transformer
import XLM.excel2007
## Check installation prerequisites.
# Make sure olevba is installed.
try:
subprocess.check_output(["olevba", "-h"])
except Exception as e:
color_print.output('r', "ERROR: It looks like olevba is not installed. " + str(e) + "\n")
sys.exit(101)
# Debugging flag.
debug = False
####################################################################
def | (flag):
"""
Turn debugging on or off.
@param flag (boolean) True means output debugging, False means no.
"""
global debug
debug = flag
XLM.XLM_Object.debug = flag
XLM.xlm_library.debug = flag
XLM.ms_stack_transformer.debug = flag
XLM.stack_transformer.debug = flag
XLM.excel2007.debug = flag
####################################################################
def _extract_xlm(maldoc):
"""
Run olevba on the given file and extract the XLM macro code lines.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (str) The XLM macro cells extracted from running olevba on the
given file. None will be returned on error.
"""
# Run olevba on the given file.
olevba_out = None
FNULL = open(os.devnull, 'w')
try:
cmd = "timeout 30 olevba -c \"" + str(maldoc) + "\""
olevba_out = subprocess.check_output(cmd, shell=True, stderr=FNULL)
except Exception as e:
color_print.output('r', "ERROR: Running olevba on " + str(maldoc) + " failed. " + str(e))
return None
# Not handling encrypted Excel files.
if (b"FILEPASS record: file is password protected" in olevba_out):
color_print.output('y', "WARNING: " + str(maldoc) + " is password protected. Not emulating.")
return None
# Pull out the chunks containing the XLM lines.
chunk_pat = b"in file: xlm_macro \- OLE stream: 'xlm_macro'\n(?:\- ){39}\n(.+)"
chunks = re.findall(chunk_pat, olevba_out, re.DOTALL)
# Pull out all the XLM lines from each chunk.
r = b""
xlm_pat = br"' \d\d\d\d {1,10}\d{1,6} [^\n]+\n"
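# The pattern above matches olevba/plugin_biff-style dump lines, which look roughly like
# (hypothetical example): ' 0006     26 FORMULA : ... -- an apostrophe, four digits,
# some spacing, a short record number, then the rest of the dumped cell on one line.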
for chunk in chunks:
for line in re.findall(xlm_pat, chunk):
r += line
# Convert characters so this can be parsed.
try:
r = XLM.utils.to_str(r)
except UnicodeDecodeError:
r = XLM.utils.strip_unprintable(r)
# Did we find XLM?
if (len(r.strip()) == 0):
color_print.output('y', "WARNING: No XLM found.")
return None
# Done. Return XLM lines.
return r
####################################################################
def _guess_xlm_sheet(workbook):
"""
Guess the sheet containing the XLM macros by finding the sheet with the
most unresolved "#NAME" cells.
@param workbook (ExcelSheet object) The Excel spreadsheet to check.
@return (str) The name of the sheet that might contain the XLM macros.
"""
# TODO: If plugin_biff.py used by olevba to dump XLM includes sheet names this
# function will no longer be needed.
# Look at each sheet.
xlm_sheet = None
unresolved_count = -1
for curr_sheet_name in workbook.sheet_names():
curr_sheet = workbook.sheet_by_name(curr_sheet_name)
curr_unresolved_count = 0
for cell_value in list(curr_sheet.cells.values()):
cell_value = cell_value.strip()
if (len(cell_value) == 0):
continue
if (cell_value.strip() == "#NAME?"):
curr_unresolved_count += 1
if (curr_unresolved_count > unresolved_count):
unresolved_count = curr_unresolved_count
xlm_sheet = curr_sheet_name
# Return the sheet with the most '#NAME' cells.
return xlm_sheet
####################################################################
def _merge_XLM_cells(maldoc, xlm_cells):
"""
Merge the given XLM cells into the value cells read from the
given Excel file.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@param xlm_cells (dict) A dict of XLM formula objects (XLM_Object objects) where
dict[ROW][COL] gives the XLM cell at (ROW, COL).
@return (tuple) A 3 element tuple where the 1st element is the updated ExcelWorkbook and
2nd element is a list of 2 element tuples containing the XLM cell indices on success and
the 3rd element is the XLM sheet object, (None, None, None) on error.
"""
# Read in the Excel workbook data.
color_print.output('g', "Merging XLM macro cells with data cells ...")
workbook = excel.read_excel_sheets(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading in Excel file " + str(maldoc) + " failed.")
return (None, None, None)
# Guess the name of the sheet containing the XLM macros.
xlm_sheet_name = _guess_xlm_sheet(workbook)
if debug:
print("XLM Sheet:")
print(xlm_sheet_name)
print("")
# Insert the XLM macros into the XLM sheet.
xlm_sheet = workbook.sheet_by_name(xlm_sheet_name)
xlm_cell_indices = []
if debug:
print("=========== START MERGE ==============")
rows = xlm_cells.keys()
for row in rows:
cols = xlm_cells[row].keys()
for col in cols:
xlm_cell = xlm_cells[row][col]
if debug:
print((row, col))
print(xlm_cell)
cell_index = (row, col)
xlm_sheet.cells[cell_index] = xlm_cell
xlm_cell_indices.append(cell_index)
# Debug.
if debug:
print("=========== DONE MERGE ==============")
print(workbook)
# Done. Return the indices of the added XLM cells and the updated
# workbook.
color_print.output('g', "Merged XLM macro cells with data cells.")
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_2007(maldoc):
"""
Read in an Excel 2007+ workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Read in the 2007+ cells.
color_print.output('g', "Analyzing Excel 2007+ file ...")
workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc)
color_print.output('g', "Extracted XLM from ZIP archive.")
if (workbook_info is None):
return (None, None, None)
if (len(workbook_info) == 0):
color_print.output('y', "WARNING: No XLM macros found.")
return (None, None, None)
if debug:
print("=========== START 2007+ CONTENTS ==============")
for sheet in workbook_info.keys():
print("\n------")
print(sheet)
print("")
for c in workbook_info[sheet].keys():
print(str(c) + " ---> " + str(workbook_info[sheet][c]))
print("=========== DONE 2007+ CONTENTS ==============")
# Figure out which sheet probably has the XLM macros.
xlm_sheet_name = None
max_formulas = -1
for sheet in workbook_info.keys():
if (len(workbook_info[sheet]) > max_formulas):
max_formulas = len(workbook_info[sheet])
xlm_sheet_name = sheet
# Parse each formula and add it to a sheet object.
xlm_cells = {}
for cell_index in workbook_info[xlm_sheet_name].keys():
# Value only cell?
row = cell_index[0]
col = cell_index[1]
if (row not in xlm_cells):
xlm_cells[row] = {}
raw_formula = workbook_info[xlm_sheet_name][cell_index][0]
if (raw_formula is None):
# Do we have a value?
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
# Just save the value in the cell.
xlm_cells[row][col] = formula_val
continue
# Parse the formula into an XLM object.
formula_str = b"=" + raw_formula
formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)
# Set the value of the formula if we know it.
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
formula.value = formula_val
# Save the XLM object.
formula.update_cell_id(cell_index)
xlm_cells[row][col] = formula
color_print.output('g', "Parsed MS XLM macros.")
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_97(maldoc):
"""
Read in an Excel 97 workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Run olevba on the file and extract the XLM macro code lines.
color_print.output('g', "Analyzing Excel 97 file ...")
xlm_code = _extract_xlm(maldoc)
color_print.output('g', "Extracted XLM with olevba.")
if debug:
print("=========== START RAW XLM ==============")
print(xlm_code)
print("=========== DONE RAW XLM ==============")
if (xlm_code is None):
color_print.output('r', "ERROR: Unable to extract XLM. Emulation aborted.")
return (None, None, None)
# Parse the XLM text and get XLM objects that can be emulated.
xlm_cells = XLM.stack_transformer.parse_olevba_xlm(xlm_code)
color_print.output('g', "Parsed olevba XLM macros.")
if (xlm_cells is None):
color_print.output('r', "ERROR: Parsing of XLM failed. Emulation aborted.")
return (None, None, None)
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def emulate(maldoc):
"""
Emulate the behavior of a given Excel file containing XLM macros.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) 1st element is a list of 3 element tuples containing the actions performed
by the sheet, 2nd element is the human readable XLM code.
"""
# Excel 97 file?
if (XLM.utils.is_excel_file_97(maldoc)):
workbook, xlm_cell_indices, xlm_sheet = _read_workbook_97(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading Excel 97 file failed. Emulation aborted.")
return ([], "")
# Excel 2007+ file?
elif (XLM.utils.is_excel_file_2007(maldoc)):
workbook, xlm_cell_indices, xlm_sheet = _read_workbook_2007(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading Excel 2007 file failed. Emulation aborted.")
return ([], "")
else:
color_print.output('y', "WARNING: " + maldoc + " is not an Excel file. Emulation aborted.")
return ([], "")
# Save the indices of the XLM cells in the workbook. We do this here directly so that
# the base definition of the ExcelWorkbook class does not need to be changed.
xlm_sheet.xlm_cell_indices = xlm_cell_indices
# Emulate the XLM.
color_print.output('g', "Starting XLM emulation ...")
r = XLM_Object.eval(xlm_sheet)
color_print.output('g', "Finished XLM emulation.")
# Done.
return r
| set_debug | identifier_name |
__init__.py | """@package XLM
Top level Excel XLM macro emulator interface.
"""
from __future__ import print_function
import subprocess
import sys
import re
import os
import string
# https://github.com/kirk-sayre-work/office_dumper.git
import excel
from XLM import color_print
import XLM.stack_transformer
from XLM import XLM_Object
import XLM.xlm_library
import XLM.utils
import XLM.ms_stack_transformer
import XLM.excel2007
## Check installation prerequisites.
# Make sure olevba is installed.
try:
subprocess.check_output(["olevba", "-h"])
except Exception as e:
color_print.output('r', "ERROR: It looks like olevba is not installed. " + str(e) + "\n")
sys.exit(101)
# Debugging flag.
debug = False
####################################################################
def set_debug(flag):
"""
Turn debugging on or off.
@param flag (boolean) True means output debugging, False means no.
"""
global debug
debug = flag
XLM.XLM_Object.debug = flag
XLM.xlm_library.debug = flag
XLM.ms_stack_transformer.debug = flag
XLM.stack_transformer.debug = flag
XLM.excel2007.debug = flag
####################################################################
def _extract_xlm(maldoc):
"""
Run olevba on the given file and extract the XLM macro code lines.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (str) The XLM macro cells extracted from running olevba on the
given file. None will be returned on error.
"""
# Run olevba on the given file.
olevba_out = None
FNULL = open(os.devnull, 'w')
try:
cmd = "timeout 30 olevba -c \"" + str(maldoc) + "\""
olevba_out = subprocess.check_output(cmd, shell=True, stderr=FNULL)
except Exception as e:
color_print.output('r', "ERROR: Running olevba on " + str(maldoc) + " failed. " + str(e))
return None
# Not handling encrypted Excel files.
if (b"FILEPASS record: file is password protected" in olevba_out):
color_print.output('y', "WARNING: " + str(maldoc) + " is password protected. Not emulating.")
return None
# Pull out the chunks containing the XLM lines.
chunk_pat = b"in file: xlm_macro \- OLE stream: 'xlm_macro'\n(?:\- ){39}\n(.+)"
chunks = re.findall(chunk_pat, olevba_out, re.DOTALL)
# Pull out all the XLM lines from each chunk.
r = b""
xlm_pat = br"' \d\d\d\d {1,10}\d{1,6} [^\n]+\n"
for chunk in chunks:
for line in re.findall(xlm_pat, chunk):
r += line
# Convert characters so this can be parsed.
try:
r = XLM.utils.to_str(r)
except UnicodeDecodeError:
r = XLM.utils.strip_unprintable(r)
# Did we find XLM?
if (len(r.strip()) == 0):
color_print.output('y', "WARNING: No XLM found.")
return None
# Done. Return XLM lines.
return r
####################################################################
def _guess_xlm_sheet(workbook):
"""
Guess the sheet containing the XLM macros by finding the sheet with the
most unresolved "#NAME" cells.
@param workbook (ExcelSheet object) The Excel spreadsheet to check.
@return (str) The name of the sheet that might contain the XLM macros.
"""
# TODO: If plugin_biff.py used by olevba to dump XLM includes sheet names this
# function will no longer be needed.
# Look at each sheet.
xlm_sheet = None
unresolved_count = -1
for curr_sheet_name in workbook.sheet_names():
curr_sheet = workbook.sheet_by_name(curr_sheet_name)
curr_unresolved_count = 0
for cell_value in list(curr_sheet.cells.values()):
cell_value = cell_value.strip()
if (len(cell_value) == 0):
continue
if (cell_value.strip() == "#NAME?"):
curr_unresolved_count += 1
if (curr_unresolved_count > unresolved_count):
unresolved_count = curr_unresolved_count
xlm_sheet = curr_sheet_name
# Return the sheet with the most '#NAME' cells.
return xlm_sheet
####################################################################
def _merge_XLM_cells(maldoc, xlm_cells):
"""
Merge the given XLM cells into the value cells read from the
given Excel file.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@param xlm_cells (dict) A dict of XLM formula objects (XLM_Object objects) where
dict[ROW][COL] gives the XLM cell at (ROW, COL).
@return (tuple) A 3 element tuple where the 1st element is the updated ExcelWorkbook and
2nd element is a list of 2 element tuples containing the XLM cell indices on success and
the 3rd element is the XLM sheet object, (None, None, None) on error.
"""
# Read in the Excel workbook data.
color_print.output('g', "Merging XLM macro cells with data cells ...")
workbook = excel.read_excel_sheets(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading in Excel file " + str(maldoc) + " failed.")
return (None, None, None)
# Guess the name of the sheet containing the XLM macros.
xlm_sheet_name = _guess_xlm_sheet(workbook)
if debug:
print("XLM Sheet:")
print(xlm_sheet_name)
print("")
# Insert the XLM macros into the XLM sheet.
xlm_sheet = workbook.sheet_by_name(xlm_sheet_name)
xlm_cell_indices = []
if debug:
print("=========== START MERGE ==============")
rows = xlm_cells.keys()
for row in rows:
cols = xlm_cells[row].keys()
for col in cols:
xlm_cell = xlm_cells[row][col]
if debug:
print((row, col))
print(xlm_cell)
cell_index = (row, col)
xlm_sheet.cells[cell_index] = xlm_cell
xlm_cell_indices.append(cell_index)
# Debug.
if debug:
print("=========== DONE MERGE ==============")
print(workbook)
# Done. Return the indices of the added XLM cells and the updated
# workbook.
color_print.output('g', "Merged XLM macro cells with data cells.")
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_2007(maldoc):
"""
Read in an Excel 2007+ workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Read in the 2007+ cells.
color_print.output('g', "Analyzing Excel 2007+ file ...")
workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc)
color_print.output('g', "Extracted XLM from ZIP archive.")
if (workbook_info is None):
return (None, None, None)
if (len(workbook_info) == 0):
color_print.output('y', "WARNING: No XLM macros found.")
return (None, None, None)
if debug:
print("=========== START 2007+ CONTENTS ==============")
for sheet in workbook_info.keys():
print("\n------")
print(sheet)
print("")
for c in workbook_info[sheet].keys():
print(str(c) + " ---> " + str(workbook_info[sheet][c]))
print("=========== DONE 2007+ CONTENTS ==============")
# Figure out which sheet probably has the XLM macros.
xlm_sheet_name = None
max_formulas = -1
for sheet in workbook_info.keys():
if (len(workbook_info[sheet]) > max_formulas):
max_formulas = len(workbook_info[sheet])
xlm_sheet_name = sheet
# Parse each formula and add it to a sheet object.
xlm_cells = {}
for cell_index in workbook_info[xlm_sheet_name].keys():
# Value only cell?
row = cell_index[0]
col = cell_index[1]
if (row not in xlm_cells):
xlm_cells[row] = {}
raw_formula = workbook_info[xlm_sheet_name][cell_index][0]
if (raw_formula is None):
# Do we have a value?
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
# Just save the value in the cell.
xlm_cells[row][col] = formula_val
continue
# Parse the formula into an XLM object.
formula_str = b"=" + raw_formula
formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)
# Set the value of the formula if we know it.
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
formula.value = formula_val
# Save the XLM object.
formula.update_cell_id(cell_index)
xlm_cells[row][col] = formula
color_print.output('g', "Parsed MS XLM macros.")
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_97(maldoc):
"""
Read in an Excel 97 workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
""" |
# Run olevba on the file and extract the XLM macro code lines.
color_print.output('g', "Analyzing Excel 97 file ...")
xlm_code = _extract_xlm(maldoc)
color_print.output('g', "Extracted XLM with olevba.")
if debug:
print("=========== START RAW XLM ==============")
print(xlm_code)
print("=========== DONE RAW XLM ==============")
if (xlm_code is None):
color_print.output('r', "ERROR: Unable to extract XLM. Emulation aborted.")
return (None, None, None)
# Parse the XLM text and get XLM objects that can be emulated.
xlm_cells = XLM.stack_transformer.parse_olevba_xlm(xlm_code)
color_print.output('g', "Parsed olevba XLM macros.")
if (xlm_cells is None):
color_print.output('r', "ERROR: Parsing of XLM failed. Emulation aborted.")
return (None, None, None)
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def emulate(maldoc):
"""
Emulate the behavior of a given Excel file containing XLM macros.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) 1st element is a list of 3 element tuples containing the actions performed
by the sheet, 2nd element is the human readable XLM code.
"""
# Excel 97 file?
if (XLM.utils.is_excel_file_97(maldoc)):
workbook, xlm_cell_indices, xlm_sheet = _read_workbook_97(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading Excel 97 file failed. Emulation aborted.")
return ([], "")
# Excel 2007+ file?
elif (XLM.utils.is_excel_file_2007(maldoc)):
workbook, xlm_cell_indices, xlm_sheet = _read_workbook_2007(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading Excel 2007 file failed. Emulation aborted.")
return ([], "")
else:
color_print.output('y', "WARNING: " + maldoc + " is not an Excel file. Emulation aborted.")
return ([], "")
# Save the indices of the XLM cells in the workbook. We do this here directly so that
# the base definition of the ExcelWorkbook class does not need to be changed.
xlm_sheet.xlm_cell_indices = xlm_cell_indices
# Emulate the XLM.
color_print.output('g', "Starting XLM emulation ...")
r = XLM_Object.eval(xlm_sheet)
color_print.output('g', "Finished XLM emulation.")
# Done.
return r | random_line_split |
|
__init__.py | """@package XLM
Top level Excel XLM macro emulator interface.
"""
from __future__ import print_function
import subprocess
import sys
import re
import os
import string
# https://github.com/kirk-sayre-work/office_dumper.git
import excel
from XLM import color_print
import XLM.stack_transformer
from XLM import XLM_Object
import XLM.xlm_library
import XLM.utils
import XLM.ms_stack_transformer
import XLM.excel2007
## Check installation prerequisites.
# Make sure olevba is installed.
try:
subprocess.check_output(["olevba", "-h"])
except Exception as e:
color_print.output('r', "ERROR: It looks like olevba is not installed. " + str(e) + "\n")
sys.exit(101)
# Debugging flag.
debug = False
####################################################################
def set_debug(flag):
"""
Turn debugging on or off.
@param flag (boolean) True means output debugging, False means no.
"""
global debug
debug = flag
XLM.XLM_Object.debug = flag
XLM.xlm_library.debug = flag
XLM.ms_stack_transformer.debug = flag
XLM.stack_transformer.debug = flag
XLM.excel2007.debug = flag
####################################################################
def _extract_xlm(maldoc):
|
####################################################################
def _guess_xlm_sheet(workbook):
"""
Guess the sheet containing the XLM macros by finding the sheet with the
most unresolved "#NAME" cells.
@param workbook (ExcelSheet object) The Excel spreadsheet to check.
@return (str) The name of the sheet that might contain the XLM macros.
"""
# TODO: If plugin_biff.py used by olevba to dump XLM includes sheet names this
# function will no longer be needed.
# Look at each sheet.
xlm_sheet = None
unresolved_count = -1
for curr_sheet_name in workbook.sheet_names():
curr_sheet = workbook.sheet_by_name(curr_sheet_name)
curr_unresolved_count = 0
for cell_value in list(curr_sheet.cells.values()):
cell_value = cell_value.strip()
if (len(cell_value) == 0):
continue
if (cell_value.strip() == "#NAME?"):
curr_unresolved_count += 1
if (curr_unresolved_count > unresolved_count):
unresolved_count = curr_unresolved_count
xlm_sheet = curr_sheet_name
# Return the sheet with the most '#NAME' cells.
return xlm_sheet
####################################################################
def _merge_XLM_cells(maldoc, xlm_cells):
"""
Merge the given XLM cells into the value cells read from the
given Excel file.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@param xlm_cells (dict) A dict of XLM formula objects (XLM_Object objects) where
dict[ROW][COL] gives the XLM cell at (ROW, COL).
@return (tuple) A 3 element tuple where the 1st element is the updated ExcelWorkbook and
2nd element is a list of 2 element tuples containing the XLM cell indices on success and
the 3rd element is the XLM sheet object, (None, None, None) on error.
"""
# Read in the Excel workbook data.
color_print.output('g', "Merging XLM macro cells with data cells ...")
workbook = excel.read_excel_sheets(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading in Excel file " + str(maldoc) + " failed.")
return (None, None, None)
# Guess the name of the sheet containing the XLM macros.
xlm_sheet_name = _guess_xlm_sheet(workbook)
if debug:
print("XLM Sheet:")
print(xlm_sheet_name)
print("")
# Insert the XLM macros into the XLM sheet.
xlm_sheet = workbook.sheet_by_name(xlm_sheet_name)
xlm_cell_indices = []
if debug:
print("=========== START MERGE ==============")
rows = xlm_cells.keys()
for row in rows:
cols = xlm_cells[row].keys()
for col in cols:
xlm_cell = xlm_cells[row][col]
if debug:
print((row, col))
print(xlm_cell)
cell_index = (row, col)
xlm_sheet.cells[cell_index] = xlm_cell
xlm_cell_indices.append(cell_index)
# Debug.
if debug:
print("=========== DONE MERGE ==============")
print(workbook)
# Done. Return the indices of the added XLM cells and the updated
# workbook.
color_print.output('g', "Merged XLM macro cells with data cells.")
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_2007(maldoc):
"""
Read in an Excel 2007+ workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Read in the 2007+ cells.
color_print.output('g', "Analyzing Excel 2007+ file ...")
workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc)
color_print.output('g', "Extracted XLM from ZIP archive.")
if (workbook_info is None):
return (None, None, None)
if (len(workbook_info) == 0):
color_print.output('y', "WARNING: No XLM macros found.")
return (None, None, None)
if debug:
print("=========== START 2007+ CONTENTS ==============")
for sheet in workbook_info.keys():
print("\n------")
print(sheet)
print("")
for c in workbook_info[sheet].keys():
print(str(c) + " ---> " + str(workbook_info[sheet][c]))
print("=========== DONE 2007+ CONTENTS ==============")
# Figure out which sheet probably has the XLM macros.
xlm_sheet_name = None
max_formulas = -1
for sheet in workbook_info.keys():
if (len(workbook_info[sheet]) > max_formulas):
max_formulas = len(workbook_info[sheet])
xlm_sheet_name = sheet
# Parse each formula and add it to a sheet object.
xlm_cells = {}
for cell_index in workbook_info[xlm_sheet_name].keys():
# Value only cell?
row = cell_index[0]
col = cell_index[1]
if (row not in xlm_cells):
xlm_cells[row] = {}
raw_formula = workbook_info[xlm_sheet_name][cell_index][0]
if (raw_formula is None):
# Do we have a value?
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
# Just save the value in the cell.
xlm_cells[row][col] = formula_val
continue
# Parse the formula into an XLM object.
formula_str = b"=" + raw_formula
formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)
# Set the value of the formula if we know it.
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
formula.value = formula_val
# Save the XLM object.
formula.update_cell_id(cell_index)
xlm_cells[row][col] = formula
color_print.output('g', "Parsed MS XLM macros.")
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_97(maldoc):
"""
Read in an Excel 97 workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Run olevba on the file and extract the XLM macro code lines.
color_print.output('g', "Analyzing Excel 97 file ...")
xlm_code = _extract_xlm(maldoc)
color_print.output('g', "Extracted XLM with olevba.")
if debug:
print("=========== START RAW XLM ==============")
print(xlm_code)
print("=========== DONE RAW XLM ==============")
if (xlm_code is None):
color_print.output('r', "ERROR: Unable to extract XLM. Emulation aborted.")
return (None, None, None)
# Parse the XLM text and get XLM objects that can be emulated.
xlm_cells = XLM.stack_transformer.parse_olevba_xlm(xlm_code)
color_print.output('g', "Parsed olevba XLM macros.")
if (xlm_cells is None):
color_print.output('r', "ERROR: Parsing of XLM failed. Emulation aborted.")
return (None, None, None)
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def emulate(maldoc):
"""
Emulate the behavior of a given Excel file containing XLM macros.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) 1st element is a list of 3 element tuples containing the actions performed
by the sheet, 2nd element is the human readable XLM code.
"""
# Excel 97 file?
if (XLM.utils.is_excel_file_97(maldoc)):
workbook, xlm_cell_indices, xlm_sheet = _read_workbook_97(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading Excel 97 file failed. Emulation aborted.")
return ([], "")
# Excel 2007+ file?
elif (XLM.utils.is_excel_file_2007(maldoc)):
workbook, xlm_cell_indices, xlm_sheet = _read_workbook_2007(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading Excel 2007 file failed. Emulation aborted.")
return ([], "")
else:
color_print.output('y', "WARNING: " + maldoc + " is not an Excel file. Emulation aborted.")
return ([], "")
# Save the indices of the XLM cells in the workbook. We do this here directly so that
# the base definition of the ExcelWorkbook class does not need to be changed.
xlm_sheet.xlm_cell_indices = xlm_cell_indices
# Emulate the XLM.
color_print.output('g', "Starting XLM emulation ...")
r = XLM_Object.eval(xlm_sheet)
color_print.output('g', "Finished XLM emulation.")
# Done.
return r
| """
Run olevba on the given file and extract the XLM macro code lines.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (str) The XLM macro cells extracted from running olevba on the
given file. None will be returned on error.
"""
# Run olevba on the given file.
olevba_out = None
FNULL = open(os.devnull, 'w')
try:
cmd = "timeout 30 olevba -c \"" + str(maldoc) + "\""
olevba_out = subprocess.check_output(cmd, shell=True, stderr=FNULL)
except Exception as e:
color_print.output('r', "ERROR: Running olevba on " + str(maldoc) + " failed. " + str(e))
return None
# Not handling encrypted Excel files.
if (b"FILEPASS record: file is password protected" in olevba_out):
color_print.output('y', "WARNING: " + str(maldoc) + " is password protected. Not emulating.")
return None
# Pull out the chunks containing the XLM lines.
chunk_pat = b"in file: xlm_macro \- OLE stream: 'xlm_macro'\n(?:\- ){39}\n(.+)"
chunks = re.findall(chunk_pat, olevba_out, re.DOTALL)
# Pull out all the XLM lines from each chunk.
r = b""
xlm_pat = br"' \d\d\d\d {1,10}\d{1,6} [^\n]+\n"
for chunk in chunks:
for line in re.findall(xlm_pat, chunk):
r += line
# Convert characters so this can be parsed.
try:
r = XLM.utils.to_str(r)
except UnicodeDecodeError:
r = XLM.utils.strip_unprintable(r)
# Did we find XLM?
if (len(r.strip()) == 0):
color_print.output('y', "WARNING: No XLM found.")
return None
# Done. Return XLM lines.
return r | identifier_body |
__init__.py | """@package XLM
Top level Excel XLM macro emulator interface.
"""
from __future__ import print_function
import subprocess
import sys
import re
import os
import string
# https://github.com/kirk-sayre-work/office_dumper.git
import excel
from XLM import color_print
import XLM.stack_transformer
from XLM import XLM_Object
import XLM.xlm_library
import XLM.utils
import XLM.ms_stack_transformer
import XLM.excel2007
## Check installation prerequisites.
# Make sure olevba is installed.
try:
subprocess.check_output(["olevba", "-h"])
except Exception as e:
color_print.output('r', "ERROR: It looks like olevba is not installed. " + str(e) + "\n")
sys.exit(101)
# Debugging flag.
debug = False
####################################################################
def set_debug(flag):
"""
Turn debugging on or off.
@param flag (boolean) True means output debugging, False means no.
"""
global debug
debug = flag
XLM.XLM_Object.debug = flag
XLM.xlm_library.debug = flag
XLM.ms_stack_transformer.debug = flag
XLM.stack_transformer.debug = flag
XLM.excel2007.debug = flag
####################################################################
def _extract_xlm(maldoc):
"""
Run olevba on the given file and extract the XLM macro code lines.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (str) The XLM macro cells extracted from running olevba on the
given file. None will be returned on error.
"""
# Run olevba on the given file.
olevba_out = None
FNULL = open(os.devnull, 'w')
try:
cmd = "timeout 30 olevba -c \"" + str(maldoc) + "\""
olevba_out = subprocess.check_output(cmd, shell=True, stderr=FNULL)
except Exception as e:
color_print.output('r', "ERROR: Running olevba on " + str(maldoc) + " failed. " + str(e))
return None
# Not handling encrypted Excel files.
if (b"FILEPASS record: file is password protected" in olevba_out):
color_print.output('y', "WARNING: " + str(maldoc) + " is password protected. Not emulating.")
return None
# Pull out the chunks containing the XLM lines.
chunk_pat = b"in file: xlm_macro \- OLE stream: 'xlm_macro'\n(?:\- ){39}\n(.+)"
chunks = re.findall(chunk_pat, olevba_out, re.DOTALL)
# Pull out all the XLM lines from each chunk.
r = b""
xlm_pat = br"' \d\d\d\d {1,10}\d{1,6} [^\n]+\n"
for chunk in chunks:
for line in re.findall(xlm_pat, chunk):
r += line
# Convert characters so this can be parsed.
try:
r = XLM.utils.to_str(r)
except UnicodeDecodeError:
r = XLM.utils.strip_unprintable(r)
# Did we find XLM?
if (len(r.strip()) == 0):
color_print.output('y', "WARNING: No XLM found.")
return None
# Done. Return XLM lines.
return r
####################################################################
def _guess_xlm_sheet(workbook):
"""
Guess the sheet containing the XLM macros by finding the sheet with the
most unresolved "#NAME" cells.
@param workbook (ExcelSheet object) The Excel spreadsheet to check.
@return (str) The name of the sheet that might contain the XLM macros.
"""
# TODO: If plugin_biff.py used by olevba to dump XLM includes sheet names this
# function will no longer be needed.
# Look at each sheet.
xlm_sheet = None
unresolved_count = -1
for curr_sheet_name in workbook.sheet_names():
curr_sheet = workbook.sheet_by_name(curr_sheet_name)
curr_unresolved_count = 0
for cell_value in list(curr_sheet.cells.values()):
cell_value = cell_value.strip()
if (len(cell_value) == 0):
continue
if (cell_value.strip() == "#NAME?"):
curr_unresolved_count += 1
if (curr_unresolved_count > unresolved_count):
unresolved_count = curr_unresolved_count
xlm_sheet = curr_sheet_name
# Return the sheet with the most '#NAME' cells.
return xlm_sheet
####################################################################
def _merge_XLM_cells(maldoc, xlm_cells):
"""
Merge the given XLM cells into the value cells read from the
given Excel file.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@param xlm_cells (dict) A dict of XLM formula objects (XLM_Object objects) where
dict[ROW][COL] gives the XLM cell at (ROW, COL).
@return (tuple) A 3 element tuple where the 1st element is the updated ExcelWorkbook and
2nd element is a list of 2 element tuples containing the XLM cell indices on success and
the 3rd element is the XLM sheet object, (None, None, None) on error.
"""
# Read in the Excel workbook data.
color_print.output('g', "Merging XLM macro cells with data cells ...")
workbook = excel.read_excel_sheets(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading in Excel file " + str(maldoc) + " failed.")
return (None, None, None)
# Guess the name of the sheet containing the XLM macros.
xlm_sheet_name = _guess_xlm_sheet(workbook)
if debug:
print("XLM Sheet:")
print(xlm_sheet_name)
print("")
# Insert the XLM macros into the XLM sheet.
xlm_sheet = workbook.sheet_by_name(xlm_sheet_name)
xlm_cell_indices = []
if debug:
print("=========== START MERGE ==============")
rows = xlm_cells.keys()
for row in rows:
cols = xlm_cells[row].keys()
for col in cols:
xlm_cell = xlm_cells[row][col]
if debug:
|
cell_index = (row, col)
xlm_sheet.cells[cell_index] = xlm_cell
xlm_cell_indices.append(cell_index)
# Debug.
if debug:
print("=========== DONE MERGE ==============")
print(workbook)
# Done. Return the indices of the added XLM cells and the updated
# workbook.
color_print.output('g', "Merged XLM macro cells with data cells.")
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_2007(maldoc):
"""
Read in an Excel 2007+ workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Read in the 2007+ cells.
color_print.output('g', "Analyzing Excel 2007+ file ...")
workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc)
color_print.output('g', "Extracted XLM from ZIP archive.")
if (workbook_info is None):
return (None, None, None)
if (len(workbook_info) == 0):
color_print.output('y', "WARNING: No XLM macros found.")
return (None, None, None)
if debug:
print("=========== START 2007+ CONTENTS ==============")
for sheet in workbook_info.keys():
print("\n------")
print(sheet)
print("")
for c in workbook_info[sheet].keys():
print(str(c) + " ---> " + str(workbook_info[sheet][c]))
print("=========== DONE 2007+ CONTENTS ==============")
# Figure out which sheet probably has the XLM macros.
xlm_sheet_name = None
max_formulas = -1
for sheet in workbook_info.keys():
if (len(workbook_info[sheet]) > max_formulas):
max_formulas = len(workbook_info[sheet])
xlm_sheet_name = sheet
# Parse each formula and add it to a sheet object.
xlm_cells = {}
for cell_index in workbook_info[xlm_sheet_name].keys():
# Value only cell?
row = cell_index[0]
col = cell_index[1]
if (row not in xlm_cells):
xlm_cells[row] = {}
raw_formula = workbook_info[xlm_sheet_name][cell_index][0]
if (raw_formula is None):
# Do we have a value?
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
# Just save the value in the cell.
xlm_cells[row][col] = formula_val
continue
# Parse the formula into an XLM object.
formula_str = b"=" + raw_formula
formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)
# Set the value of the formula if we know it.
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
formula.value = formula_val
# Save the XLM object.
formula.update_cell_id(cell_index)
xlm_cells[row][col] = formula
color_print.output('g', "Parsed MS XLM macros.")
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_97(maldoc):
"""
Read in an Excel 97 workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Run olevba on the file and extract the XLM macro code lines.
color_print.output('g', "Analyzing Excel 97 file ...")
xlm_code = _extract_xlm(maldoc)
color_print.output('g', "Extracted XLM with olevba.")
if debug:
print("=========== START RAW XLM ==============")
print(xlm_code)
print("=========== DONE RAW XLM ==============")
if (xlm_code is None):
color_print.output('r', "ERROR: Unable to extract XLM. Emulation aborted.")
return (None, None, None)
# Parse the XLM text and get XLM objects that can be emulated.
xlm_cells = XLM.stack_transformer.parse_olevba_xlm(xlm_code)
color_print.output('g', "Parsed olevba XLM macros.")
if (xlm_cells is None):
color_print.output('r', "ERROR: Parsing of XLM failed. Emulation aborted.")
return (None, None, None)
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def emulate(maldoc):
"""
Emulate the behavior of a given Excel file containing XLM macros.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) 1st element is a list of 3 element tuples containing the actions performed
by the sheet, 2nd element is the human readable XLM code.
"""
# Excel 97 file?
if (XLM.utils.is_excel_file_97(maldoc)):
workbook, xlm_cell_indices, xlm_sheet = _read_workbook_97(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading Excel 97 file failed. Emulation aborted.")
return ([], "")
# Excel 2007+ file?
elif (XLM.utils.is_excel_file_2007(maldoc)):
workbook, xlm_cell_indices, xlm_sheet = _read_workbook_2007(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading Excel 2007 file failed. Emulation aborted.")
return ([], "")
else:
color_print.output('y', "WARNING: " + maldoc + " is not an Excel file. Emulation aborted.")
return ([], "")
# Save the indices of the XLM cells in the workbook. We do this here directly so that
# the base definition of the ExcelWorkbook class does not need to be changed.
xlm_sheet.xlm_cell_indices = xlm_cell_indices
# Emulate the XLM.
color_print.output('g', "Starting XLM emulation ...")
r = XLM_Object.eval(xlm_sheet)
color_print.output('g', "Finished XLM emulation.")
# Done.
return r
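# A minimal, illustrative driver for emulate(); the file name handling below is
# hypothetical, and only the (actions, xlm_code) unpacking follows the
# docstring above.
if __name__ == "__main__":
    import sys
    target = sys.argv[1] if (len(sys.argv) > 1) else "sample_maldoc.xls"
    actions, xlm_code = emulate(target)
    print(xlm_code)
    for action in actions:
        # Each action is a 3 element tuple describing something the emulated
        # XLM macros did.
        print(action)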
| print((row, col))
print(xlm_cell) | conditional_block |
rainstorm.rs | #![feature(macro_rules, intrinsics, lang_items, globs)]
#![no_std]
extern crate libc;
extern crate core;
extern crate alloc;
extern crate collections;
extern crate rand;
pub use core::prelude::*;
pub use cheats::{Cheat, CheatManager};
pub use alloc::owned::Box;
pub use collections::Vec;
use core::raw::Repr;
mod logging;
pub mod sdk;
mod vmthook;
pub mod utils;
mod cheats;
mod std {
pub use core::fmt; //lol
pub use core::option;
pub use core::num;
}
#[allow(dead_code)]
pub mod cmath {
use libc::{c_float, c_int};
#[link_name = "m"]
extern {
pub fn acosf(n: c_float) -> c_float;
pub fn asinf(n: c_float) -> c_float;
pub fn atanf(n: c_float) -> c_float;
pub fn atan2f(a: c_float, b: c_float) -> c_float;
pub fn cbrtf(n: c_float) -> c_float;
pub fn coshf(n: c_float) -> c_float;
pub fn erff(n: c_float) -> c_float;
pub fn erfcf(n: c_float) -> c_float;
pub fn expm1f(n: c_float) -> c_float;
pub fn fdimf(a: c_float, b: c_float) -> c_float;
pub fn frexpf(n: c_float, value: &mut c_int) -> c_float;
pub fn fmaxf(a: c_float, b: c_float) -> c_float;
pub fn fminf(a: c_float, b: c_float) -> c_float;
pub fn fmodf(a: c_float, b: c_float) -> c_float;
pub fn nextafterf(x: c_float, y: c_float) -> c_float;
pub fn hypotf(x: c_float, y: c_float) -> c_float;
pub fn ldexpf(x: c_float, n: c_int) -> c_float;
pub fn logbf(n: c_float) -> c_float;
pub fn log1pf(n: c_float) -> c_float;
pub fn ilogbf(n: c_float) -> c_int;
pub fn modff(n: c_float, iptr: &mut c_float) -> c_float;
pub fn sinhf(n: c_float) -> c_float;
pub fn tanf(n: c_float) -> c_float;
pub fn tanhf(n: c_float) -> c_float;
pub fn tgammaf(n: c_float) -> c_float;
/*#[cfg(unix)]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;
#[cfg(windows)]
#[link_name="__lgammaf_r"]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;*/
}
}
#[no_mangle]
pub static mut NOCMD_ENABLED: bool = false;
#[no_mangle]
pub static mut REAL_INIT: *const () = 0 as *const();
#[no_mangle]
pub static mut REAL_CREATEMOVE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_EXTRAMOUSESAMPLE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_RUNCOMMAND: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_SERVERCMDKEYVALUES: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_NETCHANNEL_SENDDATAGRAM: *const () = 0 as *const ();
#[no_mangle]
pub static mut CINPUT_PTR: *mut sdk::CInput = 0 as *mut sdk::CInput;
struct CString(*const libc::c_char);
impl CString {
pub fn new(src: &'static [u8]) -> Option<CString> {
let slice = src.repr();
if unsafe { *((slice.data as uint + (slice.len - 1)) as *const u8) == 0 } {
Some(CString(slice.data as *const libc::c_char))
} else {
None
}
}
pub unsafe fn new_raw(src: *const u8) -> CString {
CString(src as *const libc::c_char)
}
}
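// Note: CString::new only succeeds when the byte string literal already ends
// in a NUL terminator, e.g. CString::new(b"sv_cheats\0") (an illustrative
// name); a slice without the trailing 0 returns None instead of handing out
// an unterminated C string.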
#[no_mangle]
pub extern "C" fn rainstorm_getivengineclient() -> sdk::raw::IVEngineClientPtr {
unsafe { (*(cheats::CHEAT_MANAGER)).get_gamepointers().ivengineclient.get_ptr() }
}
pub struct GamePointers {
ivengineclient: sdk::IVEngineClient,
icliententitylist: sdk::IClientEntityList,
ibaseclientdll: sdk::IBaseClientDLL,
ienginetrace: sdk::IEngineTrace,
appsysfactory: Option<sdk::AppSysFactory>,
ivmodelinfo: sdk::IVModelInfo,
icvar: Option<sdk::ICvar>,
iuniformrandomstream: sdk::IUniformRandomStream,
globals: Option<*mut sdk::CGlobalVarsBase>
}
impl GamePointers {
pub fn load() -> GamePointers {
log!("Loading GamePointers...\n");
GamePointers {
ivengineclient: sdk::get_ivengineclient(),
ibaseclientdll: sdk::get_ibaseclientdll(),
icliententitylist: sdk::get_icliententitylist(),
ienginetrace: sdk::get_ienginetrace(),
ivmodelinfo: sdk::get_ivmodelinfo(),
appsysfactory: None,
icvar: None,
iuniformrandomstream: sdk::get_iuniformrandomstream(),
globals: None
}
}
}
pub unsafe fn locate_cinput() -> Option<*mut sdk::CInput> {
let start_addr = REAL_CREATEMOVE as *const ();
log!("Locating CInput from CreateMove at {}\n", start_addr);
let result = utils::search_memory(start_addr, 100, &[0x8B, 0x0D]);
//let result = utils::search_memory(((result1 as uint) + 2) as *const (), 100, &[0x8B, 0x0D]);
match result {
Some(ptr) => {
let load_instruction_operand = ((ptr as uint) + 2) as *const *const *mut sdk::CInput;
log!("CInput load found at {}\n", load_instruction_operand);
let cinput_ptr_ptr = *load_instruction_operand;
log!("CInput pointer: {}\n", cinput_ptr_ptr);
log!("CInput found at {}\n", *cinput_ptr_ptr);
Some((*cinput_ptr_ptr))
},
None => {
log!("CInput not found?!?\n");
None
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_preinithook(app_sys_factory: sdk::AppSysFactoryPtr, _physics_factory: *mut (), globals: *mut sdk::CGlobalVarsBase) {
log!("pre-init hook running\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).preinit(app_sys_factory, globals);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_postinithook() {
log!("Post-init hook running...\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).postinit();
} else {
log!("Cheat manager not found!\n");
libc::exit(1);
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_pre_createmove(sequence_number: *mut libc::c_int, input_sample_frametime: *mut libc::c_float, active: *mut bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).pre_createmove(sequence_number, input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_process_usercmd(cmd: &mut sdk::CUserCmd) {
if cheats::CHEAT_MANAGER.is_not_null() {
maybe_hook_inetchannel((*cheats::CHEAT_MANAGER).get_gamepointers());
(*cheats::CHEAT_MANAGER).process_usercmd(cmd);
} else {
quit!("Cheat manager not found!\n");
}; | #[no_mangle]
pub unsafe extern "C" fn rainstorm_extramousesample(input_sample_frametime: libc::c_float, active: bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).extramousesample(input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub extern "C" fn rainstorm_command_cb(c_arguments: *const libc::c_char) {
let arguments_str = unsafe { core::str::raw::c_str_to_static_slice(c_arguments) };
log!("Command callback: {}\n", arguments_str);
let mut parts_iter = arguments_str.split(' ');
let command = parts_iter.next().expect("No command type specified!");
let parts: collections::Vec<&str> = parts_iter.collect();
unsafe {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).handle_command(command, parts.as_slice());
}
}
}
#[no_mangle]
pub extern "C" fn rainstorm_init(log_fd: libc::c_int, hooked_init_trampoline: *const (), hooked_createmove_trampoline: *const (),
hooked_extramousesample_trampoline: *const (), hooked_runcommand_trampoline: *const ()) {
unsafe { let _ = logging::set_fd(log_fd).unwrap(); }
log!("Rainstorm starting up!\n");
cheats::cheatmgr_setup();
unsafe {
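// Each VMTHooker below patches a virtual method table in place: the original
// function pointer is stashed in the matching REAL_* global so the
// trampolines can call through to it, then the slot is overwritten with the
// hook. The slot numbers (0 = Init, 21 = CreateMove, 22 = ExtraMouseSample on
// IBaseClientDLL) mirror the REAL_* globals they are saved into.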
let mut ibaseclientdll_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ibaseclientdll.get_ptr().to_uint() as *mut *const ());
REAL_INIT = ibaseclientdll_hooker.get_orig_method(0);
REAL_CREATEMOVE = ibaseclientdll_hooker.get_orig_method(21);
REAL_EXTRAMOUSESAMPLE = ibaseclientdll_hooker.get_orig_method(22);
ibaseclientdll_hooker.hook(0, hooked_init_trampoline);
ibaseclientdll_hooker.hook(21, hooked_createmove_trampoline);
ibaseclientdll_hooker.hook(22, hooked_extramousesample_trampoline);
// let mut ivengineclient_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ivengineclient.get_ptr().to_uint() as *mut *const ());
// REAL_SERVERCMDKEYVALUES = ivengineclient_hooker.get_orig_method(185);
// ivengineclient_hooker.hook(185, sdk::raw::get_hooked_servercmdkeyvalues());
CINPUT_PTR = locate_cinput().expect("Failed to locate CInput pointer (signature not found)");
let mut hooker = vmthook::VMTHooker::new(CINPUT_PTR as *mut *const ());
hooker.hook(8, sdk::get_hooked_getusercmd());
let mut iprediction_hooker = vmthook::VMTHooker::new(sdk::raw::getptr_iprediction().to_uint() as *mut *const ());
REAL_RUNCOMMAND = iprediction_hooker.get_orig_method(17);
iprediction_hooker.hook(17, sdk::raw::get_hooked_runcommand());
};
}
/// If we haven't seen this INetChannel before, hook it.
fn maybe_hook_inetchannel(ptrs: &GamePointers) {
static mut LAST_NETCHANNEL: Option<sdk::raw::INetChannelPtr> = None;
unsafe {
let inetchannel = sdk::raw::get_current_inetchannel(ptrs.ivengineclient.get_ptr());
//log!("chan: {}\n", inetchannel.to_uint());
let is_new_channel = match LAST_NETCHANNEL {
Some(last) => { inetchannel != last },
None => true
};
LAST_NETCHANNEL = Some(inetchannel);
if !is_new_channel {
//log!("Not patching old netchannel");
return;
}
let mut hooker = vmthook::VMTHooker::new(inetchannel.to_uint() as *mut *const ());
REAL_NETCHANNEL_SENDDATAGRAM = hooker.get_orig_method(46);
hooker.hook(46, ::sdk::raw::get_netchannel_senddatagram_trampoline().to_uint() as *const ());
log!("senddatagram: {}\n", hooker.get_orig_method(46));
};
}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "begin_unwind"]
extern fn begin_unwind(fmt: &core::fmt::Arguments, file: &str, line: uint) -> ! {
log!("Failed at line {} of {}!\n", line, file);
let _ = logging::log_fmt(fmt).ok(); // if we fail here, god help us
unsafe { libc::exit(42); }
}
#[allow(non_snake_case_functions)]
#[no_mangle]
pub extern "C" fn _imp___onexit() {
}
#[no_mangle]
pub extern "C" fn __dllonexit() {
}
#[no_mangle]
pub extern "C" fn __setusermatherr() {
} | }
| random_line_split |
rainstorm.rs | #![feature(macro_rules, intrinsics, lang_items, globs)]
#![no_std]
extern crate libc;
extern crate core;
extern crate alloc;
extern crate collections;
extern crate rand;
pub use core::prelude::*;
pub use cheats::{Cheat, CheatManager};
pub use alloc::owned::Box;
pub use collections::Vec;
use core::raw::Repr;
mod logging;
pub mod sdk;
mod vmthook;
pub mod utils;
mod cheats;
mod std {
pub use core::fmt; //lol
pub use core::option;
pub use core::num;
}
#[allow(dead_code)]
pub mod cmath {
use libc::{c_float, c_int};
#[link_name = "m"]
extern {
pub fn acosf(n: c_float) -> c_float;
pub fn asinf(n: c_float) -> c_float;
pub fn atanf(n: c_float) -> c_float;
pub fn atan2f(a: c_float, b: c_float) -> c_float;
pub fn cbrtf(n: c_float) -> c_float;
pub fn coshf(n: c_float) -> c_float;
pub fn erff(n: c_float) -> c_float;
pub fn erfcf(n: c_float) -> c_float;
pub fn expm1f(n: c_float) -> c_float;
pub fn fdimf(a: c_float, b: c_float) -> c_float;
pub fn frexpf(n: c_float, value: &mut c_int) -> c_float;
pub fn fmaxf(a: c_float, b: c_float) -> c_float;
pub fn fminf(a: c_float, b: c_float) -> c_float;
pub fn fmodf(a: c_float, b: c_float) -> c_float;
pub fn nextafterf(x: c_float, y: c_float) -> c_float;
pub fn hypotf(x: c_float, y: c_float) -> c_float;
pub fn ldexpf(x: c_float, n: c_int) -> c_float;
pub fn logbf(n: c_float) -> c_float;
pub fn log1pf(n: c_float) -> c_float;
pub fn ilogbf(n: c_float) -> c_int;
pub fn modff(n: c_float, iptr: &mut c_float) -> c_float;
pub fn sinhf(n: c_float) -> c_float;
pub fn tanf(n: c_float) -> c_float;
pub fn tanhf(n: c_float) -> c_float;
pub fn tgammaf(n: c_float) -> c_float;
/*#[cfg(unix)]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;
#[cfg(windows)]
#[link_name="__lgammaf_r"]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;*/
}
}
#[no_mangle]
pub static mut NOCMD_ENABLED: bool = false;
#[no_mangle]
pub static mut REAL_INIT: *const () = 0 as *const();
#[no_mangle]
pub static mut REAL_CREATEMOVE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_EXTRAMOUSESAMPLE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_RUNCOMMAND: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_SERVERCMDKEYVALUES: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_NETCHANNEL_SENDDATAGRAM: *const () = 0 as *const ();
#[no_mangle]
pub static mut CINPUT_PTR: *mut sdk::CInput = 0 as *mut sdk::CInput;
struct CString(*const libc::c_char);
impl CString {
pub fn new(src: &'static [u8]) -> Option<CString> {
let slice = src.repr();
if unsafe { *((slice.data as uint + (slice.len - 1)) as *const u8) == 0 } {
Some(CString(slice.data as *const libc::c_char))
} else {
None
}
}
pub unsafe fn new_raw(src: *const u8) -> CString {
CString(src as *const libc::c_char)
}
}
#[no_mangle]
pub extern "C" fn rainstorm_getivengineclient() -> sdk::raw::IVEngineClientPtr {
unsafe { (*(cheats::CHEAT_MANAGER)).get_gamepointers().ivengineclient.get_ptr() }
}
pub struct GamePointers {
ivengineclient: sdk::IVEngineClient,
icliententitylist: sdk::IClientEntityList,
ibaseclientdll: sdk::IBaseClientDLL,
ienginetrace: sdk::IEngineTrace,
appsysfactory: Option<sdk::AppSysFactory>,
ivmodelinfo: sdk::IVModelInfo,
icvar: Option<sdk::ICvar>,
iuniformrandomstream: sdk::IUniformRandomStream,
globals: Option<*mut sdk::CGlobalVarsBase>
}
impl GamePointers {
pub fn load() -> GamePointers {
log!("Loading GamePointers...\n");
GamePointers {
ivengineclient: sdk::get_ivengineclient(),
ibaseclientdll: sdk::get_ibaseclientdll(),
icliententitylist: sdk::get_icliententitylist(),
ienginetrace: sdk::get_ienginetrace(),
ivmodelinfo: sdk::get_ivmodelinfo(),
appsysfactory: None,
icvar: None,
iuniformrandomstream: sdk::get_iuniformrandomstream(),
globals: None
}
}
}
pub unsafe fn locate_cinput() -> Option<*mut sdk::CInput> {
let start_addr = REAL_CREATEMOVE as *const ();
log!("Locating CInput from CreateMove at {}\n", start_addr);
let result = utils::search_memory(start_addr, 100, &[0x8B, 0x0D]);
//let result = utils::search_memory(((result1 as uint) + 2) as *const (), 100, &[0x8B, 0x0D]);
match result {
Some(ptr) => {
let load_instruction_operand = ((ptr as uint) + 2) as *const *const *mut sdk::CInput;
log!("CInput load found at {}\n", load_instruction_operand);
let cinput_ptr_ptr = *load_instruction_operand;
log!("CInput pointer: {}\n", cinput_ptr_ptr);
log!("CInput found at {}\n", *cinput_ptr_ptr);
Some((*cinput_ptr_ptr))
},
None => {
log!("CInput not found?!?\n");
None
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_preinithook(app_sys_factory: sdk::AppSysFactoryPtr, _physics_factory: *mut (), globals: *mut sdk::CGlobalVarsBase) {
log!("pre-init hook running\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).preinit(app_sys_factory, globals);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_postinithook() {
log!("Post-init hook running...\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).postinit();
} else {
log!("Cheat manager not found!\n");
libc::exit(1);
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_pre_createmove(sequence_number: *mut libc::c_int, input_sample_frametime: *mut libc::c_float, active: *mut bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).pre_createmove(sequence_number, input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_process_usercmd(cmd: &mut sdk::CUserCmd) {
if cheats::CHEAT_MANAGER.is_not_null() {
maybe_hook_inetchannel((*cheats::CHEAT_MANAGER).get_gamepointers());
(*cheats::CHEAT_MANAGER).process_usercmd(cmd);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_extramousesample(input_sample_frametime: libc::c_float, active: bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).extramousesample(input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub extern "C" fn rainstorm_command_cb(c_arguments: *const libc::c_char) {
let arguments_str = unsafe { core::str::raw::c_str_to_static_slice(c_arguments) };
log!("Command callback: {}\n", arguments_str);
let mut parts_iter = arguments_str.split(' ');
let command = parts_iter.next().expect("No command type specified!");
let parts: collections::Vec<&str> = parts_iter.collect();
unsafe {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).handle_command(command, parts.as_slice());
}
}
}
#[no_mangle]
pub extern "C" fn rainstorm_init(log_fd: libc::c_int, hooked_init_trampoline: *const (), hooked_createmove_trampoline: *const (),
hooked_extramousesample_trampoline: *const (), hooked_runcommand_trampoline: *const ()) {
unsafe { let _ = logging::set_fd(log_fd).unwrap(); }
log!("Rainstorm starting up!\n");
cheats::cheatmgr_setup();
unsafe {
let mut ibaseclientdll_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ibaseclientdll.get_ptr().to_uint() as *mut *const ());
REAL_INIT = ibaseclientdll_hooker.get_orig_method(0);
REAL_CREATEMOVE = ibaseclientdll_hooker.get_orig_method(21);
REAL_EXTRAMOUSESAMPLE = ibaseclientdll_hooker.get_orig_method(22);
ibaseclientdll_hooker.hook(0, hooked_init_trampoline);
ibaseclientdll_hooker.hook(21, hooked_createmove_trampoline);
ibaseclientdll_hooker.hook(22, hooked_extramousesample_trampoline);
// let mut ivengineclient_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ivengineclient.get_ptr().to_uint() as *mut *const ());
// REAL_SERVERCMDKEYVALUES = ivengineclient_hooker.get_orig_method(185);
// ivengineclient_hooker.hook(185, sdk::raw::get_hooked_servercmdkeyvalues());
CINPUT_PTR = locate_cinput().expect("Failed to locate CInput pointer (signature not found)");
let mut hooker = vmthook::VMTHooker::new(CINPUT_PTR as *mut *const ());
hooker.hook(8, sdk::get_hooked_getusercmd());
let mut iprediction_hooker = vmthook::VMTHooker::new(sdk::raw::getptr_iprediction().to_uint() as *mut *const ());
REAL_RUNCOMMAND = iprediction_hooker.get_orig_method(17);
iprediction_hooker.hook(17, sdk::raw::get_hooked_runcommand());
};
}
/// If we haven't seen this INetChannel before, hook it.
fn maybe_hook_inetchannel(ptrs: &GamePointers) {
static mut LAST_NETCHANNEL: Option<sdk::raw::INetChannelPtr> = None;
unsafe {
let inetchannel = sdk::raw::get_current_inetchannel(ptrs.ivengineclient.get_ptr());
//log!("chan: {}\n", inetchannel.to_uint());
let is_new_channel = match LAST_NETCHANNEL {
Some(last) => { inetchannel != last },
None => true
};
LAST_NETCHANNEL = Some(inetchannel);
if !is_new_channel {
//log!("Not patching old netchannel");
return;
}
let mut hooker = vmthook::VMTHooker::new(inetchannel.to_uint() as *mut *const ());
REAL_NETCHANNEL_SENDDATAGRAM = hooker.get_orig_method(46);
hooker.hook(46, ::sdk::raw::get_netchannel_senddatagram_trampoline().to_uint() as *const ());
log!("senddatagram: {}\n", hooker.get_orig_method(46));
};
}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "begin_unwind"]
extern fn begin_unwind(fmt: &core::fmt::Arguments, file: &str, line: uint) -> ! |
#[allow(non_snake_case_functions)]
#[no_mangle]
pub extern "C" fn _imp___onexit() {
}
#[no_mangle]
pub extern "C" fn __dllonexit() {
}
#[no_mangle]
pub extern "C" fn __setusermatherr() {
} | {
log!("Failed at line {} of {}!\n", line, file);
let _ = logging::log_fmt(fmt).ok(); // if we fail here, god help us
unsafe { libc::exit(42); }
} | identifier_body |
rainstorm.rs | #![feature(macro_rules, intrinsics, lang_items, globs)]
#![no_std]
extern crate libc;
extern crate core;
extern crate alloc;
extern crate collections;
extern crate rand;
pub use core::prelude::*;
pub use cheats::{Cheat, CheatManager};
pub use alloc::owned::Box;
pub use collections::Vec;
use core::raw::Repr;
mod logging;
pub mod sdk;
mod vmthook;
pub mod utils;
mod cheats;
mod std {
pub use core::fmt; //lol
pub use core::option;
pub use core::num;
}
#[allow(dead_code)]
pub mod cmath {
use libc::{c_float, c_int};
#[link_name = "m"]
extern {
pub fn acosf(n: c_float) -> c_float;
pub fn asinf(n: c_float) -> c_float;
pub fn atanf(n: c_float) -> c_float;
pub fn atan2f(a: c_float, b: c_float) -> c_float;
pub fn cbrtf(n: c_float) -> c_float;
pub fn coshf(n: c_float) -> c_float;
pub fn erff(n: c_float) -> c_float;
pub fn erfcf(n: c_float) -> c_float;
pub fn expm1f(n: c_float) -> c_float;
pub fn fdimf(a: c_float, b: c_float) -> c_float;
pub fn frexpf(n: c_float, value: &mut c_int) -> c_float;
pub fn fmaxf(a: c_float, b: c_float) -> c_float;
pub fn fminf(a: c_float, b: c_float) -> c_float;
pub fn fmodf(a: c_float, b: c_float) -> c_float;
pub fn nextafterf(x: c_float, y: c_float) -> c_float;
pub fn hypotf(x: c_float, y: c_float) -> c_float;
pub fn ldexpf(x: c_float, n: c_int) -> c_float;
pub fn logbf(n: c_float) -> c_float;
pub fn log1pf(n: c_float) -> c_float;
pub fn ilogbf(n: c_float) -> c_int;
pub fn modff(n: c_float, iptr: &mut c_float) -> c_float;
pub fn sinhf(n: c_float) -> c_float;
pub fn tanf(n: c_float) -> c_float;
pub fn tanhf(n: c_float) -> c_float;
pub fn tgammaf(n: c_float) -> c_float;
/*#[cfg(unix)]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;
#[cfg(windows)]
#[link_name="__lgammaf_r"]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;*/
}
}
#[no_mangle]
pub static mut NOCMD_ENABLED: bool = false;
#[no_mangle]
pub static mut REAL_INIT: *const () = 0 as *const();
#[no_mangle]
pub static mut REAL_CREATEMOVE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_EXTRAMOUSESAMPLE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_RUNCOMMAND: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_SERVERCMDKEYVALUES: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_NETCHANNEL_SENDDATAGRAM: *const () = 0 as *const ();
#[no_mangle]
pub static mut CINPUT_PTR: *mut sdk::CInput = 0 as *mut sdk::CInput;
struct CString(*const libc::c_char);
impl CString {
pub fn new(src: &'static [u8]) -> Option<CString> {
let slice = src.repr();
if unsafe { *((slice.data as uint + (slice.len - 1)) as *const u8) == 0 } {
Some(CString(slice.data as *const libc::c_char))
} else {
None
}
}
pub unsafe fn new_raw(src: *const u8) -> CString {
CString(src as *const libc::c_char)
}
}
#[no_mangle]
pub extern "C" fn rainstorm_getivengineclient() -> sdk::raw::IVEngineClientPtr {
unsafe { (*(cheats::CHEAT_MANAGER)).get_gamepointers().ivengineclient.get_ptr() }
}
pub struct GamePointers {
ivengineclient: sdk::IVEngineClient,
icliententitylist: sdk::IClientEntityList,
ibaseclientdll: sdk::IBaseClientDLL,
ienginetrace: sdk::IEngineTrace,
appsysfactory: Option<sdk::AppSysFactory>,
ivmodelinfo: sdk::IVModelInfo,
icvar: Option<sdk::ICvar>,
iuniformrandomstream: sdk::IUniformRandomStream,
globals: Option<*mut sdk::CGlobalVarsBase>
}
impl GamePointers {
pub fn load() -> GamePointers {
log!("Loading GamePointers...\n");
GamePointers {
ivengineclient: sdk::get_ivengineclient(),
ibaseclientdll: sdk::get_ibaseclientdll(),
icliententitylist: sdk::get_icliententitylist(),
ienginetrace: sdk::get_ienginetrace(),
ivmodelinfo: sdk::get_ivmodelinfo(),
appsysfactory: None,
icvar: None,
iuniformrandomstream: sdk::get_iuniformrandomstream(),
globals: None
}
}
}
pub unsafe fn locate_cinput() -> Option<*mut sdk::CInput> {
let start_addr = REAL_CREATEMOVE as *const ();
log!("Locating CInput from CreateMove at {}\n", start_addr);
let result = utils::search_memory(start_addr, 100, &[0x8B, 0x0D]);
//let result = utils::search_memory(((result1 as uint) + 2) as *const (), 100, &[0x8B, 0x0D]);
match result {
Some(ptr) => {
let load_instruction_operand = ((ptr as uint) + 2) as *const *const *mut sdk::CInput;
log!("CInput load found at {}\n", load_instruction_operand);
let cinput_ptr_ptr = *load_instruction_operand;
log!("CInput pointer: {}\n", cinput_ptr_ptr);
log!("CInput found at {}\n", *cinput_ptr_ptr);
Some((*cinput_ptr_ptr))
},
None => {
log!("CInput not found?!?\n");
None
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_preinithook(app_sys_factory: sdk::AppSysFactoryPtr, _physics_factory: *mut (), globals: *mut sdk::CGlobalVarsBase) {
log!("pre-init hook running\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).preinit(app_sys_factory, globals);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_postinithook() {
log!("Post-init hook running...\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).postinit();
} else {
log!("Cheat manager not found!\n");
libc::exit(1);
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_pre_createmove(sequence_number: *mut libc::c_int, input_sample_frametime: *mut libc::c_float, active: *mut bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).pre_createmove(sequence_number, input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_process_usercmd(cmd: &mut sdk::CUserCmd) {
if cheats::CHEAT_MANAGER.is_not_null() {
maybe_hook_inetchannel((*cheats::CHEAT_MANAGER).get_gamepointers());
(*cheats::CHEAT_MANAGER).process_usercmd(cmd);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_extramousesample(input_sample_frametime: libc::c_float, active: bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).extramousesample(input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub extern "C" fn | (c_arguments: *const libc::c_char) {
let arguments_str = unsafe { core::str::raw::c_str_to_static_slice(c_arguments) };
log!("Command callback: {}\n", arguments_str);
let mut parts_iter = arguments_str.split(' ');
let command = parts_iter.next().expect("No command type specified!");
let parts: collections::Vec<&str> = parts_iter.collect();
unsafe {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).handle_command(command, parts.as_slice());
}
}
}
#[no_mangle]
pub extern "C" fn rainstorm_init(log_fd: libc::c_int, hooked_init_trampoline: *const (), hooked_createmove_trampoline: *const (),
hooked_extramousesample_trampoline: *const (), hooked_runcommand_trampoline: *const ()) {
unsafe { let _ = logging::set_fd(log_fd).unwrap(); }
log!("Rainstorm starting up!\n");
cheats::cheatmgr_setup();
unsafe {
let mut ibaseclientdll_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ibaseclientdll.get_ptr().to_uint() as *mut *const ());
REAL_INIT = ibaseclientdll_hooker.get_orig_method(0);
REAL_CREATEMOVE = ibaseclientdll_hooker.get_orig_method(21);
REAL_EXTRAMOUSESAMPLE = ibaseclientdll_hooker.get_orig_method(22);
ibaseclientdll_hooker.hook(0, hooked_init_trampoline);
ibaseclientdll_hooker.hook(21, hooked_createmove_trampoline);
ibaseclientdll_hooker.hook(22, hooked_extramousesample_trampoline);
// let mut ivengineclient_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ivengineclient.get_ptr().to_uint() as *mut *const ());
// REAL_SERVERCMDKEYVALUES = ivengineclient_hooker.get_orig_method(185);
// ivengineclient_hooker.hook(185, sdk::raw::get_hooked_servercmdkeyvalues());
CINPUT_PTR = locate_cinput().expect("Failed to locate CInput pointer (signature not found)");
let mut hooker = vmthook::VMTHooker::new(CINPUT_PTR as *mut *const ());
hooker.hook(8, sdk::get_hooked_getusercmd());
let mut iprediction_hooker = vmthook::VMTHooker::new(sdk::raw::getptr_iprediction().to_uint() as *mut *const ());
REAL_RUNCOMMAND = iprediction_hooker.get_orig_method(17);
iprediction_hooker.hook(17, sdk::raw::get_hooked_runcommand());
};
}
/// If we haven't seen this INetChannel before, hook it.
fn maybe_hook_inetchannel(ptrs: &GamePointers) {
static mut LAST_NETCHANNEL: Option<sdk::raw::INetChannelPtr> = None;
unsafe {
let inetchannel = sdk::raw::get_current_inetchannel(ptrs.ivengineclient.get_ptr());
//log!("chan: {}\n", inetchannel.to_uint());
let is_new_channel = match LAST_NETCHANNEL {
Some(last) => { inetchannel != last },
None => true
};
LAST_NETCHANNEL = Some(inetchannel);
if !is_new_channel {
//log!("Not patching old netchannel");
return;
}
let mut hooker = vmthook::VMTHooker::new(inetchannel.to_uint() as *mut *const ());
REAL_NETCHANNEL_SENDDATAGRAM = hooker.get_orig_method(46);
hooker.hook(46, ::sdk::raw::get_netchannel_senddatagram_trampoline().to_uint() as *const ());
log!("senddatagram: {}\n", hooker.get_orig_method(46));
};
}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "begin_unwind"]
extern fn begin_unwind(fmt: &core::fmt::Arguments, file: &str, line: uint) -> ! {
log!("Failed at line {} of {}!\n", line, file);
let _ = logging::log_fmt(fmt).ok(); // if we fail here, god help us
unsafe { libc::exit(42); }
}
#[allow(non_snake_case_functions)]
#[no_mangle]
pub extern "C" fn _imp___onexit() {
}
#[no_mangle]
pub extern "C" fn __dllonexit() {
}
#[no_mangle]
pub extern "C" fn __setusermatherr() {
} | rainstorm_command_cb | identifier_name |
rainstorm.rs | #![feature(macro_rules, intrinsics, lang_items, globs)]
#![no_std]
extern crate libc;
extern crate core;
extern crate alloc;
extern crate collections;
extern crate rand;
pub use core::prelude::*;
pub use cheats::{Cheat, CheatManager};
pub use alloc::owned::Box;
pub use collections::Vec;
use core::raw::Repr;
mod logging;
pub mod sdk;
mod vmthook;
pub mod utils;
mod cheats;
mod std {
pub use core::fmt; //lol
pub use core::option;
pub use core::num;
}
#[allow(dead_code)]
pub mod cmath {
use libc::{c_float, c_int};
#[link_name = "m"]
extern {
pub fn acosf(n: c_float) -> c_float;
pub fn asinf(n: c_float) -> c_float;
pub fn atanf(n: c_float) -> c_float;
pub fn atan2f(a: c_float, b: c_float) -> c_float;
pub fn cbrtf(n: c_float) -> c_float;
pub fn coshf(n: c_float) -> c_float;
pub fn erff(n: c_float) -> c_float;
pub fn erfcf(n: c_float) -> c_float;
pub fn expm1f(n: c_float) -> c_float;
pub fn fdimf(a: c_float, b: c_float) -> c_float;
pub fn frexpf(n: c_float, value: &mut c_int) -> c_float;
pub fn fmaxf(a: c_float, b: c_float) -> c_float;
pub fn fminf(a: c_float, b: c_float) -> c_float;
pub fn fmodf(a: c_float, b: c_float) -> c_float;
pub fn nextafterf(x: c_float, y: c_float) -> c_float;
pub fn hypotf(x: c_float, y: c_float) -> c_float;
pub fn ldexpf(x: c_float, n: c_int) -> c_float;
pub fn logbf(n: c_float) -> c_float;
pub fn log1pf(n: c_float) -> c_float;
pub fn ilogbf(n: c_float) -> c_int;
pub fn modff(n: c_float, iptr: &mut c_float) -> c_float;
pub fn sinhf(n: c_float) -> c_float;
pub fn tanf(n: c_float) -> c_float;
pub fn tanhf(n: c_float) -> c_float;
pub fn tgammaf(n: c_float) -> c_float;
/*#[cfg(unix)]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;
#[cfg(windows)]
#[link_name="__lgammaf_r"]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;*/
}
}
#[no_mangle]
pub static mut NOCMD_ENABLED: bool = false;
#[no_mangle]
pub static mut REAL_INIT: *const () = 0 as *const();
#[no_mangle]
pub static mut REAL_CREATEMOVE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_EXTRAMOUSESAMPLE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_RUNCOMMAND: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_SERVERCMDKEYVALUES: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_NETCHANNEL_SENDDATAGRAM: *const () = 0 as *const ();
#[no_mangle]
pub static mut CINPUT_PTR: *mut sdk::CInput = 0 as *mut sdk::CInput;
struct CString(*const libc::c_char);
impl CString {
pub fn new(src: &'static [u8]) -> Option<CString> {
let slice = src.repr();
if unsafe { *((slice.data as uint + (slice.len - 1)) as *const u8) == 0 } {
Some(CString(slice.data as *const libc::c_char))
} else {
None
}
}
pub unsafe fn new_raw(src: *const u8) -> CString {
CString(src as *const libc::c_char)
}
}
#[no_mangle]
pub extern "C" fn rainstorm_getivengineclient() -> sdk::raw::IVEngineClientPtr {
unsafe { (*(cheats::CHEAT_MANAGER)).get_gamepointers().ivengineclient.get_ptr() }
}
pub struct GamePointers {
ivengineclient: sdk::IVEngineClient,
icliententitylist: sdk::IClientEntityList,
ibaseclientdll: sdk::IBaseClientDLL,
ienginetrace: sdk::IEngineTrace,
appsysfactory: Option<sdk::AppSysFactory>,
ivmodelinfo: sdk::IVModelInfo,
icvar: Option<sdk::ICvar>,
iuniformrandomstream: sdk::IUniformRandomStream,
globals: Option<*mut sdk::CGlobalVarsBase>
}
impl GamePointers {
pub fn load() -> GamePointers {
log!("Loading GamePointers...\n");
GamePointers {
ivengineclient: sdk::get_ivengineclient(),
ibaseclientdll: sdk::get_ibaseclientdll(),
icliententitylist: sdk::get_icliententitylist(),
ienginetrace: sdk::get_ienginetrace(),
ivmodelinfo: sdk::get_ivmodelinfo(),
appsysfactory: None,
icvar: None,
iuniformrandomstream: sdk::get_iuniformrandomstream(),
globals: None
}
}
}
pub unsafe fn locate_cinput() -> Option<*mut sdk::CInput> {
let start_addr = REAL_CREATEMOVE as *const ();
log!("Locating CInput from CreateMove at {}\n", start_addr);
let result = utils::search_memory(start_addr, 100, &[0x8B, 0x0D]);
//let result = utils::search_memory(((result1 as uint) + 2) as *const (), 100, &[0x8B, 0x0D]);
match result {
Some(ptr) => {
let load_instruction_operand = ((ptr as uint) + 2) as *const *const *mut sdk::CInput;
log!("CInput load found at {}\n", load_instruction_operand);
let cinput_ptr_ptr = *load_instruction_operand;
log!("CInput pointer: {}\n", cinput_ptr_ptr);
log!("CInput found at {}\n", *cinput_ptr_ptr);
Some((*cinput_ptr_ptr))
},
None => {
log!("CInput not found?!?\n");
None
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_preinithook(app_sys_factory: sdk::AppSysFactoryPtr, _physics_factory: *mut (), globals: *mut sdk::CGlobalVarsBase) {
log!("pre-init hook running\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).preinit(app_sys_factory, globals);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_postinithook() {
log!("Post-init hook running...\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).postinit();
} else {
log!("Cheat manager not found!\n");
libc::exit(1);
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_pre_createmove(sequence_number: *mut libc::c_int, input_sample_frametime: *mut libc::c_float, active: *mut bool) {
if cheats::CHEAT_MANAGER.is_not_null() | else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_process_usercmd(cmd: &mut sdk::CUserCmd) {
if cheats::CHEAT_MANAGER.is_not_null() {
maybe_hook_inetchannel((*cheats::CHEAT_MANAGER).get_gamepointers());
(*cheats::CHEAT_MANAGER).process_usercmd(cmd);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_extramousesample(input_sample_frametime: libc::c_float, active: bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).extramousesample(input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub extern "C" fn rainstorm_command_cb(c_arguments: *const libc::c_char) {
let arguments_str = unsafe { core::str::raw::c_str_to_static_slice(c_arguments) };
log!("Command callback: {}\n", arguments_str);
let mut parts_iter = arguments_str.split(' ');
let command = parts_iter.next().expect("No command type specified!");
let parts: collections::Vec<&str> = parts_iter.collect();
unsafe {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).handle_command(command, parts.as_slice());
}
}
}
#[no_mangle]
pub extern "C" fn rainstorm_init(log_fd: libc::c_int, hooked_init_trampoline: *const (), hooked_createmove_trampoline: *const (),
hooked_extramousesample_trampoline: *const (), hooked_runcommand_trampoline: *const ()) {
unsafe { let _ = logging::set_fd(log_fd).unwrap(); }
log!("Rainstorm starting up!\n");
cheats::cheatmgr_setup();
unsafe {
let mut ibaseclientdll_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ibaseclientdll.get_ptr().to_uint() as *mut *const ());
REAL_INIT = ibaseclientdll_hooker.get_orig_method(0);
REAL_CREATEMOVE = ibaseclientdll_hooker.get_orig_method(21);
REAL_EXTRAMOUSESAMPLE = ibaseclientdll_hooker.get_orig_method(22);
ibaseclientdll_hooker.hook(0, hooked_init_trampoline);
ibaseclientdll_hooker.hook(21, hooked_createmove_trampoline);
ibaseclientdll_hooker.hook(22, hooked_extramousesample_trampoline);
// let mut ivengineclient_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ivengineclient.get_ptr().to_uint() as *mut *const ());
// REAL_SERVERCMDKEYVALUES = ivengineclient_hooker.get_orig_method(185);
// ivengineclient_hooker.hook(185, sdk::raw::get_hooked_servercmdkeyvalues());
CINPUT_PTR = locate_cinput().expect("Failed to locate CInput pointer (signature not found)");
let mut hooker = vmthook::VMTHooker::new(CINPUT_PTR as *mut *const ());
hooker.hook(8, sdk::get_hooked_getusercmd());
let mut iprediction_hooker = vmthook::VMTHooker::new(sdk::raw::getptr_iprediction().to_uint() as *mut *const ());
REAL_RUNCOMMAND = iprediction_hooker.get_orig_method(17);
iprediction_hooker.hook(17, sdk::raw::get_hooked_runcommand());
};
}
/// If we haven't seen this INetChannel before, hook it.
fn maybe_hook_inetchannel(ptrs: &GamePointers) {
static mut LAST_NETCHANNEL: Option<sdk::raw::INetChannelPtr> = None;
unsafe {
let inetchannel = sdk::raw::get_current_inetchannel(ptrs.ivengineclient.get_ptr());
//log!("chan: {}\n", inetchannel.to_uint());
let is_new_channel = match LAST_NETCHANNEL {
Some(last) => { inetchannel != last },
None => true
};
LAST_NETCHANNEL = Some(inetchannel);
if !is_new_channel {
//log!("Not patching old netchannel");
return;
}
let mut hooker = vmthook::VMTHooker::new(inetchannel.to_uint() as *mut *const ());
REAL_NETCHANNEL_SENDDATAGRAM = hooker.get_orig_method(46);
hooker.hook(46, ::sdk::raw::get_netchannel_senddatagram_trampoline().to_uint() as *const ());
log!("senddatagram: {}\n", hooker.get_orig_method(46));
};
}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "begin_unwind"]
extern fn begin_unwind(fmt: &core::fmt::Arguments, file: &str, line: uint) -> ! {
log!("Failed at line {} of {}!\n", line, file);
let _ = logging::log_fmt(fmt).ok(); // if we fail here, god help us
unsafe { libc::exit(42); }
}
#[allow(non_snake_case_functions)]
#[no_mangle]
pub extern "C" fn _imp___onexit() {
}
#[no_mangle]
pub extern "C" fn __dllonexit() {
}
#[no_mangle]
pub extern "C" fn __setusermatherr() {
} | {
(*cheats::CHEAT_MANAGER).pre_createmove(sequence_number, input_sample_frametime, active);
} | conditional_block |
ImageMap-dbg.js | /*!
* SAP UI development toolkit for HTML5 (SAPUI5/OpenUI5)
* (c) Copyright 2009-2014 SAP AG or an SAP affiliate company.
* Licensed under the Apache License, Version 2.0 - see LICENSE.txt.
*/
/* ----------------------------------------------------------------------------------
* Hint: This is a derived (generated) file. Changes should be done in the underlying
* source files only (*.control, *.js) or they will be lost after the next generation.
* ---------------------------------------------------------------------------------- */
// Provides control sap.ui.commons.ImageMap.
jQuery.sap.declare("sap.ui.commons.ImageMap");
jQuery.sap.require("sap.ui.commons.library");
jQuery.sap.require("sap.ui.core.Control");
/**
* Constructor for a new ImageMap.
*
* Accepts an object literal <code>mSettings</code> that defines initial
* property values, aggregated and associated objects as well as event handlers.
*
* If the name of a setting is ambiguous (e.g. a property has the same name as an event),
* then the framework assumes property, aggregation, association, event in that order.
* To override this automatic resolution, one of the prefixes "aggregation:", "association:"
* or "event:" can be added to the name of the setting (such a prefixed name must be
* enclosed in single or double quotes).
*
* The supported settings are:
* <ul>
* <li>Properties
* <ul>
* <li>{@link #getName name} : string</li></ul>
* </li>
* <li>Aggregations
* <ul>
* <li>{@link #getAreas areas} : sap.ui.commons.Area[]</li></ul>
* </li>
* <li>Associations
* <ul></ul>
* </li>
* <li>Events
* <ul>
* <li>{@link sap.ui.commons.ImageMap#event:press press} : fnListenerFunction or [fnListenerFunction, oListenerObject] or [oData, fnListenerFunction, oListenerObject]</li></ul>
* </li>
* </ul>
*
* @param {string} [sId] id for the new control, generated automatically if no id is given
* @param {object} [mSettings] initial settings for the new control
*
* @class
* Combination of image areas where at runtime these areas are starting points for hyperlinks or actions
* @extends sap.ui.core.Control
*
* @author SAP AG
* @version 1.20.7
*
* @constructor
* @public
* @name sap.ui.commons.ImageMap
*/
sap.ui.core.Control.extend("sap.ui.commons.ImageMap", { metadata : {
// ---- object ----
publicMethods : [
// methods
"createArea"
],
// ---- control specific ----
library : "sap.ui.commons",
properties : {
"name" : {type : "string", group : "Misc", defaultValue : null}
},
aggregations : {
"areas" : {type : "sap.ui.commons.Area", multiple : true, singularName : "area"}
},
events : {
"press" : {}
}
}});
/**
* Creates a new subclass of class sap.ui.commons.ImageMap with name <code>sClassName</code>
* and enriches it with the information contained in <code>oClassInfo</code>.
*
* <code>oClassInfo</code> might contain the same kind of informations as described in {@link sap.ui.core.Element.extend Element.extend}.
*
* @param {string} sClassName name of the class to be created
* @param {object} [oClassInfo] object literal with informations about the class
* @param {function} [FNMetaImpl] constructor function for the metadata object. If not given, it defaults to sap.ui.core.ElementMetadata.
* @return {function} the created class / constructor function
* @public
* @static
* @name sap.ui.commons.ImageMap.extend
* @function
*/
sap.ui.commons.ImageMap.M_EVENTS = {'press':'press'};
/**
* Getter for property <code>name</code>.
* Name for the image that serves as reference
*
* Default value is empty/<code>undefined</code>
*
* @return {string} the value of property <code>name</code>
* @public
* @name sap.ui.commons.ImageMap#getName
* @function
*/
/**
* Setter for property <code>name</code>.
*
* Default value is empty/<code>undefined</code>
*
* @param {string} sName new value for property <code>name</code>
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#setName
* @function
*/
/**
* Getter for aggregation <code>areas</code>.<br/>
* Area representing the reference to the target location
*
* @return {sap.ui.commons.Area[]}
* @public
* @name sap.ui.commons.ImageMap#getAreas
* @function
*/
/**
* Inserts a area into the aggregation named <code>areas</code>.
*
* @param {sap.ui.commons.Area}
* oArea the area to insert; if empty, nothing is inserted
* @param {int}
* iIndex the <code>0</code>-based index the area should be inserted at; for
* a negative value of <code>iIndex</code>, the area is inserted at position 0; for a value
* greater than the current size of the aggregation, the area is inserted at
* the last position
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#insertArea
* @function
*/
/**
* Adds some area <code>oArea</code>
* to the aggregation named <code>areas</code>.
*
* @param {sap.ui.commons.Area}
* oArea the area to add; if empty, nothing is inserted
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#addArea
* @function
*/
/**
* Removes an area from the aggregation named <code>areas</code>.
*
* @param {int | string | sap.ui.commons.Area} vArea the area to remove or its index or id
* @return {sap.ui.commons.Area} the removed area or null
* @public
* @name sap.ui.commons.ImageMap#removeArea
* @function
*/
/**
* Removes all the controls in the aggregation named <code>areas</code>.<br/>
* Additionally unregisters them from the hosting UIArea.
* @return {sap.ui.commons.Area[]} an array of the removed elements (might be empty)
* @public
* @name sap.ui.commons.ImageMap#removeAllAreas
* @function
*/
/**
* Checks for the provided <code>sap.ui.commons.Area</code> in the aggregation named <code>areas</code>
* and returns its index if found or -1 otherwise.
*
* @param {sap.ui.commons.Area}
* oArea the area whose index is looked for.
* @return {int} the index of the provided control in the aggregation if found, or -1 otherwise
* @public
* @name sap.ui.commons.ImageMap#indexOfArea
* @function
*/
/**
* Destroys all the areas in the aggregation
* named <code>areas</code>.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#destroyAreas
* @function
*/
/**
* Event for the areas that can be clicked in an ImageMap
*
* @name sap.ui.commons.ImageMap#press
* @event
* @param {sap.ui.base.Event} oControlEvent
* @param {sap.ui.base.EventProvider} oControlEvent.getSource
* @param {object} oControlEvent.getParameters
* @param {string} oControlEvent.getParameters.areaId Id of clicked Area.
* @public
*/
/**
* Attach event handler <code>fnFunction</code> to the 'press' event of this <code>sap.ui.commons.ImageMap</code>.<br/>.
* When called, the context of the event handler (its <code>this</code>) will be bound to <code>oListener</code> if specified
* otherwise to this <code>sap.ui.commons.ImageMap</code>.<br/> itself.
*
* Event for the areas that can be clicked in an ImageMap
*
* @param {object}
* [oData] An application specific payload object, that will be passed to the event handler along with the event object when firing the event.
* @param {function}
* fnFunction The function to call, when the event occurs.
* @param {object}
* [oListener] Context object to call the event handler with. Defaults to this <code>sap.ui.commons.ImageMap</code>.<br/> itself.
*
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#attachPress
* @function
*/
/**
* Detach event handler <code>fnFunction</code> from the 'press' event of this <code>sap.ui.commons.ImageMap</code>.<br/>
*
* The passed function and listener object must match the ones used for event registration.
*
* @param {function}
* fnFunction The function to call, when the event occurs.
* @param {object}
* oListener Context object on which the given function had to be called.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#detachPress
* @function
*/
/**
* Fire event press to attached listeners.
*
* Expects following event parameters:
* <ul>
* <li>'areaId' of type <code>string</code> Id of clicked Area.</li>
* </ul>
*
* @param {Map} [mArguments] the arguments to pass along with the event.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @protected
* @name sap.ui.commons.ImageMap#firePress
* @function
*/
/**
* Adds an area to the ImageMap
*
* @name sap.ui.commons.ImageMap.prototype.createArea
* @function
* @param {object|sap.ui.commons.Area}
* aArea Area(s) to add
*
| * @public
*/
// Start of sap\ui\commons\ImageMap.js
jQuery.sap.require("sap.ui.core.delegate.ItemNavigation");
/**
* Adds areas to the Image Map. Each argument must be either a JSON object with Area settings
* or an existing Area element.
*
* @param {object|sap.ui.commons.Area} vArea Area(s) to add
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
*/
sap.ui.commons.ImageMap.prototype.createArea = function() {
var oArea;
for ( var i = 0; i < arguments.length; i++) {
var oContent = arguments[i];
if (oContent instanceof sap.ui.commons.Area) {
oArea = oContent;
} else {
oArea = new sap.ui.commons.Area(oContent);
}
this.addArea(oArea);
}
return this;
};
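/*
 * A minimal usage sketch (control ids, coordinates and hrefs are illustrative
 * only, and the Area settings assume the standard shape/coords/href/alt
 * properties of sap.ui.commons.Area). The map name must match the useMap of
 * the sap.ui.commons.Image it belongs to, see onAfterRendering below.
 *
 *   var oMap = new sap.ui.commons.ImageMap("imageMap1", { name : "shopMap" });
 *   oMap.createArea(
 *       { shape : "rect", coords : "1,1,100,100", href : "#", alt : "Upper area" },
 *       { shape : "circle", coords : "150,60,40", href : "#", alt : "Round area" }
 *   );
 *   oMap.attachPress(function(oEvent) {
 *       jQuery.sap.log.info("Pressed area: " + oEvent.getParameter("areaId"));
 *   });
 */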
/**
* Used for after-rendering initialization.
*
* @private
*/
sap.ui.commons.ImageMap.prototype.onAfterRendering = function() {
this.oDomRef = this.getDomRef();
// Initialize the ItemNavigation if does not exist yet
if (!this.oItemNavigation) {
this.oItemNavigation = new sap.ui.core.delegate.ItemNavigation();
}
if (!!sap.ui.Device.browser.internet_explorer) {
var that = this;
var aImageControls = [];
this.oItemNavigation.setTabIndex0();
// Find the Image control and add delegate to it
var $Images = jQuery("img[useMap=#" + this.getName() + "]");
$Images.each(function(i, image) {
var id = image.getAttribute("id");
var imageControl = sap.ui.getCore().byId(id);
imageControl.addDelegate(that.oItemNavigation);
that.oItemNavigation.setRootDomRef(image);
aImageControls.push(imageControl);
});
this.aImageControls = aImageControls;
} else {
this.addDelegate(this.oItemNavigation);
this.oItemNavigation.setRootDomRef(this.oDomRef);
}
// Set navigations items = Areas inside of Image map
var aItemDomRefs = [];
var aAllAreas = this.getAreas();
for ( var i = 0; i < aAllAreas.length; i++) {
var oDomRef = aAllAreas[i].getFocusDomRef();
if (oDomRef) { // separators return null here
aItemDomRefs.push(oDomRef);
}
}
this.oItemNavigation.setItemDomRefs(aItemDomRefs);
this.oItemNavigation.setCycling(true);
this.oItemNavigation.setSelectedIndex(-1);
this.oItemNavigation.setFocusedIndex(-1);
};
/**
* Does all the cleanup when the Image Map is to be destroyed. Called from the
* element's destroy() method.
*
* @private
*/
sap.ui.commons.ImageMap.prototype.exit = function() {
// Remove the item navigation delegate
if (this.oItemNavigation) {
if (!!sap.ui.Device.browser.internet_explorer) {
for ( var i = 0; i < this.aImageControls.length; i++) {
this.aImageControls[i].removeDelegate(this.oItemNavigation);
}
} else {
this.removeDelegate(this.oItemNavigation);
}
this.oItemNavigation.destroy();
delete this.oItemNavigation;
}
// No super.exit() to call
}; | * @type void | random_line_split |
ImageMap-dbg.js | /*!
* SAP UI development toolkit for HTML5 (SAPUI5/OpenUI5)
* (c) Copyright 2009-2014 SAP AG or an SAP affiliate company.
* Licensed under the Apache License, Version 2.0 - see LICENSE.txt.
*/
/* ----------------------------------------------------------------------------------
* Hint: This is a derived (generated) file. Changes should be done in the underlying
* source files only (*.control, *.js) or they will be lost after the next generation.
* ---------------------------------------------------------------------------------- */
// Provides control sap.ui.commons.ImageMap.
jQuery.sap.declare("sap.ui.commons.ImageMap");
jQuery.sap.require("sap.ui.commons.library");
jQuery.sap.require("sap.ui.core.Control");
/**
* Constructor for a new ImageMap.
*
* Accepts an object literal <code>mSettings</code> that defines initial
* property values, aggregated and associated objects as well as event handlers.
*
* If the name of a setting is ambiguous (e.g. a property has the same name as an event),
* then the framework assumes property, aggregation, association, event in that order.
* To override this automatic resolution, one of the prefixes "aggregation:", "association:"
* or "event:" can be added to the name of the setting (such a prefixed name must be
* enclosed in single or double quotes).
*
* The supported settings are:
* <ul>
* <li>Properties
* <ul>
* <li>{@link #getName name} : string</li></ul>
* </li>
* <li>Aggregations
* <ul>
* <li>{@link #getAreas areas} : sap.ui.commons.Area[]</li></ul>
* </li>
* <li>Associations
* <ul></ul>
* </li>
* <li>Events
* <ul>
* <li>{@link sap.ui.commons.ImageMap#event:press press} : fnListenerFunction or [fnListenerFunction, oListenerObject] or [oData, fnListenerFunction, oListenerObject]</li></ul>
* </li>
* </ul>
*
* @param {string} [sId] id for the new control, generated automatically if no id is given
* @param {object} [mSettings] initial settings for the new control
*
* @class
* Combination of image areas where at runtime these areas are starting points for hyperlinks or actions
* @extends sap.ui.core.Control
*
* @author SAP AG
* @version 1.20.7
*
* @constructor
* @public
* @name sap.ui.commons.ImageMap
*/
sap.ui.core.Control.extend("sap.ui.commons.ImageMap", { metadata : {
// ---- object ----
publicMethods : [
// methods
"createArea"
],
// ---- control specific ----
library : "sap.ui.commons",
properties : {
"name" : {type : "string", group : "Misc", defaultValue : null}
},
aggregations : {
"areas" : {type : "sap.ui.commons.Area", multiple : true, singularName : "area"}
},
events : {
"press" : {}
}
}});
/**
* Creates a new subclass of class sap.ui.commons.ImageMap with name <code>sClassName</code>
* and enriches it with the information contained in <code>oClassInfo</code>.
*
* <code>oClassInfo</code> might contain the same kind of informations as described in {@link sap.ui.core.Element.extend Element.extend}.
*
* @param {string} sClassName name of the class to be created
* @param {object} [oClassInfo] object literal with informations about the class
* @param {function} [FNMetaImpl] constructor function for the metadata object. If not given, it defaults to sap.ui.core.ElementMetadata.
* @return {function} the created class / constructor function
* @public
* @static
* @name sap.ui.commons.ImageMap.extend
* @function
*/
sap.ui.commons.ImageMap.M_EVENTS = {'press':'press'};
/**
* Getter for property <code>name</code>.
* Name for the image that serves as reference
*
* Default value is empty/<code>undefined</code>
*
* @return {string} the value of property <code>name</code>
* @public
* @name sap.ui.commons.ImageMap#getName
* @function
*/
/**
* Setter for property <code>name</code>.
*
* Default value is empty/<code>undefined</code>
*
* @param {string} sName new value for property <code>name</code>
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#setName
* @function
*/
/**
* Getter for aggregation <code>areas</code>.<br/>
* Area representing the reference to the target location
*
* @return {sap.ui.commons.Area[]}
* @public
* @name sap.ui.commons.ImageMap#getAreas
* @function
*/
/**
* Inserts a area into the aggregation named <code>areas</code>.
*
* @param {sap.ui.commons.Area}
* oArea the area to insert; if empty, nothing is inserted
* @param {int}
* iIndex the <code>0</code>-based index the area should be inserted at; for
* a negative value of <code>iIndex</code>, the area is inserted at position 0; for a value
* greater than the current size of the aggregation, the area is inserted at
* the last position
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#insertArea
* @function
*/
/**
* Adds some area <code>oArea</code>
* to the aggregation named <code>areas</code>.
*
* @param {sap.ui.commons.Area}
* oArea the area to add; if empty, nothing is inserted
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#addArea
* @function
*/
/**
* Removes an area from the aggregation named <code>areas</code>.
*
* @param {int | string | sap.ui.commons.Area} vArea the area to remove or its index or id
* @return {sap.ui.commons.Area} the removed area or null
* @public
* @name sap.ui.commons.ImageMap#removeArea
* @function
*/
/**
* Removes all the controls in the aggregation named <code>areas</code>.<br/>
* Additionally unregisters them from the hosting UIArea.
* @return {sap.ui.commons.Area[]} an array of the removed elements (might be empty)
* @public
* @name sap.ui.commons.ImageMap#removeAllAreas
* @function
*/
/**
* Checks for the provided <code>sap.ui.commons.Area</code> in the aggregation named <code>areas</code>
* and returns its index if found or -1 otherwise.
*
* @param {sap.ui.commons.Area}
* oArea the area whose index is looked for.
* @return {int} the index of the provided control in the aggregation if found, or -1 otherwise
* @public
* @name sap.ui.commons.ImageMap#indexOfArea
* @function
*/
/**
* Destroys all the areas in the aggregation
* named <code>areas</code>.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#destroyAreas
* @function
*/
/**
* Event for the areas that can be clicked in an ImageMap
*
* @name sap.ui.commons.ImageMap#press
* @event
* @param {sap.ui.base.Event} oControlEvent
* @param {sap.ui.base.EventProvider} oControlEvent.getSource
* @param {object} oControlEvent.getParameters
* @param {string} oControlEvent.getParameters.areaId Id of clicked Area.
* @public
*/
/**
* Attach event handler <code>fnFunction</code> to the 'press' event of this <code>sap.ui.commons.ImageMap</code>.<br/>
* When called, the context of the event handler (its <code>this</code>) will be bound to <code>oListener</code> if specified,
* otherwise to this <code>sap.ui.commons.ImageMap</code> itself.
*
* Event for the areas that can be clicked in an ImageMap
*
* @param {object}
* [oData] An application specific payload object, that will be passed to the event handler along with the event object when firing the event.
* @param {function}
* fnFunction The function to call, when the event occurs.
* @param {object}
* [oListener] Context object to call the event handler with. Defaults to this <code>sap.ui.commons.ImageMap</code> itself.
*
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#attachPress
* @function
*/
/**
* Detach event handler <code>fnFunction</code> from the 'press' event of this <code>sap.ui.commons.ImageMap</code>.<br/>
*
* The passed function and listener object must match the ones used for event registration.
*
* @param {function}
* fnFunction The function to call, when the event occurs.
* @param {object}
* oListener Context object on which the given function had to be called.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#detachPress
* @function
*/
/**
* Fire event press to attached listeners.
*
* Expects following event parameters:
* <ul>
* <li>'areaId' of type <code>string</code> Id of clicked Area.</li>
* </ul>
*
* @param {Map} [mArguments] the arguments to pass along with the event.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @protected
* @name sap.ui.commons.ImageMap#firePress
* @function
*/
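/*
 * Illustrative usage only (handler and variable names are assumptions): attaching and
 * detaching the 'press' event documented above. Note that firePress is protected and is
 * normally only called by the control itself.
 *
 *   function onAreaPress(oEvent) {
 *       jQuery.sap.log.info("Clicked area: " + oEvent.getParameter("areaId"));
 *   }
 *   oImageMap.attachPress(onAreaPress);
 *   // later, when the handler is no longer needed (same function reference required):
 *   oImageMap.detachPress(onAreaPress);
 */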
/**
* Adds an area to the ImageMap
*
* @name sap.ui.commons.ImageMap.prototype.createArea
* @function
* @param {string[]}
* aArea
*
* @type void
* @public
*/
// Start of sap\ui\commons\ImageMap.js
jQuery.sap.require("sap.ui.core.delegate.ItemNavigation");
/**
* Adds areas to the Image Map. Each argument must be either a JSON object with Area
* settings or an sap.ui.commons.Area element; several areas can be passed in one call.
*
* @param {sap.ui.commons.Area|object} oContent the area, or the settings object for an area, to add
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
*/
sap.ui.commons.ImageMap.prototype.createArea = function() {
for ( var i = 0; i < arguments.length; i++) {
var oContent = arguments[i];
var oArea;
if (oContent instanceof sap.ui.commons.Area) {
oArea = oContent;
} else |
this.addArea(oArea);
}
return this;
};
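/*
 * Illustrative usage only: as described in the JSDoc above, createArea accepts Area
 * instances or plain settings objects. The shapes and coordinates below are assumptions.
 *
 *   oImageMap.createArea(
 *       new sap.ui.commons.Area({shape : "circle", coords : "50,50,20", href : "#north"}),
 *       {shape : "rect", coords : "100,0,200,80", href : "#south"}
 *   );
 */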
/**
* Used for after-rendering initialization.
*
* @private
*/
sap.ui.commons.ImageMap.prototype.onAfterRendering = function() {
this.oDomRef = this.getDomRef();
// Initialize the ItemNavigation if it does not exist yet
if (!this.oItemNavigation) {
this.oItemNavigation = new sap.ui.core.delegate.ItemNavigation();
}
if (!!sap.ui.Device.browser.internet_explorer) {
var that = this;
var aImageControls = [];
this.oItemNavigation.setTabIndex0();
// Find the Image control and add delegate to it
var $Images = jQuery("img[useMap=#" + this.getName() + "]");
$Images.each(function(i, image) {
var id = image.getAttribute("id");
var imageControl = sap.ui.getCore().byId(id);
imageControl.addDelegate(that.oItemNavigation);
that.oItemNavigation.setRootDomRef(image);
aImageControls.push(imageControl);
});
this.aImageControls = aImageControls;
} else {
this.addDelegate(this.oItemNavigation);
this.oItemNavigation.setRootDomRef(this.oDomRef);
}
// Set navigations items = Areas inside of Image map
var aItemDomRefs = [];
var aAllAreas = this.getAreas();
for ( var i = 0; i < aAllAreas.length; i++) {
var oDomRef = aAllAreas[i].getFocusDomRef();
if (oDomRef) { // separators return null here
aItemDomRefs.push(oDomRef);
}
}
this.oItemNavigation.setItemDomRefs(aItemDomRefs);
this.oItemNavigation.setCycling(true);
this.oItemNavigation.setSelectedIndex(-1);
this.oItemNavigation.setFocusedIndex(-1);
};
/**
* Does all the cleanup when the Image Map is to be destroyed. Called from the
* element's destroy() method.
*
* @private
*/
sap.ui.commons.ImageMap.prototype.exit = function() {
// Remove the item navigation delegate
if (this.oItemNavigation) {
if (!!sap.ui.Device.browser.internet_explorer) {
for ( var i = 0; i < this.aImageControls.length; i++) {
this.aImageControls[i].removeDelegate(this.oItemNavigation);
}
} else {
this.removeDelegate(this.oItemNavigation);
}
this.oItemNavigation.destroy();
delete this.oItemNavigation;
}
// No super.exit() to call
}; | {
oArea = new sap.ui.commons.Area(oContent);
} | conditional_block |
app-engine.js | /*
* -------------------------------------------------------------------------
* This is the data for an errorMessage
* -------------------------------------------------------------------------
*/
var errorResponses = [{
'id': '12309120',
'name': "OH NO!!",
'snippet_text': "You're yelp request didn't go through. Please try again later, or just hang out with the crew.",
'image_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'rating_img_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.962,
'longitude': -118.216,
},
'display_address': ['2955 Ski Town Canada'],
}
}, {
'id': '12309120',
'name': "Beacon's House",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}, {
'id': '12309120',
'name': "Artie's House",
'snippet_text': "Artie has an abode to be ado#EE8060. It's like nothing you've ever experienced. Not only does it have year round views of Canada's inc#EE8060ible Glacier National Park, but it is also has a pool. Artie is sure to have his humans keep a stockpile of treats always on the ready.",
'image_url': "https://s-media-cache-ak0.pinimg.com/originals/0d/80/1e/0d801ec8b8d40c12e67d4dffb994d31d.jpg",
'address1': '23 TransCanada Highway',
'cityState': 'Revelstoke, BC',
'display_phone': '250 837 7500',
'url': 'pc.gc.ca',
'location': {
'coordinate': {
'latitude': 51.209417,
'longitude': -117.723987,
},
'display_address': ['23 TransCanada Highway'],
}
}, {
'id': '12309120',
'name': "Beacon's House #2",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}, ];
var noMatchFilterResponse = [{
'id': '12309120',
'name': "OH NO!!",
'snippet_text': "No titles matched in these results. We only show the top 15 results from Yelp based on ratings. Try another search. There's totally something out there.",
'image_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'rating_img_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.962,
'longitude': -118.216,
},
'display_address': ['2955 Ski Town Canada'],
}
}];
var response = [{
'id': '12309120',
'name': "Beacon's House",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}];
/**
* --------------------------------------------------------------------
* placeCard is a data object used to display results in the view
* --------------------------------------------------------------------
**/
var placeCard = function(data) {
var that = this;
this.name = ko.observable(data.name);
this.id = ko.observable(data.id);
this.idSelector = ko.computed(function() {
return "#" + data.id;
});
this.description = ko.observable(data.snippet_text);
this.imgSrc = ko.computed(function() {
return data.image_url.replace('ms.jpg', 'l.jpg');
});
this.imgAltTag = ko.computed(function() {
return 'Photo of ' + data.name;
});
this.address1 = ko.observable(data.location.display_address[0]);
this.city = ko.observable(data.location.city);
this.state = ko.observable(data.location.state_code);
this.zip = ko.observable(data.location.postal_code);
this.address2 = ko.computed(function() {
return that.city() + ", " + that.state() + " " + that.zip();
});
this.phone = ko.observable(data.display_phone);
this.webURL = ko.observable(data.url);
this.location = {
coordinate: {
latitude: data.location.coordinate.latitude,
longitude: data.location.coordinate.longitude,
},
address: data.location.display_address[0] +
'<br>' + data.location.display_address[data.location.display_address.length - 1]
};
this.review = {
img: data.snippet_image_url,
txt: data.snippet_text
};
this.stars = {
count: ko.observable(data.rating),
standard: ko.observable(data.rating_img_url),
large: ko.observable(data.rating_img_url_large),
small: ko.observable(data.rating_img_url_small)
};
this.marker = {
title: data.name,
phone: data.display_phone,
imgSrc: data.image_url,
description: data.snippet_text,
lat: data.location.coordinate.latitude,
lng: data.location.coordinate.longitude,
idSelector: "#" + data.id,
stars: data.rating_img_url
};
this.googleDirections = ko.computed(function() {
return "//google.com/maps?q=" + data.location.display_address[0] + '+' + data.location.city + '+' + data.location.state_code;
});
this.facebookShare = ko.computed(function() {
return "//www.facebook.com/sharer/sharer.php?u=" + data.url;
});
this.twitterShare = ko.computed(function() {
return "//twitter.com/intent/tweet?text=OMG " + data.name + " is an awesome spot for " + searchFor() + " in " + searchNear() + "&url=" + data.url + ";via=dangerdan";
});
};
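/*
 * Illustrative sketch (not executed here): wrapping a single Yelp business record in a
 * placeCard so the view can bind to its observables. The variable names are assumptions.
 *
 *   var firstBusiness = response[0];            // one business object from the Yelp response
 *   var card = new placeCard(firstBusiness);    // exposes name(), address1(), stars, marker, ...
 *   console.log(card.name(), card.address2(), card.stars.count());
 */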
/* --- resultList is the placeCards' holder --- */
var resultList = ko.observableArray([]);
var originalList = ko.observableArray([]);
/*
* -----------------------------------------------------------------
* easily recognized function that performs ajax request to yelp
* -----------------------------------------------------------------
*/
function updateYelpResults() {
yelpAjax(searchFor(), searchNear()); // get all the needed info
}
/** Hide search results
*
*/
function hideYelpResults() {
$('.yelp-search-results').toggleClass('hidden');
}
/*
* -------------------------------------------------------------------------
* ViewModel, binding and DOM input elements in the form of observables
* -------------------------------------------------------------------------
*/
var searchFor = ko.observable("Pizza"); // form Yelp Search Form with prepopulated placeholder
var searchNear = ko.observable("80210"); // form Yelp Search Form with prepopulated placeholder
var filterField = ko.observable();
/*
* -------------------------------------------------------------------------
* The filter and functions
* -------------------------------------------------------------------------
*/
function filterInputField() {
// ensure empty lists
var nameList = []; // for names
var filteredList = []; // for matches
filterField(filterField().toLowerCase()); // force the case on the search
for (var card in resultList()) {
nameList.push({
'index': card, // store index
'name': resultList()[card].name().toLowerCase(), // grab name as string
'description': resultList()[card].description().toLowerCase() // grabs description as string
});
}
for (var name in nameList) {
if (nameList[name].name.includes(filterField()) || nameList[name].description.includes(filterField())) { // if a name or description contains the search variable...
filteredList.push(resultList()[nameList[name].index]); // put it in filtered List
}
}
if (filteredList.length >= 1) { // if something in filtered List
resultList(filteredList); // put that on the board + map
prepMap();
} else { // otherwise
/* ------ Throw error message ------ */
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
resultList.push(new placeCard(noMatchFilterResponse[0]));
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
};
var ViewModel = function() {
var self = this;
};
/** ---------- filter functions --------------------- **/
function prepMap() {
clearAllMarkers(); // empty current markers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function flipCards() {
resultList(resultList().reverse());
prepMap();
}
function sortABC() {
resultList(resultList().sort(function(left, right) {
return left.name() == right.name() ? 0 : (left.name() < right.name() ? -1 : 1);
}));
prepMap();
}
function sortStars() {
resultList(resultList().sort(function(left, right) {
return left.stars.count() == right.stars.count() ? 0 : (left.stars.count() < right.stars.count() ? -1 : 1);
}));
resultList(resultList().reverse());
prepMap();
}
function resetList() {
resultList(originalList());
prepMap();
}
ko.applyBindings(new ViewModel());
/*
* ----------------------------------------------------
* The following functions handle requests to Yelp
* ----------------------------------------------------
*/
function yelpAjax(searchFor, searchNear) {
/*
* Keys and other tokens needed to access the Yelp API via OAuth
* In a non-Udacious scenario this would have to be moved
* to a server side script and therefore actually be "secret"
*/
var auth = {
consumerKey: "2M-JWI9l8UBCt3vm0R6vZg",
consumerSecret: "2TIm_ve4y6unTQR2D1HGnWTjFOM",
accessToken: "p44DAD9S6MecSv66hmrdR3qdJZhVkg7o",
accessTokenSecret: "rhnGNKjrDKMLZT0aRET8qIA-aWQ",
serviceProvider: {
signatureMethod: "HMAC-SHA1" // found here https://www.yelp.com/developers/documentation/v2/authentication
}
};
/*
* Grab the "secret" part of the auth keys and put them in an object
* that will then be passed on to the coming OAuth.SignatureMethod
*/
var accessor = {
consumerSecret: auth.consumerSecret,
tokenSecret: auth.accessTokenSecret
};
/*
* Create an array of parameters to handoff to message object that follows
* This helps keep things more bite-sized...
*/
var parameters = [
['term', searchFor],
['location', searchNear],
['callback', 'cb'],
['sort', 2], // '2' sorts results by rating
['limit', 15], // limits results to top 15
['oauth_consumer_key', auth.consumerKey],
['oauth_consumer_secret', auth.consumerSecret],
['oauth_token', auth.accessToken],
['oauth_signature_method', auth.serviceProvider.signatureMethod]
];
/*
* This message object is to be fired to Yelp as part of the
* OAuth.setTimestampAndNonce TODO: someday make this server-side
*/
var message = {
'action': 'http://api.yelp.com/v2/search',
'method': 'GET',
'parameters': parameters
};
/*
* Virtually sign and send things as part of OAuth JS Magic
*/
OAuth.setTimestampAndNonce(message);
OAuth.SignatureMethod.sign(message, accessor);
var parameterMap = OAuth.getParameterMap(message.parameters);
yJax(message.action, parameterMap);
}
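/*
 * Illustrative sketch (not executed here): kicking off a search programmatically. The
 * term and location below are assumptions; the live app passes the searchFor() and
 * searchNear() observables instead.
 *
 *   yelpAjax("Coffee", "Denver, CO"); // signs the request with OAuth and hands it to yJax()
 */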
/*
* Ajax OAuth method GETs data from Yelp API
*/
function | (url, yData) {
$.ajax({
'timeout': 3000,
'type': 'GET',
'url': url,
'data': yData,
'dataType': 'jsonp',
'global': true,
'cache': true,
'jsonpCallback': 'cb',
'success': function(data) {
makeYelpList(data);
},
'error': function() {
makeErrorList();
alert("oh no! the yelp request failed. Please try again later.");
},
});
}
/*
* --------------------------------------------------------
* Changes out the resultList with new Yelp results
* --------------------------------------------------------
*/
function makeYelpList(d) {
response = d.businesses; // push ajax response to the global var 'response'
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
originalList.removeAll();
clearAllMarkers(); // clears marker array
/* --- Display the search results --- */
response.forEach(function(place) { // place cards into observables
resultList.push(new placeCard(place));
originalList.push(new placeCard(place));
});
scrollingTriggersMarkers(); // activate scroll position monitor triggers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function makeErrorList() {
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
errorResponses.forEach(function(place) {
resultList.push(new placeCard(place));
});
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
/*
* -------------------------
* Initial Call to Yelp
* -------------------------
*/
yelpAjax(searchFor(), searchNear()); // on load, initialize with starting Yelp results
/*
* -------------------------------------------------------------------------
* This section handles requests to Google Maps and the related markers
* -------------------------------------------------------------------------
*/
/* --- google map keys --- */
var googleMapsAPIKey = 'AIzaSyClMls0bXZ3jgznlsLiP0ZgRrzSgUGFMbU';
var googleMapsgeocodeKey = 'AIzaSyBEXHFmzvonWnDvII96o0Zx8Z--i64lArA';
/* --- var to track data set --- */
var currentMarkers = [];
/* --- clear currentMarkers set --- */
function clearAllMarkers() {
currentMarkers = [];
}
/*
* ---------------------------------------------------------------
* build the map and place markers, listeners and triggers
* ---------------------------------------------------------------
*/
var map;
function initMap() {
// Create a map object and specify the DOM element for display.
map = new google.maps.Map(document.getElementById('map'), {
center: {
lat: response[0].location.coordinate.latitude - mapShift.right,
lng: response[0].location.coordinate.longitude - mapShift.up
},
scrollwheel: false,
zoom: 12,
mapTypeControl: false
});
// set markers with placeholder copy
setMarkers(map, resultList());
infowindow = new google.maps.InfoWindow({
content: "loading..."
});
reformatOnSize(); // for map
$('#map').css('position', 'absolute');
forceTop();
}
/* --- define icons used for markers --- */
var markerIcon = {
active: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#FFF',
fillOpacity: 0.8,
strokeWeight: 4,
strokeColor: '#0BA',
scale: 2.5,
},
resting: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#EE8060',
fillOpacity: 0.6,
strokeWeight: 4,
strokeColor: '#fff',
scale: 2.5,
}
};
/* --- define mapShift to ensure the markers are ----
--- staged on the right side of the view window --- */
var mapShift = {
right: 0.08,
up: 0.04
};
/*
* -----------------------------------------------------------------------------
* Loop through the markers, place them on the map with needed functionality
* ------------------------------------------------------------------------------
*/
function setMarkers(map, points) {
/* --- function needed for cleaning up infowindows --- */
function hideInfoWindowCloseControl() {
// $(".gm-style-iw").next("div").css('display', 'none'); // this function gets rid of close btn in infowindows
// udacity doesn't like it for this project so having an x is fine
}
/* --- function gives all markers resting icon and a base layering --- */
function resetMarkerIcons() {
for (var i = 0; i < currentMarkers.length; i++) {
currentMarkers[i].setIcon(markerIcon.resting);
currentMarkers[i].setZIndex(4);
currentMarkers[i].setAnimation(null); // turn BOUNCE Animation off
}
}
/* --- loop through placeCards and extract marker-related pieces --- */
for (var point in points) {
var place = points[point];
var siteLatLng = new google.maps.LatLng(place.location.coordinate.latitude, place.location.coordinate.longitude);
var marker = new google.maps.Marker({
position: siteLatLng,
map: map,
clickable: true,
animation: google.maps.Animation.DROP, // TODO change to something else?
icon: markerIcon.resting,
title: place.marker.title,
phone: place.marker.phone,
imgSrc: place.marker.imgSrc,
description: place.marker.description,
lat: place.location.coordinate.latitude,
lng: place.location.coordinate.longitude,
idSelector: place.marker.idSelector,
stars: place.marker.stars,
windowContent: '<div class="infowindow-title">' + place.marker.title + '</div><br/><img style="max-width: 96px; height: auto" src="' + place.marker.stars + '"></img>',
});
/* --- push marker to currentMarkers --- */
currentMarkers.push(marker);
/*
* -------------------------------------------------------
* event listeners for markers that set up infowindows
* -------------------------------------------------------
*/
/* --- click --- */
google.maps.event.addListener(marker, "click", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
this.setAnimation(google.maps.Animation.BOUNCE); // bounce on click
hideInfoWindowCloseControl(); // hide infoWindow close control
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
$('html, body').animate({
scrollTop: $(this.idSelector).offset().top - (20 + scrollAdjustment)
}, 100); // scroll to active placeCard in the DOM
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- mouseover --- */
google.maps.event.addListener(marker, "mouseover", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- doubleclick (used for DOM scroll trigger) --- */
google.maps.event.addListener(marker, "dblclick", function(event) {
resetMarkerIcons(); // put other markers back to resting state
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
}
forceTop(); // scroll DOM back to top
}
/* ------------------------------------------------------------------------
* Section monitors the DOM scrolling and trigger events when appropriate
* ------------------------------------------------------------------------
*/
/* --- pull trigger for a specific marker --- */
function OpenInfowindowForMarker(index) {
google.maps.event.trigger(currentMarkers[index], 'dblclick');
}
function openMarker(index) {
OpenInfowindowForMarker(index);
}
/* --- compare window scroll count to offsets of each placeCard and ---
--- trigger the appropriate marker as the card passes through view --- */
var scrollAdjustment = 0; // zero on standard desktop view
function scrollingTriggersMarkers() {
$(window).scroll(function() { // as user scrolls
var pixelsScrolled = $(window).scrollTop() + scrollAdjustment; // store distance scrolled
for (var resultCard in resultList()) { // for each placeCard
var resultOffset = $(resultList()[resultCard].idSelector()).offset().top; // store the offset of the card
if (resultOffset - pixelsScrolled < 60 && resultOffset - pixelsScrolled > -60) { // check if two distances are close
OpenInfowindowForMarker(resultCard); // open Infowindow for placeCard being viewed in DOM
}
}
});
}
/** ----------------------------------------------------------------------------
* Handles changing mapShift vars in responsive manner using matchMedia
* ----------------------------------------------------------------------------
*/
function reformatOnSize() {
if (window.matchMedia("(min-width: 680px)").matches) { // for "big" screen
mapShift = {
right: 0.08,
up: 0.04
};
scrollAdjustment = 0;
map.setZoom(12);
$('#map').removeClass("fixed");
} else if (window.matchMedia("(orientation: portrait)").matches) { // small screen portrait
mapShift = {
right: -0.01,
up: 0.01
};
scrollAdjustment = 260;
map.setZoom(11);
}
else { // small screen landscape
mapShift = {
right: 0.09,
up: 0
};
scrollAdjustment = 0;
map.setZoom(11);
$('#map').removeClass("fixed");
}
}
$(window).resize(function() {
reformatOnSize();
});
/* --- force scroll the DOM to the top --- */
function forceTop() {
$('html, body').animate({
scrollTop: $('body').offset().top,
}, 200);
}
/* --- googleError in case it breaks --- */
function googleError() {
alert("Google Maps did not load");
}
| yJax | identifier_name |
app-engine.js | /*
* -------------------------------------------------------------------------
* This is the data for an errorMessage
* -------------------------------------------------------------------------
*/
var errorResponses = [{
'id': '12309120',
'name': "OH NO!!",
'snippet_text': "You're yelp request didn't go through. Please try again later, or just hang out with the crew.",
'image_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'rating_img_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.962,
'longitude': -118.216,
},
'display_address': ['2955 Ski Town Canada'],
}
}, {
'id': '12309120',
'name': "Beacon's House",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}, {
'id': '12309120',
'name': "Artie's House",
'snippet_text': "Artie has an abode to be ado#EE8060. It's like nothing you've ever experienced. Not only does it have year round views of Canada's inc#EE8060ible Glacier National Park, but it is also has a pool. Artie is sure to have his humans keep a stockpile of treats always on the ready.",
'image_url': "https://s-media-cache-ak0.pinimg.com/originals/0d/80/1e/0d801ec8b8d40c12e67d4dffb994d31d.jpg",
'address1': '23 TransCanada Highway',
'cityState': 'Revelstoke, BC',
'display_phone': '250 837 7500',
'url': 'pc.gc.ca',
'location': {
'coordinate': {
'latitude': 51.209417,
'longitude': -117.723987,
},
'display_address': ['23 TransCanada Highway'],
}
}, {
'id': '12309120',
'name': "Beacon's House #2",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}, ];
var noMatchFilterResponse = [{
'id': '12309120',
'name': "OH NO!!",
'snippet_text': "No titles matched in these results. We only show the top 15 results from Yelp based on ratings. Try another search. There's totally something out there.",
'image_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'rating_img_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.962,
'longitude': -118.216,
},
'display_address': ['2955 Ski Town Canada'],
}
}];
var response = [{
'id': '12309120',
'name': "Beacon's House",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}];
/**
* --------------------------------------------------------------------
* placeCard is a data object used to display results in the view
* --------------------------------------------------------------------
**/
var placeCard = function(data) {
var that = this;
this.name = ko.observable(data.name);
this.id = ko.observable(data.id);
this.idSelector = ko.computed(function() {
return "#" + data.id;
});
this.description = ko.observable(data.snippet_text);
this.imgSrc = ko.computed(function() {
return data.image_url.replace('ms.jpg', 'l.jpg');
});
this.imgAltTag = ko.computed(function() {
return 'Photo of ' + data.name;
});
this.address1 = ko.observable(data.location.display_address[0]);
this.city = ko.observable(data.location.city);
this.state = ko.observable(data.location.state_code);
this.zip = ko.observable(data.location.postal_code);
this.address2 = ko.computed(function() {
return that.city() + ", " + that.state() + " " + that.zip();
});
this.phone = ko.observable(data.display_phone);
this.webURL = ko.observable(data.url);
this.location = {
coordinate: {
latitude: data.location.coordinate.latitude,
longitude: data.location.coordinate.longitude,
},
address: data.location.display_address[0] +
'<br>' + data.location.display_address[data.location.display_address.length - 1]
};
this.review = {
img: data.snippet_image_url,
txt: data.snippet_text
};
this.stars = {
count: ko.observable(data.rating),
standard: ko.observable(data.rating_img_url),
large: ko.observable(data.rating_img_url_large),
small: ko.observable(data.rating_img_url_small)
};
this.marker = {
title: data.name,
phone: data.display_phone,
imgSrc: data.image_url,
description: data.snippet_text,
lat: data.location.coordinate.latitude,
lng: data.location.coordinate.longitude,
idSelector: "#" + data.id,
stars: data.rating_img_url
};
this.googleDirections = ko.computed(function() {
return "//google.com/maps?q=" + data.location.display_address[0] + '+' + data.location.city + '+' + data.location.state_code;
});
this.facebookShare = ko.computed(function() {
return "//www.facebook.com/sharer/sharer.php?u=" + data.url;
});
this.twitterShare = ko.computed(function() {
return "//twitter.com/intent/tweet?text=OMG " + data.name + " is an awesome spot for " + searchFor() + " in " + searchNear() + "&url=" + data.url + ";via=dangerdan";
});
};
/* --- resultList is the placeCards' holder --- */
var resultList = ko.observableArray([]);
var originalList = ko.observableArray([]);
/*
* -----------------------------------------------------------------
* easily recognized function that performs ajax request to yelp
* -----------------------------------------------------------------
*/
function updateYelpResults() {
yelpAjax(searchFor(), searchNear()); // get all the needed info
}
/** Hide search results
*
*/
function hideYelpResults() {
$('.yelp-search-results').toggleClass('hidden');
}
/*
* -------------------------------------------------------------------------
* ViewModel, binding and DOM input elements in the form of observables
* -------------------------------------------------------------------------
*/
var searchFor = ko.observable("Pizza"); // form Yelp Search Form with prepopulated placeholder
var searchNear = ko.observable("80210"); // form Yelp Search Form with prepopulated placeholder
var filterField = ko.observable();
/*
* -------------------------------------------------------------------------
* The filter and functions
* -------------------------------------------------------------------------
*/
function filterInputField() {
// ensure empty lists
var nameList = []; // for names
var filteredList = []; // for matches
filterField(filterField().toLowerCase()); // force the case on the search
for (var card in resultList()) {
nameList.push({
'index': card, // store index
'name': resultList()[card].name().toLowerCase(), // grab name as string
'description': resultList()[card].description().toLowerCase() // grabs description as string
});
}
for (var name in nameList) {
if (nameList[name].name.includes(filterField()) || nameList[name].description.includes(filterField())) { // if a name or description contains the search variable...
filteredList.push(resultList()[nameList[name].index]); // put it in filtered List
}
}
if (filteredList.length >= 1) { // if something in filtered List
resultList(filteredList); // put that on the board + map
prepMap();
} else { // otherwise
/* ------ Throw error message ------ */
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
resultList.push(new placeCard(noMatchFilterResponse[0]));
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
};
var ViewModel = function() {
var self = this;
};
/** ---------- filter functions --------------------- **/
function prepMap() {
clearAllMarkers(); // empty current markers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function flipCards() {
resultList(resultList().reverse());
prepMap();
}
function sortABC() {
resultList(resultList().sort(function(left, right) {
return left.name() == right.name() ? 0 : (left.name() < right.name() ? -1 : 1);
}));
prepMap();
}
function sortStars() {
resultList(resultList().sort(function(left, right) {
return left.stars.count() == right.stars.count() ? 0 : (left.stars.count() < right.stars.count() ? -1 : 1);
}));
resultList(resultList().reverse());
prepMap();
}
function resetList() {
resultList(originalList());
prepMap();
}
ko.applyBindings(new ViewModel());
/*
* ----------------------------------------------------
* The following functions handle requests to Yelp
* ----------------------------------------------------
*/
function yelpAjax(searchFor, searchNear) {
/*
* Keys and other tokens needed to access the Yelp API via OAuth
* In a non-Udacious scenario this would have to be moved
* to a server side script and therefore actually be "secret"
*/
var auth = {
consumerKey: "2M-JWI9l8UBCt3vm0R6vZg",
consumerSecret: "2TIm_ve4y6unTQR2D1HGnWTjFOM",
accessToken: "p44DAD9S6MecSv66hmrdR3qdJZhVkg7o",
accessTokenSecret: "rhnGNKjrDKMLZT0aRET8qIA-aWQ",
serviceProvider: {
signatureMethod: "HMAC-SHA1" // found here https://www.yelp.com/developers/documentation/v2/authentication
}
};
/*
* Grab the "secret" part of the auth keys and put them in an object
* that will then be passed on to the coming OAuth.SignatureMethod
*/
var accessor = {
consumerSecret: auth.consumerSecret,
tokenSecret: auth.accessTokenSecret
};
/*
* Create an array of parameters to handoff to message object that follows
* This helps keep things more bite-sized...
*/
var parameters = [
['term', searchFor],
['location', searchNear],
['callback', 'cb'],
['sort', 2], // '2' sorts results by rating
['limit', 15], // limits results to top 15
['oauth_consumer_key', auth.consumerKey],
['oauth_consumer_secret', auth.consumerSecret],
['oauth_token', auth.accessToken],
['oauth_signature_method', auth.serviceProvider.signatureMethod]
];
/*
* This message object is to be fired to Yelp as part of the
* OAuth.setTimestampAndNonce TODO: someday make this server-side
*/
var message = {
'action': 'http://api.yelp.com/v2/search',
'method': 'GET',
'parameters': parameters
};
/*
* Virtually sign and send things as part of OAuth JS Magic
*/
OAuth.setTimestampAndNonce(message);
OAuth.SignatureMethod.sign(message, accessor);
var parameterMap = OAuth.getParameterMap(message.parameters);
yJax(message.action, parameterMap);
}
/*
* Ajax OAuth method GETs data from Yelp API
*/
function yJax(url, yData) {
$.ajax({
'timeout': 3000,
'type': 'GET',
'url': url,
'data': yData,
'dataType': 'jsonp',
'global': true,
'cache': true,
'jsonpCallback': 'cb',
'success': function(data) {
makeYelpList(data);
},
'error': function() {
makeErrorList();
alert("oh no! the yelp request failed. Please try again later.");
},
});
}
/*
* --------------------------------------------------------
* Changes out the resultList with new Yelp results
* --------------------------------------------------------
*/
function makeYelpList(d) {
response = d.businesses; // push ajax response to the global var 'response'
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
originalList.removeAll();
clearAllMarkers(); // clears marker array
/* --- Display the search results --- */
response.forEach(function(place) { // place cards into observables
resultList.push(new placeCard(place));
originalList.push(new placeCard(place));
});
scrollingTriggersMarkers(); // activate scroll position monitor triggers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function makeErrorList() {
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
errorResponses.forEach(function(place) {
resultList.push(new placeCard(place));
});
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
/*
* -------------------------
* Initial Call to Yelp
* -------------------------
*/
yelpAjax(searchFor(), searchNear()); // on load, initialize with starting Yelp results
/*
* -------------------------------------------------------------------------
* This section handles requests to Google Maps and the related markers
* -------------------------------------------------------------------------
*/
/* --- google map keys --- */
var googleMapsAPIKey = 'AIzaSyClMls0bXZ3jgznlsLiP0ZgRrzSgUGFMbU';
var googleMapsgeocodeKey = 'AIzaSyBEXHFmzvonWnDvII96o0Zx8Z--i64lArA';
/* --- var to track data set --- */
var currentMarkers = [];
/* --- clear currentMarkers set --- */
function clearAllMarkers() {
currentMarkers = [];
}
/*
* ---------------------------------------------------------------
* build the map and place markers, listeners and triggers
* ---------------------------------------------------------------
*/
var map;
function initMap() {
// Create a map object and specify the DOM element for display.
map = new google.maps.Map(document.getElementById('map'), {
center: {
lat: response[0].location.coordinate.latitude - mapShift.right,
lng: response[0].location.coordinate.longitude - mapShift.up
},
scrollwheel: false,
zoom: 12,
mapTypeControl: false
});
// set markers with placeholder copy
setMarkers(map, resultList());
infowindow = new google.maps.InfoWindow({
content: "loading..."
});
reformatOnSize(); // for map
$('#map').css('position', 'absolute');
forceTop();
}
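/*
 * Note (assumption, not shown in this file): initMap is also the conventional callback
 * name when loading the Google Maps JavaScript API, e.g. a script tag in index.html with
 *
 *   src="https://maps.googleapis.com/maps/api/js?key=YOUR_KEY&callback=initMap"
 *   onerror="googleError()"
 *
 * In this app, however, initMap() is invoked directly after each Yelp response
 * (see makeYelpList) so the map always reflects the latest result set.
 */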
/* --- define icons used for markers --- */
var markerIcon = {
active: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#FFF',
fillOpacity: 0.8,
strokeWeight: 4,
strokeColor: '#0BA',
scale: 2.5,
},
resting: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#EE8060',
fillOpacity: 0.6,
strokeWeight: 4,
strokeColor: '#fff',
scale: 2.5,
}
};
/* --- define mapShift to ensure the markers are ----
--- staged on the right side of the view window --- */
var mapShift = {
right: 0.08,
up: 0.04
};
/*
* -----------------------------------------------------------------------------
* Loop through the markers, place them on the map with needed functionality
* ------------------------------------------------------------------------------
*/
function setMarkers(map, points) {
/* --- function needed for cleaning up infowindows --- */
function hideInfoWindowCloseControl() {
// $(".gm-style-iw").next("div").css('display', 'none'); // this function gets rid of close btn in infowindows
// udacity doesn't like it for this project so having an x is fine
}
/* --- function gives all markers resting icon and a base layering --- */
function resetMarkerIcons() {
for (var i = 0; i < currentMarkers.length; i++) {
currentMarkers[i].setIcon(markerIcon.resting);
currentMarkers[i].setZIndex(4);
currentMarkers[i].setAnimation(null); // turn BOUNCE Animation off
}
}
/* --- loop through placeCards and extract marker-related pieces --- */
for (var point in points) {
var place = points[point];
var siteLatLng = new google.maps.LatLng(place.location.coordinate.latitude, place.location.coordinate.longitude);
var marker = new google.maps.Marker({
position: siteLatLng,
map: map,
clickable: true,
animation: google.maps.Animation.DROP, // TODO change to something else?
icon: markerIcon.resting,
title: place.marker.title,
phone: place.marker.phone,
imgSrc: place.marker.imgSrc,
description: place.marker.description,
lat: place.location.coordinate.latitude,
lng: place.location.coordinate.longitude,
idSelector: place.marker.idSelector,
stars: place.marker.stars,
windowContent: '<div class="infowindow-title">' + place.marker.title + '</div><br/><img style="max-width: 96px; height: auto" src="' + place.marker.stars + '"></img>',
});
/* --- push marker to currentMarkers --- */
currentMarkers.push(marker);
/*
* -------------------------------------------------------
* event listeners for markers that set up infowindows
* -------------------------------------------------------
*/
/* --- click --- */
google.maps.event.addListener(marker, "click", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
this.setAnimation(google.maps.Animation.BOUNCE); // bounce on click
hideInfoWindowCloseControl(); // hide infoWindow close control
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
$('html, body').animate({
scrollTop: $(this.idSelector).offset().top - (20 + scrollAdjustment)
}, 100); // scroll to active placeCard in the DOM
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- mouseover --- */
google.maps.event.addListener(marker, "mouseover", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- doubleclick (used for DOM scroll trigger) --- */
google.maps.event.addListener(marker, "dblclick", function(event) {
resetMarkerIcons(); // put other markers back to resting state
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
}
forceTop(); // scroll DOM back to top
}
/* ------------------------------------------------------------------------
* Section monitors the DOM scrolling and trigger events when appropriate
* ------------------------------------------------------------------------
*/
/* --- pull trigger for a specific marker --- */
function OpenInfowindowForMarker(index) {
google.maps.event.trigger(currentMarkers[index], 'dblclick');
}
function openMarker(index) {
OpenInfowindowForMarker(index);
}
/* --- compare window scroll count to offsets of each placeCard and ---
--- trigger the appropriate marker as the card passes through view --- */
var scrollAdjustment = 0; // zero on standard desktop view
function scrollingTriggersMarkers() {
$(window).scroll(function() { // as user scrolls
var pixelsScrolled = $(window).scrollTop() + scrollAdjustment; // store distance scrolled
for (var resultCard in resultList()) { // for each placeCard
var resultOffset = $(resultList()[resultCard].idSelector()).offset().top; // store the offset of the card
if (resultOffset - pixelsScrolled < 60 && resultOffset - pixelsScrolled > -60) |
}
});
}
/** ----------------------------------------------------------------------------
* Handles changing mapShift vars in responsive manner using matchMedia
* ----------------------------------------------------------------------------
*/
function reformatOnSize() {
if (window.matchMedia("(min-width: 680px)").matches) { // for "big" screen
mapShift = {
right: 0.08,
up: 0.04
};
scrollAdjustment = 0;
map.setZoom(12);
$('#map').removeClass("fixed");
} else if (window.matchMedia("(orientation: portrait)").matches) { // small screen portrait
mapShift = {
right: -0.01,
up: 0.01
};
scrollAdjustment = 260;
map.setZoom(11);
}
else { // small screen landscape
mapShift = {
right: 0.09,
up: 0
};
scrollAdjustment = 0;
map.setZoom(11);
$('#map').removeClass("fixed");
}
}
$(window).resize(function() {
reformatOnSize();
});
/* --- force scroll the DOM to the top --- */
function forceTop() {
$('html, body').animate({
scrollTop: $('body').offset().top,
}, 200);
}
/* --- googleError in case it breaks --- */
function googleError() {
alert("Google Maps did not load");
}
| { // check if two distances are close
OpenInfowindowForMarker(resultCard); // open Infowindow for placeCard being viewed in DOM
} | conditional_block |
app-engine.js | /*
* -------------------------------------------------------------------------
* This is the data for an errorMessage
* -------------------------------------------------------------------------
*/
var errorResponses = [{
'id': '12309120',
'name': "OH NO!!",
'snippet_text': "You're yelp request didn't go through. Please try again later, or just hang out with the crew.",
'image_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'rating_img_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.962,
'longitude': -118.216,
},
'display_address': ['2955 Ski Town Canada'],
}
}, {
'id': '12309120',
'name': "Beacon's House",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}, {
'id': '12309120',
'name': "Artie's House",
'snippet_text': "Artie has an abode to be ado#EE8060. It's like nothing you've ever experienced. Not only does it have year round views of Canada's inc#EE8060ible Glacier National Park, but it is also has a pool. Artie is sure to have his humans keep a stockpile of treats always on the ready.",
'image_url': "https://s-media-cache-ak0.pinimg.com/originals/0d/80/1e/0d801ec8b8d40c12e67d4dffb994d31d.jpg",
'address1': '23 TransCanada Highway',
'cityState': 'Revelstoke, BC',
'display_phone': '250 837 7500',
'url': 'pc.gc.ca',
'location': {
'coordinate': {
'latitude': 51.209417,
'longitude': -117.723987,
},
'display_address': ['23 TransCanada Highway'],
}
}, {
'id': '12309120',
'name': "Beacon's House #2",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}, ];
var noMatchFilterResponse = [{
'id': '12309120',
'name': "OH NO!!",
'snippet_text': "No titles matched in these results. We only show the top 15 results from Yelp based on ratings. Try another search. There's totally something out there.",
'image_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'rating_img_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.962,
'longitude': -118.216,
},
'display_address': ['2955 Ski Town Canada'],
}
}];
var response = [{
'id': '12309120',
'name': "Beacon's House",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}];
/**
* --------------------------------------------------------------------
* placeCard is a data object used to display results in the view
* --------------------------------------------------------------------
**/
var placeCard = function(data) {
var that = this;
this.name = ko.observable(data.name);
this.id = ko.observable(data.id);
this.idSelector = ko.computed(function() {
return "#" + data.id;
});
this.description = ko.observable(data.snippet_text);
this.imgSrc = ko.computed(function() {
return data.image_url.replace('ms.jpg', 'l.jpg');
});
this.imgAltTag = ko.computed(function() {
return 'Photo of ' + data.name;
});
this.address1 = ko.observable(data.location.display_address[0]);
this.city = ko.observable(data.location.city);
this.state = ko.observable(data.location.state_code);
this.zip = ko.observable(data.location.postal_code);
this.address2 = ko.computed(function() {
return that.city() + ", " + that.state() + " " + that.zip();
});
this.phone = ko.observable(data.display_phone);
this.webURL = ko.observable(data.url);
this.location = {
coordinate: {
latitude: data.location.coordinate.latitude,
longitude: data.location.coordinate.longitude,
},
address: data.location.display_address[0] +
'<br>' + data.location.display_address[data.location.display_address.length - 1]
};
this.review = {
img: data.snippet_image_url,
txt: data.snippet_text
};
this.stars = {
count: ko.observable(data.rating),
standard: ko.observable(data.rating_img_url),
large: ko.observable(data.rating_img_url_large),
small: ko.observable(data.rating_img_url_small)
};
this.marker = {
title: data.name,
phone: data.display_phone,
imgSrc: data.image_url,
description: data.snippet_text,
lat: data.location.coordinate.latitude,
lng: data.location.coordinate.longitude,
idSelector: "#" + data.id,
stars: data.rating_img_url
};
this.googleDirections = ko.computed(function() {
return "//google.com/maps?q=" + data.location.display_address[0] + '+' + data.location.city + '+' + data.location.state_code;
});
this.facebookShare = ko.computed(function() {
return "//www.facebook.com/sharer/sharer.php?u=" + data.url;
});
this.twitterShare = ko.computed(function() {
return "//twitter.com/intent/tweet?text=OMG " + data.name + " is an awesome spot for " + searchFor() + " in " + searchNear() + "&url=" + data.url + ";via=dangerdan";
});
};
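/*
 * Usage sketch (never called by the app): how a placeCard is built from one
 * of the stubbed Yelp-style business objects above and how its observables
 * and computeds are read. Names here are illustrative only; call it, if at
 * all, only after the whole script has loaded so searchFor/searchNear exist.
 */
function placeCardUsageExample() {
    var exampleCard = new placeCard(response[0]); // stub business object defined above
    var title = exampleCard.name();               // ko.observable -> call to unwrap
    var selector = exampleCard.idSelector();      // ko.computed   -> "#" + id
    var mapsLink = exampleCard.googleDirections();
    return [title, selector, mapsLink];
}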
/* --- resultList is the placeCards' holder --- */
var resultList = ko.observableArray([]);
var originalList = ko.observableArray([]);
/*
* -----------------------------------------------------------------
* easily recognized function that performs ajax request to yelp
* -----------------------------------------------------------------
*/
function updateYelpResults() {
yelpAjax(searchFor(), searchNear()); // get all the needed info
}
/** Hide search results
*
*/
function hideYelpResults() {
$('.yelp-search-results').toggleClass('hidden');
}
/*
* -------------------------------------------------------------------------
* ViewModel, binding and DOM input elements in the form of observables
* -------------------------------------------------------------------------
*/
var searchFor = ko.observable("Pizza"); // form Yelp Search Form with prepopulated placeholder
var searchNear = ko.observable("80210"); // form Yelp Search Form with prepopulated placeholder
var filterField = ko.observable();
/*
* -------------------------------------------------------------------------
* The filter and functions
* -------------------------------------------------------------------------
*/
function filterInputField() {
// ensure empty lists
nameList = []; // for names
filteredList = []; // for matches
filterField(filterField().toLowerCase()); // force the case on the search
for (var card in resultList()) {
nameList.push({
'index': card, // store index
'name': resultList()[card].name().toLowerCase(), // grab name as string
'description': resultList()[card].description().toLowerCase() // grabs description as string
});
}
for (var name in nameList) {
if (nameList[name].name.includes(filterField()) || nameList[name].description.includes(filterField())) { // if a name or description contains the search variable...
filteredList.push(resultList()[nameList[name].index]); // put it in filtered List
}
}
if (filteredList.length >= 1) { // if something in filtered List
resultList(filteredList); // put that on the board + map
prepMap();
} else { // otherwise
/* ------ Throw error message ------ */
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
resultList.push(new placeCard(noMatchFilterResponse[0]));
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
};
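/*
 * The same matching rule expressed with Array.prototype.filter (sketch only;
 * the app keeps the explicit loops above). `query` stands in for the
 * lower-cased filterField() value; nothing calls this helper.
 */
function filterCardsSketch(cards, query) {
    return cards.filter(function(card) {
        return card.name().toLowerCase().includes(query) ||
            card.description().toLowerCase().includes(query);
    });
}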
var ViewModel = function() {
var self = this;
};
/** ---------- filter functions --------------------- **/
function prepMap() {
clearAllMarkers(); // empty current markers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function flipCards() {
resultList(resultList().reverse());
prepMap();
}
function sortABC() {
resultList(resultList().sort(function(left, right) {
return left.name() == right.name() ? 0 : (left.name() < right.name() ? -1 : 1);
}));
prepMap();
}
function sortStars() {
resultList(resultList().sort(function(left, right) {
return left.stars.count() == right.stars.count() ? 0 : (left.stars.count() < right.stars.count() ? -1 : 1);
}));
resultList(resultList().reverse());
prepMap();
}
function resetList() {
resultList(originalList());
prepMap();
}
ko.applyBindings(new ViewModel());
/*
* ----------------------------------------------------
* The following functions handle requests to Yelp
* ----------------------------------------------------
*/
function yelpAjax(searchFor, searchNear) {
/*
* Keys and other tokens needed to access the Yelp API via OAuth
* In a non-Udacious scenario this would have to be moved
* to a server side script and therefore actually be "secret"
*/
var auth = {
consumerKey: "2M-JWI9l8UBCt3vm0R6vZg",
consumerSecret: "2TIm_ve4y6unTQR2D1HGnWTjFOM",
accessToken: "p44DAD9S6MecSv66hmrdR3qdJZhVkg7o",
accessTokenSecret: "rhnGNKjrDKMLZT0aRET8qIA-aWQ",
serviceProvider: {
signatureMethod: "HMAC-SHA1" // found here https://www.yelp.com/developers/documentation/v2/authentication
}
};
/*
* Grab the "secret" part of the auth keys and put them in an object
* that will then be passed on to the coming OAuth.SignatureMethod
*/
var accessor = {
consumerSecret: auth.consumerSecret,
tokenSecret: auth.accessTokenSecret
};
/*
* Create an array of parameters to handoff to message object that follows
* This helps keep things more bite-sized...
*/
var parameters = [
['term', searchFor],
['location', searchNear],
['callback', 'cb'],
['sort', 2], // '2' sorts results by rating
['limit', 15], // limits results to top 15
['oauth_consumer_key', auth.consumerKey],
['oauth_consumer_secret', auth.consumerSecret],
['oauth_token', auth.accessToken],
['oauth_signature_method', auth.serviceProvider.signatureMethod]
];
/*
 * This message object is to be fired to Yelp as part of the
* OAuth.setTimestampAndNonce TODO: someday make this server-side
*/
var message = {
'action': 'http://api.yelp.com/v2/search',
'method': 'GET',
'parameters': parameters
};
/*
 * Virtually sign and send things as part of OAuth JS Magic
*/
OAuth.setTimestampAndNonce(message);
OAuth.SignatureMethod.sign(message, accessor);
var parameterMap = OAuth.getParameterMap(message.parameters);
yJax(message.action, parameterMap);
}
/*
* Ajax OAuth method GETs data from Yelp API
*/
function yJax(url, yData) {
$.ajax({
'timeout': 3000,
'type': 'GET',
'url': url,
'data': yData,
'dataType': 'jsonp',
'global': true,
'cache': true,
'jsonpCallback': 'cb',
'success': function(data) {
makeYelpList(data);
},
'error': function() {
makeErrorList();
alert("oh no! the yelp request failed. Please try again later.");
},
});
}
/*
* --------------------------------------------------------
 * Replaces the resultList with new Yelp results
* --------------------------------------------------------
*/
function makeYelpList(d) {
response = d.businesses; // push ajax response to the global var 'response'
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
originalList.removeAll();
clearAllMarkers(); // clears marker array
/* --- Display the search results --- */
response.forEach(function(place) { // place cards into observables
resultList.push(new placeCard(place));
originalList.push(new placeCard(place));
});
scrollingTriggersMarkers(); // activate scroll position monitor triggers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function makeErrorList() |
/*
* -------------------------
 * Initial Call to Yelp
* -------------------------
*/
yelpAjax(searchFor(), searchNear()); // onload initialize with starting Yelp Results
/*
* -------------------------------------------------------------------------
* This section handles requests to Google Maps and the related markers
* -------------------------------------------------------------------------
*/
/* --- google map keys --- */
var googleMapsAPIKey = 'AIzaSyClMls0bXZ3jgznlsLiP0ZgRrzSgUGFMbU';
var googleMapsgeocodeKey = 'AIzaSyBEXHFmzvonWnDvII96o0Zx8Z--i64lArA';
/* --- var to track data set --- */
var currentMarkers = [];
/* --- clear currentMarkers set --- */
function clearAllMarkers() {
currentMarkers = [];
}
/*
* ---------------------------------------------------------------
* build the map and place markers, listeners and triggers
* ---------------------------------------------------------------
*/
var map;
function initMap() {
// Create a map object and specify the DOM element for display.
map = new google.maps.Map(document.getElementById('map'), {
center: {
lat: response[0].location.coordinate.latitude - mapShift.right,
lng: response[0].location.coordinate.longitude - mapShift.up
},
scrollwheel: false,
zoom: 12,
mapTypeControl: false
});
// set markers with placeholding copy
setMarkers(map, resultList());
infowindow = new google.maps.InfoWindow({
content: "loading..."
});
reformatOnSize(); // for map
$('#map').css('position', 'absolute');
forceTop();
}
/* --- define icons used for markers --- */
var markerIcon = {
active: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#FFF',
fillOpacity: 0.8,
strokeWeight: 4,
strokeColor: '#0BA',
scale: 2.5,
},
resting: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#EE8060',
fillOpacity: 0.6,
strokeWeight: 4,
strokeColor: '#fff',
scale: 2.5,
}
};
/* --- define mapShift to ensure the markers are ----
--- staged on the right side of the view window --- */
var mapShift = {
right: 0.08,
up: 0.04
};
/*
* -----------------------------------------------------------------------------
* Loop through the markers, place them on the map with needed functionality
* ------------------------------------------------------------------------------
*/
function setMarkers(map, points) {
/* --- function needed for cleaning up infowindows --- */
function hideInfoWindowCloseControl() {
// $(".gm-style-iw").next("div").css('display', 'none'); // this function gets rid of close btn in infowindows
// udacity doesn't like it for this project so having an x is fine
}
/* --- function gives all markers resting icon and a base layering --- */
function resetMarkerIcons() {
for (var i = 0; i < currentMarkers.length; i++) {
currentMarkers[i].setIcon(markerIcon.resting);
currentMarkers[i].setZIndex(4);
currentMarkers[i].setAnimation(null); // turn BOUNCE Animation off
}
}
/* --- loop through placeCards and extract marker-related pieces --- */
for (var point in points) {
var place = points[point];
var siteLatLng = new google.maps.LatLng(place.location.coordinate.latitude, place.location.coordinate.longitude);
var marker = new google.maps.Marker({
position: siteLatLng,
map: map,
clickable: true,
animation: google.maps.Animation.DROP, // TODO change to something else?
icon: markerIcon.resting,
title: place.marker.title,
phone: place.marker.phone,
imgSrc: place.marker.imgSrc,
description: place.marker.description,
lat: place.location.coordinate.latitude,
lng: place.location.coordinate.longitude,
idSelector: place.marker.idSelector,
stars: place.marker.stars,
windowContent: '<div class="infowindow-title">' + place.marker.title + '</div><br/><img style="max-width: 96px; height: auto" src="' + place.marker.stars + '"></img>',
});
/* --- push marker to currentMarkers --- */
currentMarkers.push(marker);
/*
* -------------------------------------------------------
* event listeners for markers that set up infowindows
* -------------------------------------------------------
*/
/* --- click --- */
google.maps.event.addListener(marker, "click", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
this.setAnimation(google.maps.Animation.BOUNCE); // bounce on click
hideInfoWindowCloseControl(); // hide infoWindow close control
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
$('html, body').animate({
scrollTop: $(this.idSelector).offset().top - (20 + scrollAdjustment)
}, 100); // scroll to active placeCard in the DOM
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- mouseover --- */
google.maps.event.addListener(marker, "mouseover", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- doubleclick (used for DOM scroll trigger) --- */
google.maps.event.addListener(marker, "dblclick", function(event) {
resetMarkerIcons(); // put other markers back to resting state
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
}
forceTop(); // scroll DOM back to top
}
/* ------------------------------------------------------------------------
 * Section monitors the DOM scrolling and triggers events when appropriate
* ------------------------------------------------------------------------
*/
/* --- pull trigger for a specific marker --- */
function OpenInfowindowForMarker(index) {
google.maps.event.trigger(currentMarkers[index], 'dblclick');
}
function openMarker(index) {
OpenInfowindowForMarker(index);
}
/* --- compare window scroll count to offsets of each placeCard and ---
--- trigger the appropriate marker as the card passes through view --- */
var scrollAdjustment = 0; // zero on standard desktop view
function scrollingTriggersMarkers() {
$(window).scroll(function() { // as user scrolls
var pixelsScrolled = $(window).scrollTop() + scrollAdjustment; // store distance scrolled
for (var resultCard in resultList()) { // for each placeCard
var resultOffset = $(resultList()[resultCard].idSelector()).offset().top; // store the offset of the card
if (resultOffset - pixelsScrolled < 60 && resultOffset - pixelsScrolled > -60) { // check if two distances are close
OpenInfowindowForMarker(resultCard); // open Infowindow for placeCard being viewed in DOM
}
}
});
}
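/*
 * Optional refinement (sketch, not used by the app): the handler above runs on
 * every scroll event and re-measures every card's offset each time. A simple
 * time-based throttle like this hypothetical helper could wrap the handler
 * body if that ever becomes a performance problem, e.g.
 * $(window).scroll(throttle(handlerBody, 100));
 */
function throttle(fn, waitMs) {
    var last = 0;
    return function() {
        var now = Date.now();
        if (now - last >= waitMs) {
            last = now;
            fn.apply(this, arguments);
        }
    };
}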
/** ----------------------------------------------------------------------------
* Handles changing mapShift vars in responsive manner using matchMedia
* ----------------------------------------------------------------------------
*/
function reformatOnSize() {
if (window.matchMedia("(min-width: 680px)").matches) { // for "big" screen
mapShift = {
right: 0.08,
up: 0.04
};
scrollAdjustment = 0;
map.setZoom(12);
$('#map').removeClass("fixed");
} else if (window.matchMedia("(orientation: portrait)").matches) { // small screen portrait
mapShift = {
right: -0.01,
up: 0.01
};
scrollAdjustment = 260;
map.setZoom(11);
}
else { // small screen landscape
mapShift = {
right: 0.09,
up: 0
};
scrollAdjustment = 0;
map.setZoom(11);
$('#map').removeClass("fixed");
}
}
$(window).resize(function() {
reformatOnSize();
});
/* --- force scroll the DOM to the top --- */
function forceTop() {
$('html, body').animate({
scrollTop: $('body').offset().top,
}, 200);
}
/* --- googleError in case it breaks --- */
function googleError() {
alert("Google Maps did not load");
}
| {
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
errorResponses.forEach(function(place) {
resultList.push(new placeCard(place));
});
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
} | identifier_body |
app-engine.js | /*
* -------------------------------------------------------------------------
* This is the data for an errorMessage
* -------------------------------------------------------------------------
*/
var errorResponses = [{
'id': '12309120',
'name': "OH NO!!",
'snippet_text': "You're yelp request didn't go through. Please try again later, or just hang out with the crew.",
'image_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'rating_img_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.962,
'longitude': -118.216,
},
'display_address': ['2955 Ski Town Canada'],
}
}, {
'id': '12309120',
'name': "Beacon's House",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}, {
'id': '12309120',
'name': "Artie's House",
'snippet_text': "Artie has an abode to be ado#EE8060. It's like nothing you've ever experienced. Not only does it have year round views of Canada's inc#EE8060ible Glacier National Park, but it is also has a pool. Artie is sure to have his humans keep a stockpile of treats always on the ready.",
'image_url': "https://s-media-cache-ak0.pinimg.com/originals/0d/80/1e/0d801ec8b8d40c12e67d4dffb994d31d.jpg",
'address1': '23 TransCanada Highway',
'cityState': 'Revelstoke, BC',
'display_phone': '250 837 7500',
'url': 'pc.gc.ca',
'location': {
'coordinate': {
'latitude': 51.209417,
'longitude': -117.723987,
},
'display_address': ['23 TransCanada Highway'],
}
}, {
'id': '12309120',
'name': "Beacon's House #2",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}, ];
var noMatchFilterResponse = [{
'id': '12309120',
'name': "OH NO!!",
'snippet_text': "No titles matched in these results. We only show the top 15 results from Yelp based on ratings. Try another search. There's totally something out there.",
'image_url': 'http://41.media.tumblr.com/1afb893857c5205fc19341c1c034ab70/tumblr_nye0u9CZBd1udod9xo1_1280.jpg',
'rating_img_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.962,
'longitude': -118.216,
},
'display_address': ['2955 Ski Town Canada'],
}
}];
var response = [{
'id': '12309120',
'name': "Beacon's House",
'snippet_text': "Beacon has a world class home. If it is found to be clean, it is shiny than the Trump towner and is filled with an infinite amount of treats as to attract a large amount of puppy friends. In the summer it is often hosting a BBQ and other lustrious affairs.",
'image_url': 'http://36.media.tumblr.com/170b376be332902fd8365a6db73b4164/tumblr_nyv6efMxPM1udod9xo1_1280.jpg',
'address1': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}];
/**
* --------------------------------------------------------------------
* placeCard is a data object used to display results in the view
* --------------------------------------------------------------------
**/
var placeCard = function(data) {
var that = this;
this.name = ko.observable(data.name);
this.id = ko.observable(data.id);
this.idSelector = ko.computed(function() {
return "#" + data.id;
});
this.description = ko.observable(data.snippet_text);
this.imgSrc = ko.computed(function() {
return data.image_url.replace('ms.jpg', 'l.jpg');
});
this.imgAltTag = ko.computed(function() {
return 'Photo of ' + data.name;
});
this.address1 = ko.observable(data.location.display_address[0]);
this.city = ko.observable(data.location.city);
this.state = ko.observable(data.location.state_code);
this.zip = ko.observable(data.location.postal_code);
this.address2 = ko.computed(function() {
return that.city() + ", " + that.state() + " " + that.zip();
});
this.phone = ko.observable(data.display_phone);
this.webURL = ko.observable(data.url);
this.location = {
coordinate: {
latitude: data.location.coordinate.latitude,
longitude: data.location.coordinate.longitude,
},
address: data.location.display_address[0] +
'<br>' + data.location.display_address[data.location.display_address.length - 1]
};
this.review = {
img: data.snippet_image_url,
txt: data.snippet_text
};
this.stars = {
count: ko.observable(data.rating),
standard: ko.observable(data.rating_img_url),
large: ko.observable(data.rating_img_url_large),
small: ko.observable(data.rating_img_url_small)
};
this.marker = {
title: data.name,
phone: data.display_phone,
imgSrc: data.image_url,
description: data.snippet_text,
lat: data.location.coordinate.latitude,
lng: data.location.coordinate.longitude,
idSelector: "#" + data.id,
stars: data.rating_img_url
};
this.googleDirections = ko.computed(function() {
return "//google.com/maps?q=" + data.location.display_address[0] + '+' + data.location.city + '+' + data.location.state_code;
});
this.facebookShare = ko.computed(function() {
return "//www.facebook.com/sharer/sharer.php?u=" + data.url;
});
this.twitterShare = ko.computed(function() {
return "//twitter.com/intent/tweet?text=OMG " + data.name + " is an awesome spot for " + searchFor() + " in " + searchNear() + "&url=" + data.url + ";via=dangerdan";
});
};
/* --- resultList is the placeCards' holder --- */
var resultList = ko.observableArray([]);
var originalList = ko.observableArray([]);
/*
* -----------------------------------------------------------------
* easily recognized function that performs ajax request to yelp
* -----------------------------------------------------------------
*/
function updateYelpResults() {
yelpAjax(searchFor(), searchNear()); // get all the needed info
}
/** Hide search results
*
*/
function hideYelpResults() {
$('.yelp-search-results').toggleClass('hidden');
}
/*
* -------------------------------------------------------------------------
* ViewModel, binding and DOM input elements in the form of observables
* -------------------------------------------------------------------------
*/
var searchFor = ko.observable("Pizza"); // form Yelp Search Form with prepopulated placeholder
var searchNear = ko.observable("80210"); // form Yelp Search Form with prepopulated placeholder
var filterField = ko.observable();
/*
* -------------------------------------------------------------------------
* The filter and functions
* -------------------------------------------------------------------------
*/
function filterInputField() {
// ensure empty lists
nameList = []; // for names
filteredList = []; // for matches
filterField(filterField().toLowerCase()); // force the case on the search
for (var card in resultList()) {
nameList.push({
'index': card, // store index
'name': resultList()[card].name().toLowerCase(), // grab name as string
'description': resultList()[card].description().toLowerCase() // grabs description as string
});
}
for (var name in nameList) {
if (nameList[name].name.includes(filterField()) || nameList[name].description.includes(filterField())) { // if a name or description contains the search variable...
filteredList.push(resultList()[nameList[name].index]); // put it in filtered List
}
}
if (filteredList.length >= 1) { // if something in filtered List
resultList(filteredList); // put that on the board + map
prepMap();
} else { // otherwise
/* ------ Throw error message ------ */
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
resultList.push(new placeCard(noMatchFilterResponse[0]));
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
};
var ViewModel = function() {
var self = this;
};
/** ---------- filter functions --------------------- **/
function prepMap() {
clearAllMarkers(); // empty current markers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function flipCards() {
resultList(resultList().reverse());
prepMap();
}
function sortABC() {
resultList(resultList().sort(function(left, right) {
return left.name() == right.name() ? 0 : (left.name() < right.name() ? -1 : 1);
}));
prepMap();
}
function sortStars() {
resultList(resultList().sort(function(left, right) {
return left.stars.count() == right.stars.count() ? 0 : (left.stars.count() < right.stars.count() ? -1 : 1);
}));
resultList(resultList().reverse());
prepMap();
}
function resetList() {
resultList(originalList());
prepMap();
}
ko.applyBindings(new ViewModel());
/*
* ----------------------------------------------------
* The following functions handle requests to Yelp
* ----------------------------------------------------
*/
function yelpAjax(searchFor, searchNear) {
/*
* Keys and other tokens needed to access the Yelp API via OAuth
* In a non-Udacious scenario this would have to be moved
* to a server side script and therefore actually be "secret"
*/
var auth = {
consumerKey: "2M-JWI9l8UBCt3vm0R6vZg",
consumerSecret: "2TIm_ve4y6unTQR2D1HGnWTjFOM",
accessToken: "p44DAD9S6MecSv66hmrdR3qdJZhVkg7o",
accessTokenSecret: "rhnGNKjrDKMLZT0aRET8qIA-aWQ",
serviceProvider: {
signatureMethod: "HMAC-SHA1" // found here https://www.yelp.com/developers/documentation/v2/authentication
}
};
/*
* Grab the "secret" part of the auth keys and put them in an object
* that will then be passed on to the coming OAuth.SignatureMethod
*/
var accessor = {
consumerSecret: auth.consumerSecret,
tokenSecret: auth.accessTokenSecret
};
/*
* Create an array of parameters to handoff to message object that follows
* This helps keep things more bite-sized...
*/
var parameters = [
['term', searchFor],
['location', searchNear],
['callback', 'cb'],
['sort', 2], // '2' sorts results by rating
['limit', 15], // limits results to top 15
['oauth_consumer_key', auth.consumerKey],
['oauth_consumer_secret', auth.consumerSecret],
['oauth_token', auth.accessToken],
['oauth_signature_method', auth.serviceProvider.signatureMethod]
];
/*
 * This message object is to be fired to Yelp as part of the
* OAuth.setTimestampAndNonce TODO: someday make this server-side
*/
var message = {
'action': 'http://api.yelp.com/v2/search',
'method': 'GET',
'parameters': parameters
};
/*
 * Virtually sign and send things as part of OAuth JS Magic
*/
OAuth.setTimestampAndNonce(message);
OAuth.SignatureMethod.sign(message, accessor);
var parameterMap = OAuth.getParameterMap(message.parameters);
yJax(message.action, parameterMap);
}
/*
* Ajax OAuth method GETs data from Yelp API
*/
function yJax(url, yData) {
$.ajax({
'timeout': 3000,
'type': 'GET',
'url': url,
'data': yData,
'dataType': 'jsonp',
'global': true,
'cache': true,
'jsonpCallback': 'cb',
'success': function(data) {
makeYelpList(data);
},
'error': function() {
makeErrorList();
alert("oh no! the yelp request failed. Please try again later.");
},
});
}
/*
* --------------------------------------------------------
 * Replaces the resultList with new Yelp results
* --------------------------------------------------------
*/
function makeYelpList(d) {
response = d.businesses; // push ajax response to the global var 'response'
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
originalList.removeAll();
clearAllMarkers(); // clears marker array
/* --- Display the search results --- */
response.forEach(function(place) { // place cards into observables
resultList.push(new placeCard(place));
originalList.push(new placeCard(place));
});
scrollingTriggersMarkers(); // activate scroll position monitor triggers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function makeErrorList() {
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
errorResponses.forEach(function(place) {
resultList.push(new placeCard(place));
});
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
/*
* -------------------------
 * Initial Call to Yelp
* -------------------------
*/
yelpAjax(searchFor(), searchNear()); // onload initialize with starting Yelp Results
/*
* -------------------------------------------------------------------------
* This section handles requests to Google Maps and the related markers
* -------------------------------------------------------------------------
*/
/* --- google map keys --- */
var googleMapsAPIKey = 'AIzaSyClMls0bXZ3jgznlsLiP0ZgRrzSgUGFMbU';
var googleMapsgeocodeKey = 'AIzaSyBEXHFmzvonWnDvII96o0Zx8Z--i64lArA';
/* --- var to track data set --- */
var currentMarkers = [];
/* --- clear currentMarkers set --- */
function clearAllMarkers() {
currentMarkers = [];
}
/*
* ---------------------------------------------------------------
* build the map and place markers, listeners and triggers
* ---------------------------------------------------------------
*/
var map;
function initMap() {
// Create a map object and specify the DOM element for display.
map = new google.maps.Map(document.getElementById('map'), {
center: {
lat: response[0].location.coordinate.latitude - mapShift.right,
lng: response[0].location.coordinate.longitude - mapShift.up
},
scrollwheel: false,
zoom: 12,
mapTypeControl: false
});
// set markers with placeholding copy
setMarkers(map, resultList());
infowindow = new google.maps.InfoWindow({
content: "loading..."
});
reformatOnSize(); // for map
$('#map').css('position', 'absolute');
forceTop();
}
/* --- define icons used for markers --- */
var markerIcon = {
active: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#FFF',
fillOpacity: 0.8,
strokeWeight: 4,
strokeColor: '#0BA',
scale: 2.5,
},
resting: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#EE8060',
fillOpacity: 0.6,
strokeWeight: 4,
strokeColor: '#fff',
scale: 2.5,
}
};
/* --- define mapShift to ensure the markers are ----
--- staged on the right side of the view window --- */
var mapShift = {
right: 0.08,
up: 0.04
};
/*
* -----------------------------------------------------------------------------
* Loop through the markers, place them on the map with needed functionality
* ------------------------------------------------------------------------------
*/
function setMarkers(map, points) {
/* --- function needed for cleaning up infowindows --- */
function hideInfoWindowCloseControl() {
// $(".gm-style-iw").next("div").css('display', 'none'); // this function gets rid of close btn in infowindows
// udacity doesn't like it for this project so having an x is fine
}
/* --- function gives all markers resting icon and a base layering --- */
function resetMarkerIcons() {
for (var i = 0; i < currentMarkers.length; i++) {
currentMarkers[i].setIcon(markerIcon.resting);
currentMarkers[i].setZIndex(4);
currentMarkers[i].setAnimation(null); // turn BOUNCE Animation off
}
}
/* --- loop through placeCards and extract marker-related pieces --- */
for (var point in points) {
var place = points[point];
var siteLatLng = new google.maps.LatLng(place.location.coordinate.latitude, place.location.coordinate.longitude);
var marker = new google.maps.Marker({
position: siteLatLng,
map: map,
clickable: true,
animation: google.maps.Animation.DROP, // TODO change to something else?
icon: markerIcon.resting,
title: place.marker.title,
phone: place.marker.phone,
imgSrc: place.marker.imgSrc,
description: place.marker.description,
lat: place.location.coordinate.latitude,
lng: place.location.coordinate.longitude,
idSelector: place.marker.idSelector,
stars: place.marker.stars,
windowContent: '<div class="infowindow-title">' + place.marker.title + '</div><br/><img style="max-width: 96px; height: auto" src="' + place.marker.stars + '"></img>',
});
/* --- push marker to currentMarkers --- */
currentMarkers.push(marker);
/*
* -------------------------------------------------------
* event listeners for markers that set up infowindows
* -------------------------------------------------------
*/
/* --- click --- */
google.maps.event.addListener(marker, "click", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
this.setAnimation(google.maps.Animation.BOUNCE); // bounce on click
hideInfoWindowCloseControl(); // hide infoWindow close control
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
$('html, body').animate({
scrollTop: $(this.idSelector).offset().top - (20 + scrollAdjustment)
}, 100); // scroll to active placeCard in the DOM
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- mouseover --- */
google.maps.event.addListener(marker, "mouseover", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- doubleclick (used for DOM scroll trigger) --- */
google.maps.event.addListener(marker, "dblclick", function(event) {
resetMarkerIcons(); // put other markers back to resting state
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
}
forceTop(); // scroll DOM back to top
}
/* ------------------------------------------------------------------------
 * Section monitors the DOM scrolling and triggers events when appropriate
* ------------------------------------------------------------------------
*/
/* --- pull trigger for a specific marker --- */
function OpenInfowindowForMarker(index) {
google.maps.event.trigger(currentMarkers[index], 'dblclick');
}
function openMarker(index) {
OpenInfowindowForMarker(index);
}
/* --- compare window scroll count to offsets of each placeCard and ---
--- trigger the appropriate marker as the card passes through view --- */
var scrollAdjustment = 0; // zero on standard desktop view
function scrollingTriggersMarkers() {
$(window).scroll(function() { // as user scrolls
var pixelsScrolled = $(window).scrollTop() + scrollAdjustment; // store distance scrolled
for (var resultCard in resultList()) { // for each placeCard
var resultOffset = $(resultList()[resultCard].idSelector()).offset().top; // store the offset of the card
if (resultOffset - pixelsScrolled < 60 && resultOffset - pixelsScrolled > -60) { // check if two distances are close
OpenInfowindowForMarker(resultCard); // open Infowindow for placeCard being viewed in DOM
}
}
});
}
/** ----------------------------------------------------------------------------
* Handles changing mapShift vars in responsive manner using matchMedia
* ----------------------------------------------------------------------------
*/
function reformatOnSize() {
if (window.matchMedia("(min-width: 680px)").matches) { // for "big" screen
mapShift = {
right: 0.08,
up: 0.04
};
scrollAdjustment = 0;
map.setZoom(12);
$('#map').removeClass("fixed");
} else if (window.matchMedia("(orientation: portrait)").matches) { // small screen portrait
mapShift = {
right: -0.01,
up: 0.01
};
scrollAdjustment = 260;
map.setZoom(11);
}
else { // small screen landscape
mapShift = {
right: 0.09, | scrollAdjustment = 0;
map.setZoom(11);
$('#map').removeClass("fixed");
}
}
$(window).resize(function() {
reformatOnSize();
});
/* --- force scroll the DOM to the top --- */
function forceTop() {
$('html, body').animate({
scrollTop: $('body').offset().top,
}, 200);
}
/* --- googleError in case it breaks --- */
function googleError() {
alert("Google Maps did not load");
} | up: 0
}; | random_line_split |
aml.py | import sys
class Node(object):
def __init__(self, name=""):
self.children = set()
self.parents = set()
self.name = name
self.dual = None
def addChild(self, node):
self.children.add(node)
def removeChild(self, node):
self.children.discard(node)
def addParent(self, node):
self.parents.add(node)
def removeParent(self, node):
self.parents.discard(node)
def addDual(self, node):
self.dual.add(node)
def __str__(self):
return self.name
def linkNodes(child, parent):
child.addParent(parent)
parent.addChild(child)
def linkDuals(node, dual):
node.dual = dual
dual.dual = node
def fullLink(child, parent):
linkNodes(child, parent)
linkNodes(parent.dual, child.dual)
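# Usage sketch (not called anywhere): how a constant/term pair and their duals
# are expected to be wired. linkDuals pairs a node with its dual, and fullLink
# records child <= parent in the primal graph while adding the reversed edge
# parent* <= child* between the duals. Names below are illustrative only.
def _dual_link_example():
    const = Node(name="color_red")
    const_dual = Node(name="[color_red]")
    term = Node(name="example_term")
    term_dual = Node(name="[example_term]")
    linkDuals(const, const_dual)
    linkDuals(term, term_dual)
    fullLink(const, term)  # constant sits below the term
    assert term in const.parents and const in term.children
    assert term_dual in const_dual.children and const_dual in term_dual.parents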
def deleteNode(node, layers, layer, dlayer):
for parent in node.parents:
parent.removeChild(node)
parent.dual.removeParent(node)
for child in node.children:
child.removeParent(node)
child.dual.removeChild(node)
layers[layer].discard(node)
if dlayer:
layers[dlayer].discard(node.dual)
for child in node.children:
for parent in node.parents:
fullLink(child, parent)
return layers
def getLower(node):
lowerset = set([node])
for child in node.children:
lowerset = lowerset.union(getLower(child))
return lowerset
def getConstrainedLower(node, layer):
return getLower(node).intersection(layer)
def getUpper(node):
upperset = set()
for parent in node.parents:
upperset = upperset.union(getLower(parent))
return upperset
def getConstrainedUpper(node, layer):
return getUpper(node).intersection(layer)
def getTrace(node, layers):
atoms = getConstrainedLower(node, layers["atoms"])
trace = layers["dualAtoms"]
for atom in atoms:
trace = trace.intersection(getLower(atom.dual))
return trace
def traceConstraint(a, b, layers):
return getTrace(b, layers).issubset(getTrace(a, layers))
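# Note on the two helpers above: getTrace(x) starts from all dual atoms and
# intersects the lower set of phi.dual for every atom phi below x, so it is the
# set of dual atoms lying under every atom-dual of x. traceConstraint(a, b)
# checks the inclusion Tr(b) <= Tr(a). With a = "$", Algorithm 2 below keeps
# editing the graph until that inclusion holds for every positive term, while
# Algorithm 1 edits it so the inclusion fails for every negative term.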
def findStronglyDiscriminantCoefficient(a, b, graph):
Omega = set(map(lambda c: c.dual, getConstrainedLower(a, graph["constants"])))
U = getTrace(b, graph)
while U:
zeta = U.pop()
T = Omega.difference(getUpper(zeta))
if T:
c = T.pop()
return c.dual
return None
# Algorithm 1
def enforceNegativeTraceConstraints(graph):
# Preprocessing Step
count = 1
for neg in graph["nterms"]:
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, neg.dual)
graph["dualAtoms"].add(zeta)
count += 1
# Main Algorithm
for neg in graph["nterms"]:
if not traceConstraint(graph["$"], neg, graph):
continue
c = None
while not c:
c = findStronglyDiscriminantCoefficient(graph["$"], neg, graph)
if not c:
h = getConstrainedLower(neg.dual, graph["terms*"]).difference(getLower(graph["$"].dual)).pop()
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, h.dual)
graph["dualAtoms"].add(zeta)
count += 1
phi = Node(name="phi")
dphi = Node(name="[phi]")
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
# Algorithm 2
def enforcePositiveTraceContraints(graph):
phiCount = 1
for pos in graph["pterms"]:
while not traceConstraint(graph["$"], pos, graph):
zeta = getTrace(pos, graph).difference(getTrace(graph["$"], graph)).pop()
Gamma = set()
for c in getConstrainedLower(pos, graph["constants"]):
if zeta not in getLower(c.dual):
Gamma.add(c)
if not Gamma:
linkNodes(zeta, graph["$"])
else: | linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
return
# Algorithm 3
def sparseCrossing(a, b, graph, count):
psiCount = 1
A = getConstrainedLower(a, graph["atoms"]).difference(getLower(b))
U = set()
for phi in A:
U = set()
B = getConstrainedLower(b, graph["atoms"])
Delta = graph["dualAtoms"].difference(getLower(phi.dual))
flag = True
while Delta or flag:
epsilon = B.pop()
DeltaP = Delta.intersection(getLower(epsilon.dual))
if not Delta or (not Delta.issubset(DeltaP) or not DeltaP.issubset(Delta)):
psi = Node(name="psi" + str(psiCount))
dpsi = Node(name="[psi"+ str(psiCount)+"]")
linkDuals(psi, dpsi)
fullLink(psi, phi)
fullLink(psi, epsilon)
graph["atoms"].add(psi)
graph["atoms*"].add(dpsi)
Delta = DeltaP
U.add(epsilon)
psiCount += 1
flag = False
ecount = 1
for epsilon in U:
epsilonp = Node("epsilon'"+str(ecount))
depsilonp = Node("[epsilon'"+str(ecount)+"]")
linkDuals(epsilonp, depsilonp)
fullLink(epsilonp, epsilon)
graph["atoms"].add(epsilonp)
graph["atoms*"].add(depsilonp)
ecount += 1
#for node in A.union(U):
# deleteNode(atom, graph, "atoms", "atoms*")
Joe = set()
for atom in graph["atoms"]:
if atom.children:
Joe.add(atom)
for atom in Joe:
deleteNode(atom, graph, "atoms", "atoms*")
return graph
def cross(graph):
count = 1
for pos in graph["pterms"]:
sparseCrossing(graph["$"], pos, graph, count)
count += 1
return
# Algorithm 4
def reduceAtoms(graph):
Q = set()
Lambda = graph["constants"]
while Lambda:
c = Lambda.pop()
Sc = Q.intersection(getLower(c))
Wc = graph["dualAtoms"]
if Sc:
for phi in Sc:
Wc = Wc.intersection(getConstrainedLower(phi, graph["dualAtoms"]))
Phic = set([x.dual for x in getConstrainedLower(c, graph["atoms"])])
T = getTrace(c, graph)
count = 0
while (not T.issubset(Wc) or not Wc.issubset(T)) and Wc:
eta = Wc.difference(T).pop()
temp = Phic.difference(getUpper(eta))
phi = list(temp)[count%len(temp)].dual
count += 1
Q.add(phi)
Wc = Wc.intersection(getConstrainedLower(phi.dual, graph["dualAtoms"]))
for atom in graph["atoms"].difference(Q.union([graph["Base"]])):
deleteNode(atom, graph, "atoms", "atoms*")
# Observe Atoms
def obeserveAtoms(graph):
importantAtoms = graph["$"].children
importantAtoms.discard(graph["Base"])
for atom in importantAtoms:
pset = atom.parents
print(" or ".join(map(lambda x: x.name, pset.difference(set([graph["$"]])))))
def verifyTraceConstraints(graph):
for neg in graph["nterms"]:
if traceConstraint(graph["$"], neg, graph):
return False
for pos in graph["pterms"]:
if not traceConstraint(graph["$"], pos, graph):
return False
return True
def readData(file):
with open(file, 'r') as data:
labels = list(map(lambda x: x.strip(), data.readline().split(",")))
records = []
for line in data:
records.append(list(map(lambda x: x.strip(),line.split(","))))
return labels, records
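# Expected input layout (sketch; the real invert.data file is not shown here).
# The first line holds column labels, the first column names each term, the
# middle columns hold attribute values, and the last column is the 1/0 class:
#
#   name,in1,in2,out
#   t1,0,0,1
#   t2,0,1,1
#   t3,1,0,1
#   t4,1,1,0
#
# createGraph() below turns every "label_value" pair (e.g. "in1_0") into a
# constant node, and classify() checks a test record against those constants.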
def classify(dataList, labels, graph):
consts = {}
for const in graph["constants"]:
consts[const.name] = const.children
atoms = set()
for i in range(1, len(dataList)):
attr = labels[i] + "_" + dataList[i]
if attr in consts:
atoms = atoms.union(consts[attr])
return graph["$"].children.issubset(atoms)
def createGraph(labels, records):
consts = {}
layers = {"pterms":set(), "nterms":set(), "constants":set(),
"atoms":set(), "terms*":set(), "constants*":set(),
"atoms*":set(), "dualAtoms":set(), "$":None, "Base":None}
consts["$"] = Node(name="$")
cdual = Node(name="[$]")
linkDuals(cdual, consts["$"])
zero = Node(name="0")
zdual = Node(name="[0]")
linkDuals(zero, zdual)
dualBase = Node(name="0*")
for record in records:
term = Node(name=record[0])
dualTerm = Node(name="["+record[0]+"]")
linkDuals(term, dualTerm)
for i in range(1, len(record)-1):
name = labels[i] + "_" + record[i]
if name in consts:
fullLink(consts[name], term)
else:
consts[name] = Node(name=name)
dualConst = Node(name="["+name+"]")
linkDuals(consts[name], dualConst)
fullLink(consts[name], term)
if record[~0] == "1":
linkNodes(term.dual, consts["$"].dual)
layers["pterms"].add(term)
else:
layers["nterms"].add(term)
linkNodes(dualBase, term.dual)
layers["terms*"].add(dualTerm)
for const in consts:
fullLink(zero, consts[const])
layers["constants"].add(consts[const])
layers["constants*"].add(consts[const].dual)
layers["dualAtoms"].add(dualBase)
layers["atoms"].add(zero)
layers["atoms*"].add(zdual)
layers["$"] = consts["$"]
layers["Base"] = zero
return layers
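# Shape of the returned structure: "pterms"/"nterms" hold the positive and
# negative training terms, "constants" the per-attribute "label_value" nodes,
# "atoms" the atom layer (initially just the zero atom under every constant),
# and the "*" keys the corresponding dual layers. "$" is the class constant
# (positive term duals hang under its dual), "Base" is the zero atom, and the
# lone dual atom "0*" starts out below every term dual.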
inputFile = "invert.data"#sys.argv[1]
testFile = "invertTest.data"#sys.argv[2]
labels, records = readData(inputFile)
layers = createGraph(labels, records)
enforceNegativeTraceConstraints(layers)
enforcePositiveTraceContraints(layers)
print(verifyTraceConstraints(layers))
cross(layers)
#reduceAtoms(layers)
correct = 0
total = 0
with open(testFile, 'r') as file:
for line in file:
data = list(map(lambda x: x.strip(), line.split(",")))
classified = int(classify(data[:-1], labels, layers))
trueLabel = int(data[~0])
total += 1
correct += int(trueLabel == classified)
print(data[0], ":", classified, trueLabel == classified)
print("Error Margin:", correct / total)
obeserveAtoms(layers) | c = Gamma.pop()
phi = Node(name="phi"+str(phiCount))
dphi = Node(name="[phi"+str(phiCount)+"]")
phiCount += 1 | random_line_split |
aml.py | import sys
class Node(object):
def __init__(self, name=""):
self.children = set()
self.parents = set()
self.name = name
self.dual = None
def addChild(self, node):
self.children.add(node)
def removeChild(self, node):
self.children.discard(node)
def addParent(self, node):
self.parents.add(node)
def removeParent(self, node):
self.parents.discard(node)
def addDual(self, node):
self.dual.add(node)
def __str__(self):
return self.name
def linkNodes(child, parent):
child.addParent(parent)
parent.addChild(child)
def linkDuals(node, dual):
node.dual = dual
dual.dual = node
def fullLink(child, parent):
linkNodes(child, parent)
linkNodes(parent.dual, child.dual)
def deleteNode(node, layers, layer, dlayer):
for parent in node.parents:
parent.removeChild(node)
parent.dual.removeParent(node)
for child in node.children:
child.removeParent(node)
child.dual.removeChild(node)
layers[layer].discard(node)
if dlayer:
layers[dlayer].discard(node.dual)
for child in node.children:
|
return layers
def getLower(node):
lowerset = set([node])
for child in node.children:
lowerset = lowerset.union(getLower(child))
return lowerset
def getConstrainedLower(node, layer):
return getLower(node).intersection(layer)
def getUpper(node):
upperset = set()
for parent in node.parents:
upperset = upperset.union(getLower(parent))
return upperset
def getConstrainedUpper(node, layer):
return getUpper(node).intersection(layer)
def getTrace(node, layers):
atoms = getConstrainedLower(node, layers["atoms"])
trace = layers["dualAtoms"]
for atom in atoms:
trace = trace.intersection(getLower(atom.dual))
return trace
def traceConstraint(a, b, layers):
return getTrace(b, layers).issubset(getTrace(a, layers))
def findStronglyDiscriminantCoefficient(a, b, graph):
Omega = set(map(lambda c: c.dual, getConstrainedLower(a, graph["constants"])))
U = getTrace(b, graph)
while U:
zeta = U.pop()
T = Omega.difference(getUpper(zeta))
if T:
c = T.pop()
return c.dual
return None
# Algorithm 1
def enforceNegativeTraceConstraints(graph):
# Preprocessing Step
count = 1
for neg in graph["nterms"]:
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, neg.dual)
graph["dualAtoms"].add(zeta)
count += 1
# Main Algorithm
for neg in graph["nterms"]:
if not traceConstraint(graph["$"], neg, graph):
continue
c = None
while not c:
c = findStronglyDiscriminantCoefficient(graph["$"], neg, graph)
if not c:
h = getConstrainedLower(neg.dual, graph["terms*"]).difference(getLower(graph["$"].dual)).pop()
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, h.dual)
graph["dualAtoms"].add(zeta)
count += 1
phi = Node(name="phi")
dphi = Node(name="[phi]")
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
# Algorithm 2
def enforcePositiveTraceContraints(graph):
phiCount = 1
for pos in graph["pterms"]:
while not traceConstraint(graph["$"], pos, graph):
zeta = getTrace(pos, graph).difference(getTrace(graph["$"], graph)).pop()
Gamma = set()
for c in getConstrainedLower(pos, graph["constants"]):
if zeta not in getLower(c.dual):
Gamma.add(c)
if not Gamma:
linkNodes(zeta, graph["$"])
else:
c = Gamma.pop()
phi = Node(name="phi"+str(phiCount))
dphi = Node(name="[phi"+str(phiCount)+"]")
phiCount += 1
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
return
# Algorithm 3
def sparseCrossing(a, b, graph, count):
psiCount = 1
A = getConstrainedLower(a, graph["atoms"]).difference(getLower(b))
U = set()
for phi in A:
U = set()
B = getConstrainedLower(b, graph["atoms"])
Delta = graph["dualAtoms"].difference(getLower(phi.dual))
flag = True
while Delta or flag:
epsilon = B.pop()
DeltaP = Delta.intersection(getLower(epsilon.dual))
if not Delta or (not Delta.issubset(DeltaP) or not DeltaP.issubset(Delta)):
psi = Node(name="psi" + str(psiCount))
dpsi = Node(name="[psi"+ str(psiCount)+"]")
linkDuals(psi, dpsi)
fullLink(psi, phi)
fullLink(psi, epsilon)
graph["atoms"].add(psi)
graph["atoms*"].add(dpsi)
Delta = DeltaP
U.add(epsilon)
psiCount += 1
flag = False
ecount = 1
for epsilon in U:
epsilonp = Node("epsilon'"+str(ecount))
depsilonp = Node("[epsilon'"+str(ecount)+"]")
linkDuals(epsilonp, depsilonp)
fullLink(epsilonp, epsilon)
graph["atoms"].add(epsilonp)
graph["atoms*"].add(depsilonp)
ecount += 1
#for node in A.union(U):
# deleteNode(atom, graph, "atoms", "atoms*")
Joe = set()
for atom in graph["atoms"]:
if atom.children:
Joe.add(atom)
for atom in Joe:
deleteNode(atom, graph, "atoms", "atoms*")
return graph
def cross(graph):
count = 1
for pos in graph["pterms"]:
sparseCrossing(graph["$"], pos, graph, count)
count += 1
return
# Algorithm 4
def reduceAtoms(graph):
Q = set()
Lambda = graph["constants"]
while Lambda:
c = Lambda.pop()
Sc = Q.intersection(getLower(c))
Wc = graph["dualAtoms"]
if Sc:
for phi in Sc:
Wc = Wc.intersection(getConstrainedLower(phi, graph["dualAtoms"]))
Phic = set([x.dual for x in getConstrainedLower(c, graph["atoms"])])
T = getTrace(c, graph)
count = 0
while (not T.issubset(Wc) or not Wc.issubset(T)) and Wc:
eta = Wc.difference(T).pop()
temp = Phic.difference(getUpper(eta))
phi = list(temp)[count%len(temp)].dual
count += 1
Q.add(phi)
Wc = Wc.intersection(getConstrainedLower(phi.dual, graph["dualAtoms"]))
for atom in graph["atoms"].difference(Q.union([graph["Base"]])):
deleteNode(atom, graph, "atoms", "atoms*")
# Observe Atoms
def obeserveAtoms(graph):
importantAtoms = graph["$"].children
importantAtoms.discard(graph["Base"])
for atom in importantAtoms:
pset = atom.parents
print(" or ".join(map(lambda x: x.name, pset.difference(set([graph["$"]])))))
def verifyTraceConstraints(graph):
for neg in graph["nterms"]:
if traceConstraint(graph["$"], neg, graph):
return False
for pos in graph["pterms"]:
if not traceConstraint(graph["$"], pos, graph):
return False
return True
def readData(file):
with open(file, 'r') as data:
labels = list(map(lambda x: x.strip(), data.readline().split(",")))
records = []
for line in data:
records.append(list(map(lambda x: x.strip(),line.split(","))))
return labels, records
def classify(dataList, labels, graph):
consts = {}
for const in graph["constants"]:
consts[const.name] = const.children
atoms = set()
for i in range(1, len(dataList)):
attr = labels[i] + "_" + dataList[i]
if attr in consts:
atoms = atoms.union(consts[attr])
return graph["$"].children.issubset(atoms)
def createGraph(labels, records):
consts = {}
layers = {"pterms":set(), "nterms":set(), "constants":set(),
"atoms":set(), "terms*":set(), "constants*":set(),
"atoms*":set(), "dualAtoms":set(), "$":None, "Base":None}
consts["$"] = Node(name="$")
cdual = Node(name="[$]")
linkDuals(cdual, consts["$"])
zero = Node(name="0")
zdual = Node(name="[0]")
linkDuals(zero, zdual)
dualBase = Node(name="0*")
for record in records:
term = Node(name=record[0])
dualTerm = Node(name="["+record[0]+"]")
linkDuals(term, dualTerm)
for i in range(1, len(record)-1):
name = labels[i] + "_" + record[i]
if name in consts:
fullLink(consts[name], term)
else:
consts[name] = Node(name=name)
dualConst = Node(name="["+name+"]")
linkDuals(consts[name], dualConst)
fullLink(consts[name], term)
if record[~0] == "1":
linkNodes(term.dual, consts["$"].dual)
layers["pterms"].add(term)
else:
layers["nterms"].add(term)
linkNodes(dualBase, term.dual)
layers["terms*"].add(dualTerm)
for const in consts:
fullLink(zero, consts[const])
layers["constants"].add(consts[const])
layers["constants*"].add(consts[const].dual)
layers["dualAtoms"].add(dualBase)
layers["atoms"].add(zero)
layers["atoms*"].add(zdual)
layers["$"] = consts["$"]
layers["Base"] = zero
return layers
inputFile = "invert.data"#sys.argv[1]
testFile = "invertTest.data"#sys.argv[2]
labels, records = readData(inputFile)
layers = createGraph(labels, records)
enforceNegativeTraceConstraints(layers)
enforcePositiveTraceContraints(layers)
print(verifyTraceConstraints(layers))
cross(layers)
#reduceAtoms(layers)
correct = 0
total = 0
with open(testFile, 'r') as file:
for line in file:
data = list(map(lambda x: x.strip(), line.split(",")))
classified = int(classify(data[:-1], labels, layers))
trueLabel = int(data[~0])
total += 1
correct += int(trueLabel == classified)
print(data[0], ":", classified, trueLabel == classified)
print("Error Margin:", correct / total)
obeserveAtoms(layers)
| for parent in node.parents:
fullLink(child, parent) | conditional_block |
aml.py | import sys
class Node(object):
def __init__(self, name=""):
self.children = set()
self.parents = set()
self.name = name
self.dual = None
def addChild(self, node):
self.children.add(node)
def removeChild(self, node):
self.children.discard(node)
def addParent(self, node):
self.parents.add(node)
def removeParent(self, node):
self.parents.discard(node)
def addDual(self, node):
self.dual.add(node)
def __str__(self):
return self.name
def linkNodes(child, parent):
child.addParent(parent)
parent.addChild(child)
def | (node, dual):
node.dual = dual
dual.dual = node
def fullLink(child, parent):
linkNodes(child, parent)
linkNodes(parent.dual, child.dual)
def deleteNode(node, layers, layer, dlayer):
for parent in node.parents:
parent.removeChild(node)
parent.dual.removeParent(node)
for child in node.children:
child.removeParent(node)
child.dual.removeChild(node)
layers[layer].discard(node)
if dlayer:
layers[dlayer].discard(node.dual)
for child in node.children:
for parent in node.parents:
fullLink(child, parent)
return layers
def getLower(node):
lowerset = set([node])
for child in node.children:
lowerset = lowerset.union(getLower(child))
return lowerset
def getConstrainedLower(node, layer):
return getLower(node).intersection(layer)
def getUpper(node):
upperset = set()
for parent in node.parents:
upperset = upperset.union(getLower(parent))
return upperset
def getConstrainedUpper(node, layer):
return getUpper(node).intersection(layer)
def getTrace(node, layers):
atoms = getConstrainedLower(node, layers["atoms"])
trace = layers["dualAtoms"]
for atom in atoms:
trace = trace.intersection(getLower(atom.dual))
return trace
def traceConstraint(a, b, layers):
return getTrace(b, layers).issubset(getTrace(a, layers))
def findStronglyDiscriminantCoefficient(a, b, graph):
Omega = set(map(lambda c: c.dual, getConstrainedLower(a, graph["constants"])))
U = getTrace(b, graph)
while U:
zeta = U.pop()
T = Omega.difference(getUpper(zeta))
if T:
c = T.pop()
return c.dual
return None
# Algorithm 1
def enforceNegativeTraceConstraints(graph):
# Preprocessing Step
count = 1
for neg in graph["nterms"]:
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, neg.dual)
graph["dualAtoms"].add(zeta)
count += 1
# Main Algorithm
for neg in graph["nterms"]:
if not traceConstraint(graph["$"], neg, graph):
continue
c = None
while not c:
c = findStronglyDiscriminantCoefficient(graph["$"], neg, graph)
if not c:
h = getConstrainedLower(neg.dual, graph["terms*"]).difference(getLower(graph["$"].dual)).pop()
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, h.dual)
graph["dualAtoms"].add(zeta)
count += 1
phi = Node(name="phi")
dphi = Node(name="[phi]")
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
# Algorithm 2
def enforcePositiveTraceConstraints(graph):
phiCount = 1
for pos in graph["pterms"]:
while not traceConstraint(graph["$"], pos, graph):
zeta = getTrace(pos, graph).difference(getTrace(graph["$"], graph)).pop()
Gamma = set()
for c in getConstrainedLower(pos, graph["constants"]):
if zeta not in getLower(c.dual):
Gamma.add(c)
if not Gamma:
linkNodes(zeta, graph["$"])
else:
c = Gamma.pop()
phi = Node(name="phi"+str(phiCount))
dphi = Node(name="[phi"+str(phiCount)+"]")
phiCount += 1
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
return
# Algorithm 3
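# sparseCrossing(a, b): for each atom under a but not under b, add fresh psi
# atoms placed under both that atom and selected atoms under b (shrinking Delta
# each step), duplicate the atoms of b that were used, and finally discard any
# atom that is no longer minimal (i.e. that acquired children).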
def sparseCrossing(a, b, graph, count):
psiCount = 1
A = getConstrainedLower(a, graph["atoms"]).difference(getLower(b))
U = set()
for phi in A:
U = set()
B = getConstrainedLower(b, graph["atoms"])
Delta = graph["dualAtoms"].difference(getLower(phi.dual))
flag = True
while Delta or flag:
epsilon = B.pop()
DeltaP = Delta.intersection(getLower(epsilon.dual))
if not Delta or (not Delta.issubset(DeltaP) or not DeltaP.issubset(Delta)):
psi = Node(name="psi" + str(psiCount))
dpsi = Node(name="[psi"+ str(psiCount)+"]")
linkDuals(psi, dpsi)
fullLink(psi, phi)
fullLink(psi, epsilon)
graph["atoms"].add(psi)
graph["atoms*"].add(dpsi)
Delta = DeltaP
U.add(epsilon)
psiCount += 1
flag = False
ecount = 1
for epsilon in U:
epsilonp = Node("epsilon'"+str(ecount))
depsilonp = Node("[epsilon'"+str(ecount)+"]")
linkDuals(epsilonp, depsilonp)
fullLink(epsilonp, epsilon)
graph["atoms"].add(epsilonp)
graph["atoms*"].add(depsilonp)
ecount += 1
#for node in A.union(U):
# deleteNode(atom, graph, "atoms", "atoms*")
Joe = set()
for atom in graph["atoms"]:
if atom.children:
Joe.add(atom)
for atom in Joe:
deleteNode(atom, graph, "atoms", "atoms*")
return graph
def cross(graph):
count = 1
for pos in graph["pterms"]:
sparseCrossing(graph["$"], pos, graph, count)
count += 1
return
# Algorithm 4
def reduceAtoms(graph):
Q = set()
Lambda = graph["constants"]
while Lambda:
c = Lambda.pop()
Sc = Q.intersection(getLower(c))
Wc = graph["dualAtoms"]
if Sc:
for phi in Sc:
Wc = Wc.intersection(getConstrainedLower(phi, graph["dualAtoms"]))
Phic = set([x.dual for x in getConstrainedLower(c, graph["atoms"])])
T = getTrace(c, graph)
count = 0
while (not T.issubset(Wc) or not Wc.issubset(T)) and Wc:
eta = Wc.difference(T).pop()
temp = Phic.difference(getUpper(eta))
phi = list(temp)[count%len(temp)].dual
count += 1
Q.add(phi)
Wc = Wc.intersection(getConstrainedLower(phi.dual, graph["dualAtoms"]))
for atom in graph["atoms"].difference(Q.union([graph["Base"]])):
deleteNode(atom, graph, "atoms", "atoms*")
# Observe Atoms
def observeAtoms(graph):
importantAtoms = graph["$"].children
importantAtoms.discard(graph["Base"])
for atom in importantAtoms:
pset = atom.parents
print(" or ".join(map(lambda x: x.name, pset.difference(set([graph["$"]])))))
def verifyTraceConstraints(graph):
for neg in graph["nterms"]:
if traceConstraint(graph["$"], neg, graph):
return False
for pos in graph["pterms"]:
if not traceConstraint(graph["$"], pos, graph):
return False
return True
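# Data file layout: the first line lists the attribute labels; every other line
# is a record "name,attr1,...,attrN,label" with label "1" marking a positive
# example (an assumed illustrative row: "t1,red,square,1").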
def readData(file):
with open(file, 'r') as data:
labels = list(map(lambda x: x.strip(), data.readline().split(",")))
records = []
for line in data:
records.append(list(map(lambda x: x.strip(),line.split(","))))
return labels, records
def classify(dataList, labels, graph):
consts = {}
for const in graph["constants"]:
consts[const.name] = const.children
atoms = set()
for i in range(1, len(dataList)):
attr = labels[i] + "_" + dataList[i]
if attr in consts:
atoms = atoms.union(consts[attr])
return graph["$"].children.issubset(atoms)
def createGraph(labels, records):
consts = {}
layers = {"pterms":set(), "nterms":set(), "constants":set(),
"atoms":set(), "terms*":set(), "constants*":set(),
"atoms*":set(), "dualAtoms":set(), "$":None, "Base":None}
consts["$"] = Node(name="$")
cdual = Node(name="[$]")
linkDuals(cdual, consts["$"])
zero = Node(name="0")
zdual = Node(name="[0]")
linkDuals(zero, zdual)
dualBase = Node(name="0*")
for record in records:
term = Node(name=record[0])
dualTerm = Node(name="["+record[0]+"]")
linkDuals(term, dualTerm)
for i in range(1, len(record)-1):
name = labels[i] + "_" + record[i]
if name in consts:
fullLink(consts[name], term)
else:
consts[name] = Node(name=name)
dualConst = Node(name="["+name+"]")
linkDuals(consts[name], dualConst)
fullLink(consts[name], term)
if record[~0] == "1":
linkNodes(term.dual, consts["$"].dual)
layers["pterms"].add(term)
else:
layers["nterms"].add(term)
linkNodes(dualBase, term.dual)
layers["terms*"].add(dualTerm)
for const in consts:
fullLink(zero, consts[const])
layers["constants"].add(consts[const])
layers["constants*"].add(consts[const].dual)
layers["dualAtoms"].add(dualBase)
layers["atoms"].add(zero)
layers["atoms*"].add(zdual)
layers["$"] = consts["$"]
layers["Base"] = zero
return layers
inputFile = "invert.data"#sys.argv[1]
testFile = "invertTest.data"#sys.argv[2]
labels, records = readData(inputFile)
layers = createGraph(labels, records)
enforceNegativeTraceConstraints(layers)
enforcePositiveTraceConstraints(layers)
print(verifyTraceConstraints(layers))
cross(layers)
#reduceAtoms(layers)
correct = 0
total = 0
with open(testFile, 'r') as file:
for line in file:
data = list(map(lambda x: x.strip(), line.split(",")))
classified = int(classify(data[:-1], labels, layers))
trueLabel = int(data[~0])
total += 1
correct += int(trueLabel == classified)
print(data[0], ":", classified, trueLabel == classified)
print("Error Margin:", correct / total)
observeAtoms(layers)
| linkDuals | identifier_name |
aml.py | import sys
class Node(object):
def __init__(self, name=""):
self.children = set()
self.parents = set()
self.name = name
self.dual = None
def addChild(self, node):
self.children.add(node)
def removeChild(self, node):
self.children.discard(node)
def addParent(self, node):
self.parents.add(node)
def removeParent(self, node):
self.parents.discard(node)
def addDual(self, node):
        self.dual = node
def __str__(self):
return self.name
def linkNodes(child, parent):
child.addParent(parent)
parent.addChild(child)
def linkDuals(node, dual):
node.dual = dual
dual.dual = node
def fullLink(child, parent):
linkNodes(child, parent)
linkNodes(parent.dual, child.dual)
def deleteNode(node, layers, layer, dlayer):
for parent in node.parents:
parent.removeChild(node)
parent.dual.removeParent(node)
for child in node.children:
child.removeParent(node)
child.dual.removeChild(node)
layers[layer].discard(node)
if dlayer:
layers[dlayer].discard(node.dual)
for child in node.children:
for parent in node.parents:
fullLink(child, parent)
return layers
def getLower(node):
lowerset = set([node])
for child in node.children:
lowerset = lowerset.union(getLower(child))
return lowerset
def getConstrainedLower(node, layer):
return getLower(node).intersection(layer)
def getUpper(node):
upperset = set()
for parent in node.parents:
upperset = upperset.union(getLower(parent))
return upperset
def getConstrainedUpper(node, layer):
return getUpper(node).intersection(layer)
def getTrace(node, layers):
atoms = getConstrainedLower(node, layers["atoms"])
trace = layers["dualAtoms"]
for atom in atoms:
trace = trace.intersection(getLower(atom.dual))
return trace
def traceConstraint(a, b, layers):
return getTrace(b, layers).issubset(getTrace(a, layers))
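# Returns a constant under a whose dual is not above some element of b's trace,
# or None when no such constant exists.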
def findStronglyDiscriminantCoefficient(a, b, graph):
Omega = set(map(lambda c: c.dual, getConstrainedLower(a, graph["constants"])))
    U = getTrace(b, graph)
while U:
zeta = U.pop()
T = Omega.difference(getUpper(zeta))
if T:
c = T.pop()
return c.dual
return None
# Algorithm 1
def enforceNegativeTraceConstraints(graph):
# Preprocessing Step
count = 1
for neg in graph["nterms"]:
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, neg.dual)
graph["dualAtoms"].add(zeta)
count += 1
# Main Algorithm
for neg in graph["nterms"]:
if not traceConstraint(graph["$"], neg, graph):
continue
c = None
while not c:
c = findStronglyDiscriminantCoefficient(graph["$"], neg, graph)
if not c:
h = getConstrainedLower(neg.dual, graph["terms*"]).difference(getLower(graph["$"].dual)).pop()
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, h.dual)
graph["dualAtoms"].add(zeta)
count += 1
phi = Node(name="phi")
dphi = Node(name="[phi]")
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
# Algorithm 2
def enforcePositiveTraceConstraints(graph):
phiCount = 1
for pos in graph["pterms"]:
while not traceConstraint(graph["$"], pos, graph):
zeta = getTrace(pos, graph).difference(getTrace(graph["$"], graph)).pop()
Gamma = set()
for c in getConstrainedLower(pos, graph["constants"]):
if zeta not in getLower(c.dual):
Gamma.add(c)
if not Gamma:
linkNodes(zeta, graph["$"])
else:
c = Gamma.pop()
phi = Node(name="phi"+str(phiCount))
dphi = Node(name="[phi"+str(phiCount)+"]")
phiCount += 1
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
return
# Algorithm 3
def sparseCrossing(a, b, graph, count):
psiCount = 1
A = getConstrainedLower(a, graph["atoms"]).difference(getLower(b))
U = set()
for phi in A:
U = set()
B = getConstrainedLower(b, graph["atoms"])
Delta = graph["dualAtoms"].difference(getLower(phi.dual))
flag = True
while Delta or flag:
epsilon = B.pop()
DeltaP = Delta.intersection(getLower(epsilon.dual))
if not Delta or (not Delta.issubset(DeltaP) or not DeltaP.issubset(Delta)):
psi = Node(name="psi" + str(psiCount))
dpsi = Node(name="[psi"+ str(psiCount)+"]")
linkDuals(psi, dpsi)
fullLink(psi, phi)
fullLink(psi, epsilon)
graph["atoms"].add(psi)
graph["atoms*"].add(dpsi)
Delta = DeltaP
U.add(epsilon)
psiCount += 1
flag = False
ecount = 1
for epsilon in U:
epsilonp = Node("epsilon'"+str(ecount))
depsilonp = Node("[epsilon'"+str(ecount)+"]")
linkDuals(epsilonp, depsilonp)
fullLink(epsilonp, epsilon)
graph["atoms"].add(epsilonp)
graph["atoms*"].add(depsilonp)
ecount += 1
#for node in A.union(U):
# deleteNode(atom, graph, "atoms", "atoms*")
Joe = set()
for atom in graph["atoms"]:
if atom.children:
Joe.add(atom)
for atom in Joe:
deleteNode(atom, graph, "atoms", "atoms*")
return graph
def cross(graph):
count = 1
for pos in graph["pterms"]:
sparseCrossing(graph["$"], pos, graph, count)
count += 1
return
# Algorithm 4
def reduceAtoms(graph):
Q = set()
Lambda = graph["constants"]
while Lambda:
c = Lambda.pop()
Sc = Q.intersection(getLower(c))
Wc = graph["dualAtoms"]
if Sc:
for phi in Sc:
Wc = Wc.intersection(getConstrainedLower(phi, graph["dualAtoms"]))
Phic = set([x.dual for x in getConstrainedLower(c, graph["atoms"])])
T = getTrace(c, graph)
count = 0
while (not T.issubset(Wc) or not Wc.issubset(T)) and Wc:
eta = Wc.difference(T).pop()
temp = Phic.difference(getUpper(eta))
phi = list(temp)[count%len(temp)].dual
count += 1
Q.add(phi)
Wc = Wc.intersection(getConstrainedLower(phi.dual, graph["dualAtoms"]))
for atom in graph["atoms"].difference(Q.union([graph["Base"]])):
deleteNode(atom, graph, "atoms", "atoms*")
# Observe Atoms
def observeAtoms(graph):
importantAtoms = graph["$"].children
importantAtoms.discard(graph["Base"])
for atom in importantAtoms:
pset = atom.parents
print(" or ".join(map(lambda x: x.name, pset.difference(set([graph["$"]])))))
def verifyTraceConstraints(graph):
for neg in graph["nterms"]:
if traceConstraint(graph["$"], neg, graph):
return False
for pos in graph["pterms"]:
if not traceConstraint(graph["$"], pos, graph):
return False
return True
def readData(file):
    with open(file, 'r') as data:
        labels = list(map(lambda x: x.strip(), data.readline().split(",")))
        records = []
        for line in data:
            records.append(list(map(lambda x: x.strip(), line.split(","))))
        return labels, records
def classify(dataList, labels, graph):
consts = {}
for const in graph["constants"]:
consts[const.name] = const.children
atoms = set()
for i in range(1, len(dataList)):
attr = labels[i] + "_" + dataList[i]
if attr in consts:
atoms = atoms.union(consts[attr])
return graph["$"].children.issubset(atoms)
def createGraph(labels, records):
consts = {}
layers = {"pterms":set(), "nterms":set(), "constants":set(),
"atoms":set(), "terms*":set(), "constants*":set(),
"atoms*":set(), "dualAtoms":set(), "$":None, "Base":None}
consts["$"] = Node(name="$")
cdual = Node(name="[$]")
linkDuals(cdual, consts["$"])
zero = Node(name="0")
zdual = Node(name="[0]")
linkDuals(zero, zdual)
dualBase = Node(name="0*")
for record in records:
term = Node(name=record[0])
dualTerm = Node(name="["+record[0]+"]")
linkDuals(term, dualTerm)
for i in range(1, len(record)-1):
name = labels[i] + "_" + record[i]
if name in consts:
fullLink(consts[name], term)
else:
consts[name] = Node(name=name)
dualConst = Node(name="["+name+"]")
linkDuals(consts[name], dualConst)
fullLink(consts[name], term)
if record[~0] == "1":
linkNodes(term.dual, consts["$"].dual)
layers["pterms"].add(term)
else:
layers["nterms"].add(term)
linkNodes(dualBase, term.dual)
layers["terms*"].add(dualTerm)
for const in consts:
fullLink(zero, consts[const])
layers["constants"].add(consts[const])
layers["constants*"].add(consts[const].dual)
layers["dualAtoms"].add(dualBase)
layers["atoms"].add(zero)
layers["atoms*"].add(zdual)
layers["$"] = consts["$"]
layers["Base"] = zero
return layers
inputFile = "invert.data"#sys.argv[1]
testFile = "invertTest.data"#sys.argv[2]
labels, records = readData(inputFile)
layers = createGraph(labels, records)
enforceNegativeTraceConstraints(layers)
enforcePositiveTraceConstraints(layers)
print(verifyTraceConstraints(layers))
cross(layers)
#reduceAtoms(layers)
correct = 0
total = 0
with open(testFile, 'r') as file:
for line in file:
data = list(map(lambda x: x.strip(), line.split(",")))
classified = int(classify(data[:-1], labels, layers))
trueLabel = int(data[~0])
total += 1
correct += int(trueLabel == classified)
print(data[0], ":", classified, trueLabel == classified)
print("Error Margin:", correct / total)
observeAtoms(layers)
| with open(file, 'r') as data:
labels = list(map(lambda x: x.strip(), data.readline().split(",")))
records = []
for line in data:
records.append(list(map(lambda x: x.strip(),line.split(","))))
return labels, records | identifier_body |
table.go | package table
import (
"fmt"
"os"
"os/exec"
"os/signal"
"strconv"
"strings"
"sync"
"syscall"
"time"
slice "sort"
humanize "github.com/dustin/go-humanize"
cmc "github.com/miguelmota/go-coinmarketcap/pro/v1"
gc "github.com/rthornton128/goncurses"
pad "github.com/willf/pad/utf8"
)
var wg sync.WaitGroup
// Service service struct
type Service struct {
stdsrc *gc.Window
screenRows int
screenCols int
mainwin *gc.Window
menuwin *gc.Window
menuwinWidth int
menuwinHeight int
menusubwin *gc.Window
helpbarwin *gc.Window
helpwin *gc.Window
helpVisible bool
logwin *gc.Window
menu *gc.Menu
menuItems []*gc.MenuItem
menuData []string
menuHeader string
menuWidth int
menuHeight int
coins []*cmc.Coin
sortBy string
sortDesc bool
limit uint
refresh uint
primaryColor string
lastLog string
currentItem int
}
// Options options struct
type Options struct {
Color string
Limit uint
Refresh uint
}
var once sync.Once
// New returns new service
func New(opts *Options) *Service {
var instance *Service
// once.Do(func() {
instance = &Service{}
instance.primaryColor = opts.Color
instance.limit = opts.Limit
instance.refresh = opts.Refresh
// })
return instance
}
// Render starts GUI
func (s *Service) Render() error {
var err error
s.stdsrc, err = gc.Init()
defer gc.End()
if err != nil {
return err
}
gc.UseDefaultColors()
gc.StartColor()
s.setColorPairs()
gc.Raw(true)
gc.Echo(false)
gc.Cursor(0)
s.stdsrc.Keypad(true)
cols, rows := GetScreenSize()
s.screenRows = rows
s.screenCols = cols
s.helpVisible = false
s.renderMainWindow()
err = s.fetchData()
if err != nil {
return nil
}
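	// Refresh loop: re-fetch market data and redraw the menu every s.refresh minutes.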
go func() {
ticker := time.NewTicker(time.Duration(int64(s.refresh)) * time.Minute)
for {
select {
case <-ticker.C:
//s.menuwin.Clear()
//s.menuwin.Refresh()
s.fetchData()
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
}
}()
s.sortBy = "rank"
s.sortDesc = false
s.setMenuData()
err = s.renderMenu()
if err != nil {
panic(err)
}
defer s.menu.UnPost()
wg.Add(1)
	resizeChannel := make(chan os.Signal, 1) // buffered, as signal.Notify requires
signal.Notify(resizeChannel, syscall.SIGWINCH)
go s.onWindowResize(resizeChannel)
s.renderLogWindow()
s.renderHelpBar()
s.renderHelpWindow()
//stdsrc.GetChar() // required so it doesn't exit
//wg.Wait()
fields := make([]*gc.Field, 2)
fields[0], _ = gc.NewField(1, 10, 4, 18, 0, 0)
defer fields[0].Free()
fields[0].SetForeground(gc.ColorPair(1))
fields[0].SetBackground(gc.ColorPair(2) | gc.A_UNDERLINE | gc.A_BOLD)
fields[0].SetOptionsOff(gc.FO_AUTOSKIP)
fields[1], _ = gc.NewField(1, 10, 6, 18, 0, 0)
defer fields[1].Free()
fields[1].SetForeground(gc.ColorPair(1))
fields[1].SetBackground(gc.A_UNDERLINE)
fields[1].SetOptionsOff(gc.FO_AUTOSKIP)
fields[1].SetPad('*')
form, _ := gc.NewForm(fields)
form.Post()
form.Driver(gc.REQ_FIRST_FIELD)
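	// Main event loop: map key presses to navigation, sorting, help toggling and quitting.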
for {
gc.Update()
ch := s.menuwin.GetChar()
chstr := fmt.Sprint(ch)
//s.log(fmt.Sprint(ch))
switch {
case ch == gc.KEY_DOWN, chstr == "106": // "j"
if s.currentItem < len(s.menuItems)-1 {
s.currentItem = s.currentItem + 1
s.menu.Current(s.menuItems[s.currentItem])
}
form.Driver(gc.REQ_NEXT_FIELD)
form.Driver(gc.REQ_END_LINE)
case ch == gc.KEY_UP, chstr == "107": // "k"
if s.currentItem > 0 {
s.currentItem = s.currentItem - 1
s.menu.Current(s.menuItems[s.currentItem])
}
case ch == gc.KEY_RETURN, ch == gc.KEY_ENTER, chstr == "32":
s.menu.Driver(gc.REQ_TOGGLE)
for _, item := range s.menu.Items() {
if item.Value() {
s.handleClick(item.Index())
break
}
}
s.menu.Driver(gc.REQ_TOGGLE)
case chstr == "114": // "r"
s.handleSort("rank", false)
case chstr == "110": // "n"
s.handleSort("name", true)
case chstr == "115": // "s"
s.handleSort("symbol", false)
case chstr == "112": // "p
s.handleSort("price", true)
case chstr == "109": // "m
s.handleSort("marketcap", true)
case chstr == "118": // "v
s.handleSort("24hvolume", true)
case chstr == "49": // "1"
s.handleSort("1hchange", true)
case chstr == "50": // "2"
s.handleSort("24hchange", true)
case chstr == "55": // "7"
s.handleSort("7dchange", true)
case chstr == "116": // "t"
s.handleSort("totalsupply", true)
case chstr == "97": // "a"
s.handleSort("availablesupply", true)
case chstr == "108": // "l"
s.handleSort("lastupdated", true)
case chstr == "21": // ctrl-u
s.currentItem = s.currentItem - s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case fmt.Sprint(ch) == "4": // ctrl-d
s.currentItem = s.currentItem + s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case chstr == "104", chstr == "63": // "h", "?"
s.toggleHelp()
case chstr == "3", chstr == "113", chstr == "27": // ctrl-c, "q", esc
if s.helpVisible && chstr == "27" {
s.toggleHelp()
} else {
// quit
return nil
}
default:
s.menu.Driver(gc.DriverActions[ch])
}
}
}
func (s *Service) fetchData() error {
coins, err := cmc.GetAllCoinData(int(s.limit))
if err != nil {
return err
}
s.coins = []*cmc.Coin{}
for i := range coins {
coin := coins[i]
s.coins = append(s.coins, &coin)
}
return nil
}
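// handleClick opens the selected coin's CoinMarketCap page in the browser via the macOS "open" command.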
func (s *Service) handleClick(idx int) {
slug := strings.ToLower(strings.Replace(s.coins[idx].Name, " ", "-", -1))
exec.Command("open", fmt.Sprintf("https://coinmarketcap.com/currencies/%s", slug)).Output()
}
func (s *Service) handleSort(name string, desc bool) {
if s.sortBy == name {
s.sortDesc = !s.sortDesc
} else {
s.sortBy = name
s.sortDesc = desc
}
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
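// setMenuData sorts the cached coins by the active column and rebuilds the padded menu rows and header line.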
func (s *Service) setMenuData() {
slice.Sort(s.coins[:], func(i, j int) bool {
if s.sortDesc == true {
i, j = j, i
}
switch s.sortBy {
case "rank":
return s.coins[i].Rank < s.coins[j].Rank
case "name":
return s.coins[i].Name < s.coins[j].Name
case "symbol":
return s.coins[i].Symbol < s.coins[j].Symbol
case "price":
return s.coins[i].PriceUsd < s.coins[j].PriceUsd
case "marketcap":
return s.coins[i].MarketCapUsd < s.coins[j].MarketCapUsd
case "24hvolume":
return s.coins[i].Usd24hVolume < s.coins[j].Usd24hVolume
case "1hchange":
return s.coins[i].PercentChange1h < s.coins[j].PercentChange1h
case "24hchange":
return s.coins[i].PercentChange24h < s.coins[j].PercentChange24h
case "7dchange":
return s.coins[i].PercentChange7d < s.coins[j].PercentChange7d
case "totalsupply":
return s.coins[i].TotalSupply < s.coins[j].TotalSupply
case "availablesupply":
return s.coins[i].AvailableSupply < s.coins[j].AvailableSupply
case "lastupdated":
return s.coins[i].LastUpdated < s.coins[j].LastUpdated
default:
return s.coins[i].Rank < s.coins[j].Rank
}
})
var menuData []string
for _, coin := range s.coins {
unix, _ := strconv.ParseInt(coin.LastUpdated, 10, 64)
lastUpdated := time.Unix(unix, 0).Format("15:04:05 Jan 02")
fields := []string{
pad.Right(fmt.Sprint(coin.Rank), 4, " "),
pad.Right(coin.Name, 22, " "),
pad.Right(coin.Symbol, 6, " "),
pad.Left(humanize.Commaf(coin.PriceUsd), 12, " "),
pad.Left(humanize.Commaf(coin.MarketCapUsd), 17, " "),
pad.Left(humanize.Commaf(coin.Usd24hVolume), 15, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange1h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange24h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange7d), 9, " "),
pad.Left(humanize.Commaf(coin.TotalSupply), 20, " "),
pad.Left(humanize.Commaf(coin.AvailableSupply), 18, " "),
pad.Left(fmt.Sprintf("%s", lastUpdated), 18, " "),
// add %percent of cap
}
var str string
for _, f := range fields {
str = fmt.Sprintf("%s%s", str, f)
}
menuData = append(menuData, str)
}
s.menuData = menuData
headers := []string{
pad.Right("[r]ank", 13, " "),
pad.Right("[n]ame", 13, " "),
pad.Right("[s]ymbol", 8, " "),
pad.Left("[p]rice", 10, " "),
pad.Left("[m]arket cap", 17, " "),
pad.Left("24H [v]olume", 15, " "),
pad.Left("[1]H%", 9, " "),
pad.Left("[2]4H%", 9, " "),
pad.Left("[7]D%", 9, " "),
pad.Left("[t]otal supply", 20, " "),
pad.Left("[a]vailable supply", 19, " "),
pad.Left("[l]ast updated", 17, " "),
}
header := ""
for _, h := range headers {
header = fmt.Sprintf("%s%s", header, h)
}
s.menuHeader = header
}
// SetColorPairs sets color pairs
func (s *Service) setColorPairs() {
switch s.primaryColor {
case "green":
gc.InitPair(1, gc.C_GREEN, gc.C_BLACK)
case "cyan", "blue":
gc.InitPair(1, gc.C_CYAN, gc.C_BLACK)
case "magenta", "pink", "purple":
gc.InitPair(1, gc.C_MAGENTA, gc.C_BLACK)
case "white":
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
case "red":
gc.InitPair(1, gc.C_RED, gc.C_BLACK)
case "yellow", "orange":
gc.InitPair(1, gc.C_YELLOW, gc.C_BLACK)
default:
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
}
gc.InitPair(2, gc.C_BLACK, gc.C_BLACK)
gc.InitPair(3, gc.C_BLACK, gc.C_GREEN)
gc.InitPair(4, gc.C_BLACK, gc.C_CYAN)
gc.InitPair(5, gc.C_WHITE, gc.C_BLUE)
gc.InitPair(6, gc.C_BLACK, -1)
}
// RenderMainWindow renders main window
func (s *Service) renderMainWindow() error {
if s.mainwin == nil {
var err error
s.mainwin, err = gc.NewWindow(s.screenRows, s.screenCols, 0, 0)
if err != nil {
return err
}
}
s.mainwin.Clear()
s.mainwin.ColorOn(2)
s.mainwin.MoveWindow(0, 0)
s.mainwin.Resize(s.screenRows, s.screenCols)
s.mainwin.Box(0, 0)
s.mainwin.Refresh()
return nil
}
// ResizeWindows resizes windows
func (s *Service) resizeWindows() {
gc.ResizeTerm(s.screenRows, s.screenCols)
//s.log(fmt.Sprintf("%v %v", s.screenCols, s.screenRows))
s.renderMainWindow()
s.renderMenu()
s.renderHelpBar()
s.renderLogWindow()
s.renderHelpWindow()
}
func (s *Service) renderHelpBar() error {
var err error
if s.helpbarwin == nil {
s.helpbarwin, err = gc.NewWindow(1, s.screenCols, s.screenRows-1, 0)
if err != nil {
return err
}
}
s.helpbarwin.Clear()
s.helpbarwin.Resize(1, s.screenCols)
s.helpbarwin.MoveWindow(s.screenRows-1, 0)
s.helpbarwin.ColorOn(2)
s.helpbarwin.Box(0, 0)
s.helpbarwin.ColorOff(2)
s.helpbarwin.ColorOn(1)
s.helpbarwin.MovePrint(0, 0, "[q]uit [h]elp")
s.helpbarwin.ColorOff(1)
s.helpbarwin.Refresh()
return nil
}
func (s *Service) renderLogWindow() error {
var err error
if s.logwin == nil {
s.logwin, err = gc.NewWindow(1, 20, s.screenRows-1, s.screenCols-20)
if err != nil {
return err
}
}
s.logwin.Clear()
s.logwin.Resize(1, 20)
s.logwin.MoveWindow(s.screenRows-1, s.screenCols-20)
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, s.lastLog)
s.logwin.ColorOff(1)
s.logwin.Refresh()
return nil
}
// Log logs debug messages
func (s *Service) log(msg string) {
s.lastLog = msg
s.logwin.Clear()
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, msg)
s.logwin.ColorOff(1)
s.logwin.Refresh()
}
func (s *Service) toggleHelp() {
s.helpVisible = !s.helpVisible
s.renderHelpWindow()
}
func (s *Service) renderHelpWindow() error {
if !s.helpVisible {
if s.helpwin != nil {
s.helpwin.ClearOk(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(6))
s.helpwin.ColorOn(6)
s.helpwin.Resize(0, 0)
s.helpwin.MoveWindow(200, 200)
s.helpwin.Refresh()
s.renderMenu()
}
return nil
}
var err error
if s.helpwin == nil {
s.helpwin, err = gc.NewWindow(21, 40, (s.screenRows/2)-11, (s.screenCols/2)-20)
if err != nil {
return err
}
}
s.helpwin.Keypad(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(1))
s.helpwin.ColorOn(1)
	s.helpwin.Resize(21, 40)
s.helpwin.MoveWindow((s.screenRows/2)-11, (s.screenCols/2)-20)
s.helpwin.Box(0, 0)
s.helpwin.MovePrint(0, 1, "Help")
s.helpwin.MovePrint(1, 1, "<up> or <k> to navigate up")
s.helpwin.MovePrint(2, 1, "<down> or <j> to navigate down")
s.helpwin.MovePrint(3, 1, "<ctrl-u> to to page up")
s.helpwin.MovePrint(4, 1, "<ctrl-d> to to page down")
s.helpwin.MovePrint(5, 1, "<enter> or <space> to open coin link")
s.helpwin.MovePrint(6, 1, "<1> to sort by 1 hour change")
s.helpwin.MovePrint(7, 1, "<2> to sort by 24 hour volume")
s.helpwin.MovePrint(8, 1, "<7> to sort by 7 day change")
s.helpwin.MovePrint(9, 1, "<a> to sort by available supply")
s.helpwin.MovePrint(10, 1, "<h> or <?> to toggle help")
s.helpwin.MovePrint(11, 1, "<l> to sort by last updated")
s.helpwin.MovePrint(12, 1, "<m> to sort by market cap")
s.helpwin.MovePrint(13, 1, "<n> to sort by name")
s.helpwin.MovePrint(14, 1, "<r> to sort by rank")
s.helpwin.MovePrint(15, 1, "<s> to sort by symbol")
s.helpwin.MovePrint(16, 1, "<t> to sort by total supply") | s.helpwin.MovePrint(18, 1, "<v> to sort by 24 hour volume")
s.helpwin.MovePrint(19, 1, "<q> or <esc> to quit application.")
s.helpwin.Refresh()
return nil
}
// OnWindowResize sends event to channel when resize event occurs
func (s *Service) onWindowResize(channel chan os.Signal) {
//stdScr, _ := gc.Init()
//stdScr.ScrollOk(true)
//gc.NewLines(true)
for {
<-channel
//gc.StdScr().Clear()
//rows, cols := gc.StdScr().MaxYX()
cols, rows := GetScreenSize()
s.screenRows = rows
s.screenCols = cols
s.resizeWindows()
//gc.End()
//gc.Update()
//gc.StdScr().Refresh()
}
}
// RenderMenu renders menu
func (s *Service) renderMenu() error {
s.menuwinWidth = s.screenCols
s.menuwinHeight = s.screenRows - 1
s.menuWidth = s.screenCols
s.menuHeight = s.screenRows - 2
//if len(s.menuItems) == 0 {
items := make([]*gc.MenuItem, len(s.menuData))
var err error
for i, val := range s.menuData {
items[i], err = gc.NewItem(val, "")
if err != nil {
return err
}
//defer items[i].Free()
}
s.menuItems = items
//}
if s.menu == nil {
var err error
s.menu, err = gc.NewMenu(s.menuItems)
if err != nil {
return err
}
} else {
s.menu.UnPost()
s.menu.SetItems(s.menuItems)
s.menu.Current(s.menuItems[s.currentItem])
}
if s.menuwin == nil {
var err error
s.menuwin, err = gc.NewWindow(s.menuwinHeight, s.menuwinWidth, 0, 0)
s.menuwin.ScrollOk(true)
if err != nil {
return err
}
s.menuwin.Keypad(true)
s.menu.SetWindow(s.menuwin)
s.menusubwin = s.menuwin.Derived(s.menuHeight, s.menuWidth, 1, 0)
s.menu.SubWindow(s.menusubwin)
s.menu.Option(gc.O_ONEVALUE, false)
s.menu.Format(s.menuHeight, 0)
s.menu.Mark("")
} else {
s.menusubwin.Resize(s.menuHeight, s.menuWidth)
		s.menuwin.Resize(s.menuwinHeight, s.menuwinWidth)
}
//s.menuwin.Clear()
s.menuwin.ColorOn(2)
s.menuwin.Box(0, 0)
s.menuwin.ColorOff(2)
s.menuwin.ColorOn(1)
s.menuwin.MovePrint(0, 0, s.menuHeader)
s.menuwin.ColorOff(1)
s.menuwin.ColorOn(2)
s.menuwin.MoveAddChar(2, 0, gc.ACS_LTEE)
//s.menuwin.HLine(2, 1, gc.ACS_HLINE, s.screenCols-6)
s.menuwin.ColorOff(2)
s.menu.Post()
s.menuwin.Refresh()
return nil
} | s.helpwin.MovePrint(17, 1, "<p> to sort by price") | random_line_split |
table.go | package table
import (
"fmt"
"os"
"os/exec"
"os/signal"
"strconv"
"strings"
"sync"
"syscall"
"time"
slice "sort"
humanize "github.com/dustin/go-humanize"
cmc "github.com/miguelmota/go-coinmarketcap/pro/v1"
gc "github.com/rthornton128/goncurses"
pad "github.com/willf/pad/utf8"
)
var wg sync.WaitGroup
// Service service struct
type Service struct {
stdsrc *gc.Window
screenRows int
screenCols int
mainwin *gc.Window
menuwin *gc.Window
menuwinWidth int
menuwinHeight int
menusubwin *gc.Window
helpbarwin *gc.Window
helpwin *gc.Window
helpVisible bool
logwin *gc.Window
menu *gc.Menu
menuItems []*gc.MenuItem
menuData []string
menuHeader string
menuWidth int
menuHeight int
coins []*cmc.Coin
sortBy string
sortDesc bool
limit uint
refresh uint
primaryColor string
lastLog string
currentItem int
}
// Options options struct
type Options struct {
Color string
Limit uint
Refresh uint
}
var once sync.Once
// New returns new service
func New(opts *Options) *Service {
var instance *Service
// once.Do(func() {
instance = &Service{}
instance.primaryColor = opts.Color
instance.limit = opts.Limit
instance.refresh = opts.Refresh
// })
return instance
}
// Render starts GUI
func (s *Service) Render() error {
var err error
s.stdsrc, err = gc.Init()
defer gc.End()
if err != nil {
return err
}
gc.UseDefaultColors()
gc.StartColor()
s.setColorPairs()
gc.Raw(true)
gc.Echo(false)
gc.Cursor(0)
s.stdsrc.Keypad(true)
cols, rows := GetScreenSize()
s.screenRows = rows
s.screenCols = cols
s.helpVisible = false
s.renderMainWindow()
err = s.fetchData()
if err != nil {
return nil
}
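	// Refresh loop: re-fetch market data and redraw the menu every s.refresh minutes.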
go func() {
ticker := time.NewTicker(time.Duration(int64(s.refresh)) * time.Minute)
for {
select {
case <-ticker.C:
//s.menuwin.Clear()
//s.menuwin.Refresh()
s.fetchData()
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
}
}()
s.sortBy = "rank"
s.sortDesc = false
s.setMenuData()
err = s.renderMenu()
if err != nil {
panic(err)
}
defer s.menu.UnPost()
wg.Add(1)
	resizeChannel := make(chan os.Signal, 1) // buffered, as signal.Notify requires
signal.Notify(resizeChannel, syscall.SIGWINCH)
go s.onWindowResize(resizeChannel)
s.renderLogWindow()
s.renderHelpBar()
s.renderHelpWindow()
//stdsrc.GetChar() // required so it doesn't exit
//wg.Wait()
fields := make([]*gc.Field, 2)
fields[0], _ = gc.NewField(1, 10, 4, 18, 0, 0)
defer fields[0].Free()
fields[0].SetForeground(gc.ColorPair(1))
fields[0].SetBackground(gc.ColorPair(2) | gc.A_UNDERLINE | gc.A_BOLD)
fields[0].SetOptionsOff(gc.FO_AUTOSKIP)
fields[1], _ = gc.NewField(1, 10, 6, 18, 0, 0)
defer fields[1].Free()
fields[1].SetForeground(gc.ColorPair(1))
fields[1].SetBackground(gc.A_UNDERLINE)
fields[1].SetOptionsOff(gc.FO_AUTOSKIP)
fields[1].SetPad('*')
form, _ := gc.NewForm(fields)
form.Post()
form.Driver(gc.REQ_FIRST_FIELD)
for {
gc.Update()
ch := s.menuwin.GetChar()
chstr := fmt.Sprint(ch)
//s.log(fmt.Sprint(ch))
switch {
case ch == gc.KEY_DOWN, chstr == "106": // "j"
if s.currentItem < len(s.menuItems)-1 {
s.currentItem = s.currentItem + 1
s.menu.Current(s.menuItems[s.currentItem])
}
form.Driver(gc.REQ_NEXT_FIELD)
form.Driver(gc.REQ_END_LINE)
case ch == gc.KEY_UP, chstr == "107": // "k"
if s.currentItem > 0 {
s.currentItem = s.currentItem - 1
s.menu.Current(s.menuItems[s.currentItem])
}
case ch == gc.KEY_RETURN, ch == gc.KEY_ENTER, chstr == "32":
s.menu.Driver(gc.REQ_TOGGLE)
for _, item := range s.menu.Items() {
if item.Value() {
s.handleClick(item.Index())
break
}
}
s.menu.Driver(gc.REQ_TOGGLE)
case chstr == "114": // "r"
s.handleSort("rank", false)
case chstr == "110": // "n"
s.handleSort("name", true)
case chstr == "115": // "s"
s.handleSort("symbol", false)
case chstr == "112": // "p
s.handleSort("price", true)
case chstr == "109": // "m
s.handleSort("marketcap", true)
case chstr == "118": // "v
s.handleSort("24hvolume", true)
case chstr == "49": // "1"
s.handleSort("1hchange", true)
case chstr == "50": // "2"
s.handleSort("24hchange", true)
case chstr == "55": // "7"
s.handleSort("7dchange", true)
case chstr == "116": // "t"
s.handleSort("totalsupply", true)
case chstr == "97": // "a"
s.handleSort("availablesupply", true)
case chstr == "108": // "l"
s.handleSort("lastupdated", true)
case chstr == "21": // ctrl-u
s.currentItem = s.currentItem - s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case fmt.Sprint(ch) == "4": // ctrl-d
s.currentItem = s.currentItem + s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case chstr == "104", chstr == "63": // "h", "?"
s.toggleHelp()
case chstr == "3", chstr == "113", chstr == "27": // ctrl-c, "q", esc
if s.helpVisible && chstr == "27" {
s.toggleHelp()
} else {
// quit
return nil
}
default:
s.menu.Driver(gc.DriverActions[ch])
}
}
}
func (s *Service) fetchData() error {
coins, err := cmc.GetAllCoinData(int(s.limit))
if err != nil {
return err
}
s.coins = []*cmc.Coin{}
for i := range coins {
coin := coins[i]
s.coins = append(s.coins, &coin)
}
return nil
}
func (s *Service) handleClick(idx int) {
slug := strings.ToLower(strings.Replace(s.coins[idx].Name, " ", "-", -1))
exec.Command("open", fmt.Sprintf("https://coinmarketcap.com/currencies/%s", slug)).Output()
}
func (s *Service) handleSort(name string, desc bool) {
if s.sortBy == name {
s.sortDesc = !s.sortDesc
} else {
s.sortBy = name
s.sortDesc = desc
}
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
func (s *Service) setMenuData() {
slice.Sort(s.coins[:], func(i, j int) bool {
if s.sortDesc == true {
i, j = j, i
}
switch s.sortBy {
case "rank":
return s.coins[i].Rank < s.coins[j].Rank
case "name":
return s.coins[i].Name < s.coins[j].Name
case "symbol":
return s.coins[i].Symbol < s.coins[j].Symbol
case "price":
return s.coins[i].PriceUsd < s.coins[j].PriceUsd
case "marketcap":
return s.coins[i].MarketCapUsd < s.coins[j].MarketCapUsd
case "24hvolume":
return s.coins[i].Usd24hVolume < s.coins[j].Usd24hVolume
case "1hchange":
return s.coins[i].PercentChange1h < s.coins[j].PercentChange1h
case "24hchange":
return s.coins[i].PercentChange24h < s.coins[j].PercentChange24h
case "7dchange":
return s.coins[i].PercentChange7d < s.coins[j].PercentChange7d
case "totalsupply":
return s.coins[i].TotalSupply < s.coins[j].TotalSupply
case "availablesupply":
return s.coins[i].AvailableSupply < s.coins[j].AvailableSupply
case "lastupdated":
return s.coins[i].LastUpdated < s.coins[j].LastUpdated
default:
return s.coins[i].Rank < s.coins[j].Rank
}
})
var menuData []string
for _, coin := range s.coins {
unix, _ := strconv.ParseInt(coin.LastUpdated, 10, 64)
lastUpdated := time.Unix(unix, 0).Format("15:04:05 Jan 02")
fields := []string{
pad.Right(fmt.Sprint(coin.Rank), 4, " "),
pad.Right(coin.Name, 22, " "),
pad.Right(coin.Symbol, 6, " "),
pad.Left(humanize.Commaf(coin.PriceUsd), 12, " "),
pad.Left(humanize.Commaf(coin.MarketCapUsd), 17, " "),
pad.Left(humanize.Commaf(coin.Usd24hVolume), 15, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange1h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange24h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange7d), 9, " "),
pad.Left(humanize.Commaf(coin.TotalSupply), 20, " "),
pad.Left(humanize.Commaf(coin.AvailableSupply), 18, " "),
pad.Left(fmt.Sprintf("%s", lastUpdated), 18, " "),
// add %percent of cap
}
var str string
for _, f := range fields {
str = fmt.Sprintf("%s%s", str, f)
}
menuData = append(menuData, str)
}
s.menuData = menuData
headers := []string{
pad.Right("[r]ank", 13, " "),
pad.Right("[n]ame", 13, " "),
pad.Right("[s]ymbol", 8, " "),
pad.Left("[p]rice", 10, " "),
pad.Left("[m]arket cap", 17, " "),
pad.Left("24H [v]olume", 15, " "),
pad.Left("[1]H%", 9, " "),
pad.Left("[2]4H%", 9, " "),
pad.Left("[7]D%", 9, " "),
pad.Left("[t]otal supply", 20, " "),
pad.Left("[a]vailable supply", 19, " "),
pad.Left("[l]ast updated", 17, " "),
}
header := ""
for _, h := range headers {
header = fmt.Sprintf("%s%s", header, h)
}
s.menuHeader = header
}
// SetColorPairs sets color pairs
func (s *Service) setColorPairs() {
switch s.primaryColor {
case "green":
gc.InitPair(1, gc.C_GREEN, gc.C_BLACK)
case "cyan", "blue":
gc.InitPair(1, gc.C_CYAN, gc.C_BLACK)
case "magenta", "pink", "purple":
gc.InitPair(1, gc.C_MAGENTA, gc.C_BLACK)
case "white":
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
case "red":
gc.InitPair(1, gc.C_RED, gc.C_BLACK)
case "yellow", "orange":
gc.InitPair(1, gc.C_YELLOW, gc.C_BLACK)
default:
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
}
gc.InitPair(2, gc.C_BLACK, gc.C_BLACK)
gc.InitPair(3, gc.C_BLACK, gc.C_GREEN)
gc.InitPair(4, gc.C_BLACK, gc.C_CYAN)
gc.InitPair(5, gc.C_WHITE, gc.C_BLUE)
gc.InitPair(6, gc.C_BLACK, -1)
}
// RenderMainWindow renders main window
func (s *Service) renderMainWindow() error {
if s.mainwin == nil {
var err error
s.mainwin, err = gc.NewWindow(s.screenRows, s.screenCols, 0, 0)
if err != nil {
return err
}
}
s.mainwin.Clear()
s.mainwin.ColorOn(2)
s.mainwin.MoveWindow(0, 0)
s.mainwin.Resize(s.screenRows, s.screenCols)
s.mainwin.Box(0, 0)
s.mainwin.Refresh()
return nil
}
// ResizeWindows resizes windows
func (s *Service) resizeWindows() {
gc.ResizeTerm(s.screenRows, s.screenCols)
//s.log(fmt.Sprintf("%v %v", s.screenCols, s.screenRows))
s.renderMainWindow()
s.renderMenu()
s.renderHelpBar()
s.renderLogWindow()
s.renderHelpWindow()
}
func (s *Service) renderHelpBar() error {
var err error
if s.helpbarwin == nil {
s.helpbarwin, err = gc.NewWindow(1, s.screenCols, s.screenRows-1, 0)
if err != nil {
return err
}
}
s.helpbarwin.Clear()
s.helpbarwin.Resize(1, s.screenCols)
s.helpbarwin.MoveWindow(s.screenRows-1, 0)
s.helpbarwin.ColorOn(2)
s.helpbarwin.Box(0, 0)
s.helpbarwin.ColorOff(2)
s.helpbarwin.ColorOn(1)
s.helpbarwin.MovePrint(0, 0, "[q]uit [h]elp")
s.helpbarwin.ColorOff(1)
s.helpbarwin.Refresh()
return nil
}
func (s *Service) renderLogWindow() error {
var err error
if s.logwin == nil {
s.logwin, err = gc.NewWindow(1, 20, s.screenRows-1, s.screenCols-20)
if err != nil {
return err
}
}
s.logwin.Clear()
s.logwin.Resize(1, 20)
s.logwin.MoveWindow(s.screenRows-1, s.screenCols-20)
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, s.lastLog)
s.logwin.ColorOff(1)
s.logwin.Refresh()
return nil
}
// Log logs debug messages
func (s *Service) log(msg string) {
s.lastLog = msg
s.logwin.Clear()
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, msg)
s.logwin.ColorOff(1)
s.logwin.Refresh()
}
func (s *Service) toggleHelp() {
s.helpVisible = !s.helpVisible
s.renderHelpWindow()
}
func (s *Service) renderHelpWindow() error {
if !s.helpVisible {
if s.helpwin != nil {
s.helpwin.ClearOk(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(6))
s.helpwin.ColorOn(6)
s.helpwin.Resize(0, 0)
s.helpwin.MoveWindow(200, 200)
s.helpwin.Refresh()
s.renderMenu()
}
return nil
}
var err error
if s.helpwin == nil {
s.helpwin, err = gc.NewWindow(21, 40, (s.screenRows/2)-11, (s.screenCols/2)-20)
if err != nil {
return err
}
}
s.helpwin.Keypad(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(1))
s.helpwin.ColorOn(1)
	s.helpwin.Resize(21, 40)
s.helpwin.MoveWindow((s.screenRows/2)-11, (s.screenCols/2)-20)
s.helpwin.Box(0, 0)
s.helpwin.MovePrint(0, 1, "Help")
s.helpwin.MovePrint(1, 1, "<up> or <k> to navigate up")
s.helpwin.MovePrint(2, 1, "<down> or <j> to navigate down")
s.helpwin.MovePrint(3, 1, "<ctrl-u> to to page up")
s.helpwin.MovePrint(4, 1, "<ctrl-d> to to page down")
s.helpwin.MovePrint(5, 1, "<enter> or <space> to open coin link")
s.helpwin.MovePrint(6, 1, "<1> to sort by 1 hour change")
s.helpwin.MovePrint(7, 1, "<2> to sort by 24 hour volume")
s.helpwin.MovePrint(8, 1, "<7> to sort by 7 day change")
s.helpwin.MovePrint(9, 1, "<a> to sort by available supply")
s.helpwin.MovePrint(10, 1, "<h> or <?> to toggle help")
s.helpwin.MovePrint(11, 1, "<l> to sort by last updated")
s.helpwin.MovePrint(12, 1, "<m> to sort by market cap")
s.helpwin.MovePrint(13, 1, "<n> to sort by name")
s.helpwin.MovePrint(14, 1, "<r> to sort by rank")
s.helpwin.MovePrint(15, 1, "<s> to sort by symbol")
s.helpwin.MovePrint(16, 1, "<t> to sort by total supply")
s.helpwin.MovePrint(17, 1, "<p> to sort by price")
s.helpwin.MovePrint(18, 1, "<v> to sort by 24 hour volume")
s.helpwin.MovePrint(19, 1, "<q> or <esc> to quit application.")
s.helpwin.Refresh()
return nil
}
// OnWindowResize sends event to channel when resize event occurs
func (s *Service) onWindowResize(channel chan os.Signal) {
//stdScr, _ := gc.Init()
//stdScr.ScrollOk(true)
//gc.NewLines(true)
for {
<-channel
//gc.StdScr().Clear()
//rows, cols := gc.StdScr().MaxYX()
cols, rows := GetScreenSize()
s.screenRows = rows
s.screenCols = cols
s.resizeWindows()
//gc.End()
//gc.Update()
//gc.StdScr().Refresh()
}
}
// RenderMenu renders menu
func (s *Service) renderMenu() error {
s.menuwinHeight = s.screenRows - 1
s.menuWidth = s.screenCols
s.menuHeight = s.screenRows - 2
//if len(s.menuItems) == 0 {
items := make([]*gc.MenuItem, len(s.menuData))
var err error
for i, val := range s.menuData {
items[i], err = gc.NewItem(val, "")
if err != nil {
return err
}
//defer items[i].Free()
}
s.menuItems = items
//}
if s.menu == nil {
var err error
s.menu, err = gc.NewMenu(s.menuItems)
if err != nil {
return err
}
} else {
s.menu.UnPost()
s.menu.SetItems(s.menuItems)
s.menu.Current(s.menuItems[s.currentItem])
}
if s.menuwin == nil {
var err error
s.menuwin, err = gc.NewWindow(s.menuwinHeight, s.menuwinWidth, 0, 0)
s.menuwin.ScrollOk(true)
if err != nil {
return err
}
s.menuwin.Keypad(true)
s.menu.SetWindow(s.menuwin)
s.menusubwin = s.menuwin.Derived(s.menuHeight, s.menuWidth, 1, 0)
s.menu.SubWindow(s.menusubwin)
s.menu.Option(gc.O_ONEVALUE, false)
s.menu.Format(s.menuHeight, 0)
s.menu.Mark("")
} else {
s.menusubwin.Resize(s.menuHeight, s.menuWidth)
		s.menuwin.Resize(s.menuwinHeight, s.menuwinWidth)
}
//s.menuwin.Clear()
s.menuwin.ColorOn(2)
s.menuwin.Box(0, 0)
s.menuwin.ColorOff(2)
s.menuwin.ColorOn(1)
s.menuwin.MovePrint(0, 0, s.menuHeader)
s.menuwin.ColorOff(1)
s.menuwin.ColorOn(2)
s.menuwin.MoveAddChar(2, 0, gc.ACS_LTEE)
//s.menuwin.HLine(2, 1, gc.ACS_HLINE, s.screenCols-6)
s.menuwin.ColorOff(2)
s.menu.Post()
s.menuwin.Refresh()
return nil
} | identifier_body |
|
table.go | package table
import (
"fmt"
"os"
"os/exec"
"os/signal"
"strconv"
"strings"
"sync"
"syscall"
"time"
slice "sort"
humanize "github.com/dustin/go-humanize"
cmc "github.com/miguelmota/go-coinmarketcap/pro/v1"
gc "github.com/rthornton128/goncurses"
pad "github.com/willf/pad/utf8"
)
var wg sync.WaitGroup
// Service service struct
type Service struct {
stdsrc *gc.Window
screenRows int
screenCols int
mainwin *gc.Window
menuwin *gc.Window
menuwinWidth int
menuwinHeight int
menusubwin *gc.Window
helpbarwin *gc.Window
helpwin *gc.Window
helpVisible bool
logwin *gc.Window
menu *gc.Menu
menuItems []*gc.MenuItem
menuData []string
menuHeader string
menuWidth int
menuHeight int
coins []*cmc.Coin
sortBy string
sortDesc bool
limit uint
refresh uint
primaryColor string
lastLog string
currentItem int
}
// Options options struct
type Options struct {
Color string
Limit uint
Refresh uint
}
var once sync.Once
// New returns new service
func New(opts *Options) *Service {
var instance *Service
// once.Do(func() {
instance = &Service{}
instance.primaryColor = opts.Color
instance.limit = opts.Limit
instance.refresh = opts.Refresh
// })
return instance
}
// Render starts GUI
func (s *Service) Render() error {
var err error
s.stdsrc, err = gc.Init()
defer gc.End()
if err != nil {
return err
}
gc.UseDefaultColors()
gc.StartColor()
s.setColorPairs()
gc.Raw(true)
gc.Echo(false)
gc.Cursor(0)
s.stdsrc.Keypad(true)
cols, rows := GetScreenSize()
s.screenRows = rows
s.screenCols = cols
s.helpVisible = false
s.renderMainWindow()
err = s.fetchData()
if err != nil {
return nil
}
go func() {
ticker := time.NewTicker(time.Duration(int64(s.refresh)) * time.Minute)
for {
select {
case <-ticker.C:
//s.menuwin.Clear()
//s.menuwin.Refresh()
s.fetchData()
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
}
}()
s.sortBy = "rank"
s.sortDesc = false
s.setMenuData()
err = s.renderMenu()
if err != nil {
panic(err)
}
defer s.menu.UnPost()
wg.Add(1)
	resizeChannel := make(chan os.Signal, 1) // buffered, as signal.Notify requires
signal.Notify(resizeChannel, syscall.SIGWINCH)
go s.onWindowResize(resizeChannel)
s.renderLogWindow()
s.renderHelpBar()
s.renderHelpWindow()
//stdsrc.GetChar() // required so it doesn't exit
//wg.Wait()
fields := make([]*gc.Field, 2)
fields[0], _ = gc.NewField(1, 10, 4, 18, 0, 0)
defer fields[0].Free()
fields[0].SetForeground(gc.ColorPair(1))
fields[0].SetBackground(gc.ColorPair(2) | gc.A_UNDERLINE | gc.A_BOLD)
fields[0].SetOptionsOff(gc.FO_AUTOSKIP)
fields[1], _ = gc.NewField(1, 10, 6, 18, 0, 0)
defer fields[1].Free()
fields[1].SetForeground(gc.ColorPair(1))
fields[1].SetBackground(gc.A_UNDERLINE)
fields[1].SetOptionsOff(gc.FO_AUTOSKIP)
fields[1].SetPad('*')
form, _ := gc.NewForm(fields)
form.Post()
form.Driver(gc.REQ_FIRST_FIELD)
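	// Main event loop: map key presses to navigation, sorting, help toggling and quitting.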
for {
gc.Update()
ch := s.menuwin.GetChar()
chstr := fmt.Sprint(ch)
//s.log(fmt.Sprint(ch))
switch {
case ch == gc.KEY_DOWN, chstr == "106": // "j"
if s.currentItem < len(s.menuItems)-1 {
s.currentItem = s.currentItem + 1
s.menu.Current(s.menuItems[s.currentItem])
}
form.Driver(gc.REQ_NEXT_FIELD)
form.Driver(gc.REQ_END_LINE)
case ch == gc.KEY_UP, chstr == "107": // "k"
if s.currentItem > 0 {
s.currentItem = s.currentItem - 1
s.menu.Current(s.menuItems[s.currentItem])
}
case ch == gc.KEY_RETURN, ch == gc.KEY_ENTER, chstr == "32":
s.menu.Driver(gc.REQ_TOGGLE)
for _, item := range s.menu.Items() {
if item.Value() {
s.handleClick(item.Index())
break
}
}
s.menu.Driver(gc.REQ_TOGGLE)
case chstr == "114": // "r"
s.handleSort("rank", false)
case chstr == "110": // "n"
s.handleSort("name", true)
case chstr == "115": // "s"
s.handleSort("symbol", false)
case chstr == "112": // "p
s.handleSort("price", true)
case chstr == "109": // "m
s.handleSort("marketcap", true)
case chstr == "118": // "v
s.handleSort("24hvolume", true)
case chstr == "49": // "1"
s.handleSort("1hchange", true)
case chstr == "50": // "2"
s.handleSort("24hchange", true)
case chstr == "55": // "7"
s.handleSort("7dchange", true)
case chstr == "116": // "t"
s.handleSort("totalsupply", true)
case chstr == "97": // "a"
s.handleSort("availablesupply", true)
case chstr == "108": // "l"
s.handleSort("lastupdated", true)
case chstr == "21": // ctrl-u
s.currentItem = s.currentItem - s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case fmt.Sprint(ch) == "4": // ctrl-d
s.currentItem = s.currentItem + s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case chstr == "104", chstr == "63": // "h", "?"
s.toggleHelp()
case chstr == "3", chstr == "113", chstr == "27": // ctrl-c, "q", esc
if s.helpVisible && chstr == "27" {
s.toggleHelp()
} else {
// quit
return nil
}
default:
s.menu.Driver(gc.DriverActions[ch])
}
}
}
func (s *Service) fetchData() error {
coins, err := cmc.GetAllCoinData(int(s.limit))
if err != nil {
return err
}
s.coins = []*cmc.Coin{}
for i := range coins {
coin := coins[i]
s.coins = append(s.coins, &coin)
}
return nil
}
func (s *Service) handleClick(idx int) {
slug := strings.ToLower(strings.Replace(s.coins[idx].Name, " ", "-", -1))
exec.Command("open", fmt.Sprintf("https://coinmarketcap.com/currencies/%s", slug)).Output()
}
func (s *Service) handleSort(name string, desc bool) {
	if s.sortBy == name {
		s.sortDesc = !s.sortDesc
	} else {
s.sortBy = name
s.sortDesc = desc
}
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
func (s *Service) setMenuData() {
slice.Sort(s.coins[:], func(i, j int) bool {
if s.sortDesc == true {
i, j = j, i
}
switch s.sortBy {
case "rank":
return s.coins[i].Rank < s.coins[j].Rank
case "name":
return s.coins[i].Name < s.coins[j].Name
case "symbol":
return s.coins[i].Symbol < s.coins[j].Symbol
case "price":
return s.coins[i].PriceUsd < s.coins[j].PriceUsd
case "marketcap":
return s.coins[i].MarketCapUsd < s.coins[j].MarketCapUsd
case "24hvolume":
return s.coins[i].Usd24hVolume < s.coins[j].Usd24hVolume
case "1hchange":
return s.coins[i].PercentChange1h < s.coins[j].PercentChange1h
case "24hchange":
return s.coins[i].PercentChange24h < s.coins[j].PercentChange24h
case "7dchange":
return s.coins[i].PercentChange7d < s.coins[j].PercentChange7d
case "totalsupply":
return s.coins[i].TotalSupply < s.coins[j].TotalSupply
case "availablesupply":
return s.coins[i].AvailableSupply < s.coins[j].AvailableSupply
case "lastupdated":
return s.coins[i].LastUpdated < s.coins[j].LastUpdated
default:
return s.coins[i].Rank < s.coins[j].Rank
}
})
var menuData []string
for _, coin := range s.coins {
unix, _ := strconv.ParseInt(coin.LastUpdated, 10, 64)
lastUpdated := time.Unix(unix, 0).Format("15:04:05 Jan 02")
fields := []string{
pad.Right(fmt.Sprint(coin.Rank), 4, " "),
pad.Right(coin.Name, 22, " "),
pad.Right(coin.Symbol, 6, " "),
pad.Left(humanize.Commaf(coin.PriceUsd), 12, " "),
pad.Left(humanize.Commaf(coin.MarketCapUsd), 17, " "),
pad.Left(humanize.Commaf(coin.Usd24hVolume), 15, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange1h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange24h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange7d), 9, " "),
pad.Left(humanize.Commaf(coin.TotalSupply), 20, " "),
pad.Left(humanize.Commaf(coin.AvailableSupply), 18, " "),
pad.Left(fmt.Sprintf("%s", lastUpdated), 18, " "),
// add %percent of cap
}
var str string
for _, f := range fields {
str = fmt.Sprintf("%s%s", str, f)
}
menuData = append(menuData, str)
}
s.menuData = menuData
headers := []string{
pad.Right("[r]ank", 13, " "),
pad.Right("[n]ame", 13, " "),
pad.Right("[s]ymbol", 8, " "),
pad.Left("[p]rice", 10, " "),
pad.Left("[m]arket cap", 17, " "),
pad.Left("24H [v]olume", 15, " "),
pad.Left("[1]H%", 9, " "),
pad.Left("[2]4H%", 9, " "),
pad.Left("[7]D%", 9, " "),
pad.Left("[t]otal supply", 20, " "),
pad.Left("[a]vailable supply", 19, " "),
pad.Left("[l]ast updated", 17, " "),
}
header := ""
for _, h := range headers {
header = fmt.Sprintf("%s%s", header, h)
}
s.menuHeader = header
}
// SetColorPairs sets color pairs
func (s *Service) setColorPairs() {
switch s.primaryColor {
case "green":
gc.InitPair(1, gc.C_GREEN, gc.C_BLACK)
case "cyan", "blue":
gc.InitPair(1, gc.C_CYAN, gc.C_BLACK)
case "magenta", "pink", "purple":
gc.InitPair(1, gc.C_MAGENTA, gc.C_BLACK)
case "white":
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
case "red":
gc.InitPair(1, gc.C_RED, gc.C_BLACK)
case "yellow", "orange":
gc.InitPair(1, gc.C_YELLOW, gc.C_BLACK)
default:
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
}
gc.InitPair(2, gc.C_BLACK, gc.C_BLACK)
gc.InitPair(3, gc.C_BLACK, gc.C_GREEN)
gc.InitPair(4, gc.C_BLACK, gc.C_CYAN)
gc.InitPair(5, gc.C_WHITE, gc.C_BLUE)
gc.InitPair(6, gc.C_BLACK, -1)
}
// RenderMainWindow renders main window
func (s *Service) renderMainWindow() error {
if s.mainwin == nil {
var err error
s.mainwin, err = gc.NewWindow(s.screenRows, s.screenCols, 0, 0)
if err != nil {
return err
}
}
s.mainwin.Clear()
s.mainwin.ColorOn(2)
s.mainwin.MoveWindow(0, 0)
s.mainwin.Resize(s.screenRows, s.screenCols)
s.mainwin.Box(0, 0)
s.mainwin.Refresh()
return nil
}
// ResizeWindows resizes windows
func (s *Service) resizeWindows() {
gc.ResizeTerm(s.screenRows, s.screenCols)
//s.log(fmt.Sprintf("%v %v", s.screenCols, s.screenRows))
s.renderMainWindow()
s.renderMenu()
s.renderHelpBar()
s.renderLogWindow()
s.renderHelpWindow()
}
func (s *Service) renderHelpBar() error {
var err error
if s.helpbarwin == nil {
s.helpbarwin, err = gc.NewWindow(1, s.screenCols, s.screenRows-1, 0)
if err != nil {
return err
}
}
s.helpbarwin.Clear()
s.helpbarwin.Resize(1, s.screenCols)
s.helpbarwin.MoveWindow(s.screenRows-1, 0)
s.helpbarwin.ColorOn(2)
s.helpbarwin.Box(0, 0)
s.helpbarwin.ColorOff(2)
s.helpbarwin.ColorOn(1)
s.helpbarwin.MovePrint(0, 0, "[q]uit [h]elp")
s.helpbarwin.ColorOff(1)
s.helpbarwin.Refresh()
return nil
}
func (s *Service) renderLogWindow() error {
var err error
if s.logwin == nil {
s.logwin, err = gc.NewWindow(1, 20, s.screenRows-1, s.screenCols-20)
if err != nil {
return err
}
}
s.logwin.Clear()
s.logwin.Resize(1, 20)
s.logwin.MoveWindow(s.screenRows-1, s.screenCols-20)
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, s.lastLog)
s.logwin.ColorOff(1)
s.logwin.Refresh()
return nil
}
// Log logs debug messages
func (s *Service) log(msg string) {
s.lastLog = msg
s.logwin.Clear()
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, msg)
s.logwin.ColorOff(1)
s.logwin.Refresh()
}
func (s *Service) toggleHelp() {
s.helpVisible = !s.helpVisible
s.renderHelpWindow()
}
func (s *Service) renderHelpWindow() error {
if !s.helpVisible {
if s.helpwin != nil {
s.helpwin.ClearOk(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(6))
s.helpwin.ColorOn(6)
s.helpwin.Resize(0, 0)
s.helpwin.MoveWindow(200, 200)
s.helpwin.Refresh()
s.renderMenu()
}
return nil
}
var err error
if s.helpwin == nil {
s.helpwin, err = gc.NewWindow(21, 40, (s.screenRows/2)-11, (s.screenCols/2)-20)
if err != nil {
return err
}
}
s.helpwin.Keypad(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(1))
s.helpwin.ColorOn(1)
	s.helpwin.Resize(21, 40)
s.helpwin.MoveWindow((s.screenRows/2)-11, (s.screenCols/2)-20)
s.helpwin.Box(0, 0)
s.helpwin.MovePrint(0, 1, "Help")
s.helpwin.MovePrint(1, 1, "<up> or <k> to navigate up")
s.helpwin.MovePrint(2, 1, "<down> or <j> to navigate down")
s.helpwin.MovePrint(3, 1, "<ctrl-u> to to page up")
s.helpwin.MovePrint(4, 1, "<ctrl-d> to to page down")
s.helpwin.MovePrint(5, 1, "<enter> or <space> to open coin link")
s.helpwin.MovePrint(6, 1, "<1> to sort by 1 hour change")
s.helpwin.MovePrint(7, 1, "<2> to sort by 24 hour volume")
s.helpwin.MovePrint(8, 1, "<7> to sort by 7 day change")
s.helpwin.MovePrint(9, 1, "<a> to sort by available supply")
s.helpwin.MovePrint(10, 1, "<h> or <?> to toggle help")
s.helpwin.MovePrint(11, 1, "<l> to sort by last updated")
s.helpwin.MovePrint(12, 1, "<m> to sort by market cap")
s.helpwin.MovePrint(13, 1, "<n> to sort by name")
s.helpwin.MovePrint(14, 1, "<r> to sort by rank")
s.helpwin.MovePrint(15, 1, "<s> to sort by symbol")
s.helpwin.MovePrint(16, 1, "<t> to sort by total supply")
s.helpwin.MovePrint(17, 1, "<p> to sort by price")
s.helpwin.MovePrint(18, 1, "<v> to sort by 24 hour volume")
s.helpwin.MovePrint(19, 1, "<q> or <esc> to quit application.")
s.helpwin.Refresh()
return nil
}
// OnWindowResize sends event to channel when resize event occurs
func (s *Service) onWindowResize(channel chan os.Signal) {
//stdScr, _ := gc.Init()
//stdScr.ScrollOk(true)
//gc.NewLines(true)
for {
<-channel
//gc.StdScr().Clear()
//rows, cols := gc.StdScr().MaxYX()
cols, rows := GetScreenSize()
s.screenRows = rows
s.screenCols = cols
s.resizeWindows()
//gc.End()
//gc.Update()
//gc.StdScr().Refresh()
}
}
// RenderMenu renders menu
func (s *Service) renderMenu() error {
s.menuwinWidth = s.screenCols
s.menuwinHeight = s.screenRows - 1
s.menuWidth = s.screenCols
s.menuHeight = s.screenRows - 2
//if len(s.menuItems) == 0 {
items := make([]*gc.MenuItem, len(s.menuData))
var err error
for i, val := range s.menuData {
items[i], err = gc.NewItem(val, "")
if err != nil {
return err
}
//defer items[i].Free()
}
s.menuItems = items
//}
if s.menu == nil {
var err error
s.menu, err = gc.NewMenu(s.menuItems)
if err != nil {
return err
}
} else {
s.menu.UnPost()
s.menu.SetItems(s.menuItems)
s.menu.Current(s.menuItems[s.currentItem])
}
if s.menuwin == nil {
var err error
s.menuwin, err = gc.NewWindow(s.menuwinHeight, s.menuwinWidth, 0, 0)
s.menuwin.ScrollOk(true)
if err != nil {
return err
}
s.menuwin.Keypad(true)
s.menu.SetWindow(s.menuwin)
s.menusubwin = s.menuwin.Derived(s.menuHeight, s.menuWidth, 1, 0)
s.menu.SubWindow(s.menusubwin)
s.menu.Option(gc.O_ONEVALUE, false)
s.menu.Format(s.menuHeight, 0)
s.menu.Mark("")
} else {
s.menusubwin.Resize(s.menuHeight, s.menuWidth)
s.menuwin.Resize(s.menuHeight, s.menuWidth)
}
//s.menuwin.Clear()
s.menuwin.ColorOn(2)
s.menuwin.Box(0, 0)
s.menuwin.ColorOff(2)
s.menuwin.ColorOn(1)
s.menuwin.MovePrint(0, 0, s.menuHeader)
s.menuwin.ColorOff(1)
s.menuwin.ColorOn(2)
s.menuwin.MoveAddChar(2, 0, gc.ACS_LTEE)
//s.menuwin.HLine(2, 1, gc.ACS_HLINE, s.screenCols-6)
s.menuwin.ColorOff(2)
s.menu.Post()
s.menuwin.Refresh()
return nil
}
| {
s.sortDesc = !s.sortDesc
} | conditional_block |
// table.go
package table
import (
"fmt"
"os"
"os/exec"
"os/signal"
"strconv"
"strings"
"sync"
"syscall"
"time"
slice "sort"
humanize "github.com/dustin/go-humanize"
cmc "github.com/miguelmota/go-coinmarketcap/pro/v1"
gc "github.com/rthornton128/goncurses"
pad "github.com/willf/pad/utf8"
)
var wg sync.WaitGroup
// Service service struct
type Service struct {
stdsrc *gc.Window
screenRows int
screenCols int
mainwin *gc.Window
menuwin *gc.Window
menuwinWidth int
menuwinHeight int
menusubwin *gc.Window
helpbarwin *gc.Window
helpwin *gc.Window
helpVisible bool
logwin *gc.Window
menu *gc.Menu
menuItems []*gc.MenuItem
menuData []string
menuHeader string
menuWidth int
menuHeight int
coins []*cmc.Coin
sortBy string
sortDesc bool
limit uint
refresh uint
primaryColor string
lastLog string
currentItem int
}
// Options options struct
type Options struct {
Color string
Limit uint
Refresh uint
}
var once sync.Once
// New returns new service
func New(opts *Options) *Service {
var instance *Service
// once.Do(func() {
instance = &Service{}
instance.primaryColor = opts.Color
instance.limit = opts.Limit
instance.refresh = opts.Refresh
// })
return instance
}
// Render starts GUI
func (s *Service) Render() error {
var err error
s.stdsrc, err = gc.Init()
defer gc.End()
if err != nil {
return err
}
gc.UseDefaultColors()
gc.StartColor()
s.setColorPairs()
gc.Raw(true)
gc.Echo(false)
gc.Cursor(0)
s.stdsrc.Keypad(true)
cols, rows := GetScreenSize()
s.screenRows = rows
s.screenCols = cols
s.helpVisible = false
s.renderMainWindow()
err = s.fetchData()
if err != nil {
return err
}
go func() {
ticker := time.NewTicker(time.Duration(int64(s.refresh)) * time.Minute)
for {
select {
case <-ticker.C:
//s.menuwin.Clear()
//s.menuwin.Refresh()
s.fetchData()
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
}
}()
s.sortBy = "rank"
s.sortDesc = false
s.setMenuData()
err = s.renderMenu()
if err != nil {
panic(err)
}
defer s.menu.UnPost()
wg.Add(1)
resizeChannel := make(chan os.Signal, 1)
signal.Notify(resizeChannel, syscall.SIGWINCH)
go s.onWindowResize(resizeChannel)
s.renderLogWindow()
s.renderHelpBar()
s.renderHelpWindow()
//stdsrc.GetChar() // required so it doesn't exit
//wg.Wait()
fields := make([]*gc.Field, 2)
fields[0], _ = gc.NewField(1, 10, 4, 18, 0, 0)
defer fields[0].Free()
fields[0].SetForeground(gc.ColorPair(1))
fields[0].SetBackground(gc.ColorPair(2) | gc.A_UNDERLINE | gc.A_BOLD)
fields[0].SetOptionsOff(gc.FO_AUTOSKIP)
fields[1], _ = gc.NewField(1, 10, 6, 18, 0, 0)
defer fields[1].Free()
fields[1].SetForeground(gc.ColorPair(1))
fields[1].SetBackground(gc.A_UNDERLINE)
fields[1].SetOptionsOff(gc.FO_AUTOSKIP)
fields[1].SetPad('*')
form, _ := gc.NewForm(fields)
form.Post()
form.Driver(gc.REQ_FIRST_FIELD)
for {
gc.Update()
ch := s.menuwin.GetChar()
chstr := fmt.Sprint(ch)
//s.log(fmt.Sprint(ch))
switch {
case ch == gc.KEY_DOWN, chstr == "106": // "j"
if s.currentItem < len(s.menuItems)-1 {
s.currentItem = s.currentItem + 1
s.menu.Current(s.menuItems[s.currentItem])
}
form.Driver(gc.REQ_NEXT_FIELD)
form.Driver(gc.REQ_END_LINE)
case ch == gc.KEY_UP, chstr == "107": // "k"
if s.currentItem > 0 {
s.currentItem = s.currentItem - 1
s.menu.Current(s.menuItems[s.currentItem])
}
case ch == gc.KEY_RETURN, ch == gc.KEY_ENTER, chstr == "32":
s.menu.Driver(gc.REQ_TOGGLE)
for _, item := range s.menu.Items() {
if item.Value() {
s.handleClick(item.Index())
break
}
}
s.menu.Driver(gc.REQ_TOGGLE)
case chstr == "114": // "r"
s.handleSort("rank", false)
case chstr == "110": // "n"
s.handleSort("name", true)
case chstr == "115": // "s"
s.handleSort("symbol", false)
case chstr == "112": // "p
s.handleSort("price", true)
case chstr == "109": // "m
s.handleSort("marketcap", true)
case chstr == "118": // "v
s.handleSort("24hvolume", true)
case chstr == "49": // "1"
s.handleSort("1hchange", true)
case chstr == "50": // "2"
s.handleSort("24hchange", true)
case chstr == "55": // "7"
s.handleSort("7dchange", true)
case chstr == "116": // "t"
s.handleSort("totalsupply", true)
case chstr == "97": // "a"
s.handleSort("availablesupply", true)
case chstr == "108": // "l"
s.handleSort("lastupdated", true)
case chstr == "21": // ctrl-u
s.currentItem = s.currentItem - s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case fmt.Sprint(ch) == "4": // ctrl-d
s.currentItem = s.currentItem + s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case chstr == "104", chstr == "63": // "h", "?"
s.toggleHelp()
case chstr == "3", chstr == "113", chstr == "27": // ctrl-c, "q", esc
if s.helpVisible && chstr == "27" {
s.toggleHelp()
} else {
// quit
return nil
}
default:
s.menu.Driver(gc.DriverActions[ch])
}
}
}
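// fetchData retrieves the current coin listings from the CoinMarketCap API and stores them on the service.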
func (s *Service) fetchData() error {
coins, err := cmc.GetAllCoinData(int(s.limit))
if err != nil {
return err
}
s.coins = []*cmc.Coin{}
for i := range coins {
coin := coins[i]
s.coins = append(s.coins, &coin)
}
return nil
}
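// handleClick opens the CoinMarketCap page for the coin at the given menu index in the default browser.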
func (s *Service) handleClick(idx int) {
slug := strings.ToLower(strings.Replace(s.coins[idx].Name, " ", "-", -1))
exec.Command("open", fmt.Sprintf("https://coinmarketcap.com/currencies/%s", slug)).Output()
}
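// handleSort updates the sort column, toggling the direction when the same column is chosen again, then re-renders the menu.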
func (s *Service) handleSort(name string, desc bool) {
if s.sortBy == name {
s.sortDesc = !s.sortDesc
} else {
s.sortBy = name
s.sortDesc = desc
}
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
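// setMenuData sorts the coin list by the active column and rebuilds the padded menu rows and header line.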
func (s *Service) setMenuData() {
slice.Sort(s.coins[:], func(i, j int) bool {
if s.sortDesc == true {
i, j = j, i
}
switch s.sortBy {
case "rank":
return s.coins[i].Rank < s.coins[j].Rank
case "name":
return s.coins[i].Name < s.coins[j].Name
case "symbol":
return s.coins[i].Symbol < s.coins[j].Symbol
case "price":
return s.coins[i].PriceUsd < s.coins[j].PriceUsd
case "marketcap":
return s.coins[i].MarketCapUsd < s.coins[j].MarketCapUsd
case "24hvolume":
return s.coins[i].Usd24hVolume < s.coins[j].Usd24hVolume
case "1hchange":
return s.coins[i].PercentChange1h < s.coins[j].PercentChange1h
case "24hchange":
return s.coins[i].PercentChange24h < s.coins[j].PercentChange24h
case "7dchange":
return s.coins[i].PercentChange7d < s.coins[j].PercentChange7d
case "totalsupply":
return s.coins[i].TotalSupply < s.coins[j].TotalSupply
case "availablesupply":
return s.coins[i].AvailableSupply < s.coins[j].AvailableSupply
case "lastupdated":
return s.coins[i].LastUpdated < s.coins[j].LastUpdated
default:
return s.coins[i].Rank < s.coins[j].Rank
}
})
var menuData []string
for _, coin := range s.coins {
unix, _ := strconv.ParseInt(coin.LastUpdated, 10, 64)
lastUpdated := time.Unix(unix, 0).Format("15:04:05 Jan 02")
fields := []string{
pad.Right(fmt.Sprint(coin.Rank), 4, " "),
pad.Right(coin.Name, 22, " "),
pad.Right(coin.Symbol, 6, " "),
pad.Left(humanize.Commaf(coin.PriceUsd), 12, " "),
pad.Left(humanize.Commaf(coin.MarketCapUsd), 17, " "),
pad.Left(humanize.Commaf(coin.Usd24hVolume), 15, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange1h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange24h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange7d), 9, " "),
pad.Left(humanize.Commaf(coin.TotalSupply), 20, " "),
pad.Left(humanize.Commaf(coin.AvailableSupply), 18, " "),
pad.Left(fmt.Sprintf("%s", lastUpdated), 18, " "),
// add %percent of cap
}
var str string
for _, f := range fields {
str = fmt.Sprintf("%s%s", str, f)
}
menuData = append(menuData, str)
}
s.menuData = menuData
headers := []string{
pad.Right("[r]ank", 13, " "),
pad.Right("[n]ame", 13, " "),
pad.Right("[s]ymbol", 8, " "),
pad.Left("[p]rice", 10, " "),
pad.Left("[m]arket cap", 17, " "),
pad.Left("24H [v]olume", 15, " "),
pad.Left("[1]H%", 9, " "),
pad.Left("[2]4H%", 9, " "),
pad.Left("[7]D%", 9, " "),
pad.Left("[t]otal supply", 20, " "),
pad.Left("[a]vailable supply", 19, " "),
pad.Left("[l]ast updated", 17, " "),
}
header := ""
for _, h := range headers {
header = fmt.Sprintf("%s%s", header, h)
}
s.menuHeader = header
}
// SetColorPairs sets color pairs
func (s *Service) setColorPairs() {
switch s.primaryColor {
case "green":
gc.InitPair(1, gc.C_GREEN, gc.C_BLACK)
case "cyan", "blue":
gc.InitPair(1, gc.C_CYAN, gc.C_BLACK)
case "magenta", "pink", "purple":
gc.InitPair(1, gc.C_MAGENTA, gc.C_BLACK)
case "white":
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
case "red":
gc.InitPair(1, gc.C_RED, gc.C_BLACK)
case "yellow", "orange":
gc.InitPair(1, gc.C_YELLOW, gc.C_BLACK)
default:
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
}
gc.InitPair(2, gc.C_BLACK, gc.C_BLACK)
gc.InitPair(3, gc.C_BLACK, gc.C_GREEN)
gc.InitPair(4, gc.C_BLACK, gc.C_CYAN)
gc.InitPair(5, gc.C_WHITE, gc.C_BLUE)
gc.InitPair(6, gc.C_BLACK, -1)
}
// RenderMainWindow renders main window
func (s *Service) renderMainWindow() error {
if s.mainwin == nil {
var err error
s.mainwin, err = gc.NewWindow(s.screenRows, s.screenCols, 0, 0)
if err != nil {
return err
}
}
s.mainwin.Clear()
s.mainwin.ColorOn(2)
s.mainwin.MoveWindow(0, 0)
s.mainwin.Resize(s.screenRows, s.screenCols)
s.mainwin.Box(0, 0)
s.mainwin.Refresh()
return nil
}
// ResizeWindows resizes windows
func (s *Service) resizeWindows() {
gc.ResizeTerm(s.screenRows, s.screenCols)
//s.log(fmt.Sprintf("%v %v", s.screenCols, s.screenRows))
s.renderMainWindow()
s.renderMenu()
s.renderHelpBar()
s.renderLogWindow()
s.renderHelpWindow()
}
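// renderHelpBar renders the one-line shortcut bar at the bottom of the screen.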
func (s *Service) renderHelpBar() error {
var err error
if s.helpbarwin == nil {
s.helpbarwin, err = gc.NewWindow(1, s.screenCols, s.screenRows-1, 0)
if err != nil {
return err
}
}
s.helpbarwin.Clear()
s.helpbarwin.Resize(1, s.screenCols)
s.helpbarwin.MoveWindow(s.screenRows-1, 0)
s.helpbarwin.ColorOn(2)
s.helpbarwin.Box(0, 0)
s.helpbarwin.ColorOff(2)
s.helpbarwin.ColorOn(1)
s.helpbarwin.MovePrint(0, 0, "[q]uit [h]elp")
s.helpbarwin.ColorOff(1)
s.helpbarwin.Refresh()
return nil
}
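// renderLogWindow renders the small window in the bottom-right corner that shows the last log message.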
func (s *Service) renderLogWindow() error {
var err error
if s.logwin == nil {
s.logwin, err = gc.NewWindow(1, 20, s.screenRows-1, s.screenCols-20)
if err != nil {
return err
}
}
s.logwin.Clear()
s.logwin.Resize(1, 20)
s.logwin.MoveWindow(s.screenRows-1, s.screenCols-20)
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, s.lastLog)
s.logwin.ColorOff(1)
s.logwin.Refresh()
return nil
}
// log records the last debug message and prints it in the log window
func (s *Service) log(msg string) {
s.lastLog = msg
s.logwin.Clear()
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, msg)
s.logwin.ColorOff(1)
s.logwin.Refresh()
}
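// toggleHelp flips the help overlay visibility and re-renders it.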
func (s *Service) toggleHelp() {
s.helpVisible = !s.helpVisible
s.renderHelpWindow()
}
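// renderHelpWindow draws the help overlay with the key bindings, or hides it when help is not visible.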
func (s *Service) renderHelpWindow() error {
if !s.helpVisible {
if s.helpwin != nil {
s.helpwin.ClearOk(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(6))
s.helpwin.ColorOn(6)
s.helpwin.Resize(0, 0)
s.helpwin.MoveWindow(200, 200)
s.helpwin.Refresh()
s.renderMenu()
}
return nil
}
var err error
if s.helpwin == nil {
s.helpwin, err = gc.NewWindow(21, 40, (s.screenRows/2)-11, (s.screenCols/2)-20)
if err != nil {
return err
}
}
s.helpwin.Keypad(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(1))
s.helpwin.ColorOn(1)
s.helpwin.Resize(21, 40)
s.helpwin.MoveWindow((s.screenRows/2)-11, (s.screenCols/2)-20)
s.helpwin.Box(0, 0)
s.helpwin.MovePrint(0, 1, "Help")
s.helpwin.MovePrint(1, 1, "<up> or <k> to navigate up")
s.helpwin.MovePrint(2, 1, "<down> or <j> to navigate down")
s.helpwin.MovePrint(3, 1, "<ctrl-u> to to page up")
s.helpwin.MovePrint(4, 1, "<ctrl-d> to to page down")
s.helpwin.MovePrint(5, 1, "<enter> or <space> to open coin link")
s.helpwin.MovePrint(6, 1, "<1> to sort by 1 hour change")
s.helpwin.MovePrint(7, 1, "<2> to sort by 24 hour volume")
s.helpwin.MovePrint(8, 1, "<7> to sort by 7 day change")
s.helpwin.MovePrint(9, 1, "<a> to sort by available supply")
s.helpwin.MovePrint(10, 1, "<h> or <?> to toggle help")
s.helpwin.MovePrint(11, 1, "<l> to sort by last updated")
s.helpwin.MovePrint(12, 1, "<m> to sort by market cap")
s.helpwin.MovePrint(13, 1, "<n> to sort by name")
s.helpwin.MovePrint(14, 1, "<r> to sort by rank")
s.helpwin.MovePrint(15, 1, "<s> to sort by symbol")
s.helpwin.MovePrint(16, 1, "<t> to sort by total supply")
s.helpwin.MovePrint(17, 1, "<p> to sort by price")
s.helpwin.MovePrint(18, 1, "<v> to sort by 24 hour volume")
s.helpwin.MovePrint(19, 1, "<q> or <esc> to quit application.")
s.helpwin.Refresh()
return nil
}
// OnWindowResize sends event to channel when resize event occurs
func (s *Service) onWindowResize(channel chan os.Signal) {
//stdScr, _ := gc.Init()
//stdScr.ScrollOk(true)
//gc.NewLines(true)
for {
<-channel
//gc.StdScr().Clear()
//rows, cols := gc.StdScr().MaxYX()
cols, rows := GetScreenSize()
s.screenRows = rows
s.screenCols = cols
s.resizeWindows()
//gc.End()
//gc.Update()
//gc.StdScr().Refresh()
}
}
// RenderMenu renders menu
func (s *Service) renderMenu() error {
s.menuwinWidth = s.screenCols
s.menuwinHeight = s.screenRows - 1
s.menuWidth = s.screenCols
s.menuHeight = s.screenRows - 2
//if len(s.menuItems) == 0 {
items := make([]*gc.MenuItem, len(s.menuData))
var err error
for i, val := range s.menuData {
items[i], err = gc.NewItem(val, "")
if err != nil {
return err
}
//defer items[i].Free()
}
s.menuItems = items
//}
if s.menu == nil {
var err error
s.menu, err = gc.NewMenu(s.menuItems)
if err != nil {
return err
}
} else {
s.menu.UnPost()
s.menu.SetItems(s.menuItems)
s.menu.Current(s.menuItems[s.currentItem])
}
if s.menuwin == nil {
var err error
s.menuwin, err = gc.NewWindow(s.menuwinHeight, s.menuwinWidth, 0, 0)
if err != nil {
return err
}
s.menuwin.ScrollOk(true)
s.menuwin.Keypad(true)
s.menu.SetWindow(s.menuwin)
s.menusubwin = s.menuwin.Derived(s.menuHeight, s.menuWidth, 1, 0)
s.menu.SubWindow(s.menusubwin)
s.menu.Option(gc.O_ONEVALUE, false)
s.menu.Format(s.menuHeight, 0)
s.menu.Mark("")
} else {
s.menusubwin.Resize(s.menuHeight, s.menuWidth)
s.menuwin.Resize(s.menuHeight, s.menuWidth)
}
//s.menuwin.Clear()
s.menuwin.ColorOn(2)
s.menuwin.Box(0, 0)
s.menuwin.ColorOff(2)
s.menuwin.ColorOn(1)
s.menuwin.MovePrint(0, 0, s.menuHeader)
s.menuwin.ColorOff(1)
s.menuwin.ColorOn(2)
s.menuwin.MoveAddChar(2, 0, gc.ACS_LTEE)
//s.menuwin.HLine(2, 1, gc.ACS_HLINE, s.screenCols-6)
s.menuwin.ColorOff(2)
s.menu.Post()
s.menuwin.Refresh()
return nil
}
// cli.rs
// Copyright (c) 2016 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod setup {
use std::io::{self, Write};
use std::path::Path;
use std::process;
use ansi_term::Colour::{Cyan, Green, White};
use hcore::crypto::SigKeyPair;
use hcore::env;
use analytics;
use command;
use config;
use error::Result;
pub fn start(cache_path: &Path, analytics_path: &Path) -> Result<()> {
let mut generated_origin = false;
println!("");
title("Habitat CLI Setup");
para("Welcome to hab setup. Let's get started.");
heading("Set up a default origin");
para("Every package in Habitat belongs to an origin, which indicates the person or \
organization responsible for maintaining that package. Each origin also has \
a key used to cryptographically sign packages in that origin.");
para("Selecting a default origin tells package building operations such as 'hab pkg \
build' what key should be used to sign the packages produced. If you do not \
set a default origin now, you will have to tell package building commands each \
time what origin to use.");
para("For more information on origins and how they are used in building packages, \
please consult the docs at https://www.habitat.sh/docs/create-packages-overview/");
if try!(ask_default_origin()) {
println!("");
para("Enter the name of your origin. If you plan to publish your packages publicly, \
we recommend that you select one that is not already in use on the Habitat \
build service found at https://app.habitat.sh/.");
let origin = try!(prompt_origin());
try!(write_cli_config_origin(&origin));
println!("");
if is_origin_in_cache(&origin, cache_path) {
para(&format!("You already have an origin key for {} created and installed. \
Great work!",
&origin));
} else {
heading("Create origin key pair");
para(&format!("It doesn't look like you have a signing key for the origin `{}'. \
Without it, you won't be able to build new packages successfully.",
&origin));
para("You can either create a new signing key now, or, if you are building \
packages for an origin that already exists, ask the owner to give you the \
signing key.");
para("For more information on the use of origin keys, please consult the \
documentation at https://www.habitat.sh/docs/concepts-keys/#origin-keys");
if try!(ask_create_origin(&origin)) {
try!(create_origin(&origin, cache_path));
generated_origin = true;
} else {
para(&format!("You might want to create an origin key later with: `hab \
origin key generate {}'",
&origin));
}
}
} else {
para("Okay, maybe another time.");
}
heading("GitHub Access Token");
para("While you can build and run Habitat packages without sharing them on the public \
depot, doing so allows you to collaborate with the Habitat community. In addition, \
it is how you can perform continuous deployment with Habitat.");
para("The depot uses GitHub authentication with an access token \
(https://help.github.com/articles/creating-an-access-token-for-command-line-use/).");
para("If you would like to share your packages on the depot, please enter your GitHub \
access token. Otherwise, just enter No.");
para("For more information on sharing packages on the depot, please read the \
documentation at https://www.habitat.sh/docs/share-packages-overview/");
if try!(ask_default_auth_token()) {
println!("");
para("Enter your GitHub access token.");
let auth_token = try!(prompt_auth_token());
try!(write_cli_config_auth_token(&auth_token));
} else {
para("Okay, maybe another time.");
}
heading("Analytics");
para("The `hab` command-line tool will optionally send anonymous usage data to Habitat's \
Google Analytics account. This is a strictly opt-in activity and no tracking will \
occur unless you respond affirmatively to the question below.");
para("We collect this data to help improve Habitat's user experience. For example, we \
would like to know the category of tasks users are performing, and which ones they \
are having trouble with (e.g. mistyping command line arguments).");
para("To see what kinds of data are sent and how they are anonymized, please read more \
about our analytics here: https://www.habitat.sh/docs/about-analytics/");
if try!(ask_enable_analytics(analytics_path)) {
try!(opt_in_analytics(analytics_path, generated_origin));
} else {
try!(opt_out_analytics(analytics_path));
}
heading("CLI Setup Complete");
para("That's all for now. Thanks for using Habitat!");
Ok(())
}
fn ask_default_origin() -> Result<bool> {
prompt_yes_no("Set up a default origin?", Some(true))
}
fn ask_create_origin(origin: &str) -> Result<bool> {
prompt_yes_no(&format!("Create an origin key for `{}'?", origin),
Some(true))
}
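/// Persists the selected origin as the default entry in the CLI config file.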
fn write_cli_config_origin(origin: &str) -> Result<()> {
let mut config = try!(config::load());
config.origin = Some(origin.to_string());
config::save(&config)
}
fn write_cli_config_auth_token(auth_token: &str) -> Result<()> {
let mut config = try!(config::load());
config.auth_token = Some(auth_token.to_string());
config::save(&config)
}
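/// Returns true when a secret signing key for `origin` already exists in the local key cache.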
fn is_origin_in_cache(origin: &str, cache_path: &Path) -> bool {
match SigKeyPair::get_latest_pair_for(origin, cache_path) {
Ok(pair) => {
match pair.secret() {
Ok(_) => true,
_ => false,
}
}
_ => false,
}
}
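/// Generates a new origin key pair in the local key cache.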
fn create_origin(origin: &str, cache_path: &Path) -> Result<()> {
let result = command::origin::key::generate::start(&origin, cache_path);
println!("");
result
}
fn prompt_origin() -> Result<String> {
let config = try!(config::load());
let default = match config.origin {
Some(o) => {
para(&format!("You already have a default origin set up as `{}', but feel free \
to change it if you wish.",
&o));
Some(o)
}
None => env::var("USER").ok(),
};
prompt_ask("Default origin name", default.as_ref().map(|x| &**x))
}
fn ask_default_auth_token() -> Result<bool> {
prompt_yes_no("Set up a default GitHub access token?", Some(true))
}
fn prompt_auth_token() -> Result<String> {
let config = try!(config::load());
let default = match config.auth_token {
Some(o) => {
para("You already have a default auth token set up, but feel free to change it \
if you wish.");
Some(o)
}
None => None,
};
prompt_ask("GitHub access token", default.as_ref().map(|x| &**x))
}
fn ask_enable_analytics(analytics_path: &Path) -> Result<bool> {
let default = match analytics::is_opted_in(analytics_path) {
Some(val) => Some(val),
None => Some(true),
};
prompt_yes_no("Enable analytics?", default)
}
fn opt_in_analytics(analytics_path: &Path, generated_origin: bool) -> Result<()> {
let result = analytics::opt_in(analytics_path, generated_origin);
println!("");
result
}
fn opt_out_analytics(analytics_path: &Path) -> Result<()> {
let result = analytics::opt_out(analytics_path);
println!("");
result
}
fn title(text: &str) {
println!("{}", Green.bold().paint(text));
println!("{}\n",
Green.bold().paint(format!("{:=<width$}", "", width = text.chars().count())));
}
fn heading(text: &str) {
println!("{}\n", Green.bold().paint(text));
}
fn para(text: &str) {
print_wrapped(text, 75, 2)
}
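/// Prints text word-wrapped to fit within `wrap_width` columns at a fixed left indent, treating blank lines as paragraph breaks.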
fn print_wrapped(text: &str, wrap_width: usize, left_indent: usize) {
for line in text.split("\n\n") {
let mut buffer = String::new();
let mut width = 0;
for word in line.split_whitespace() {
let wl = word.chars().count();
if (width + wl + 1) > (wrap_width - left_indent) {
println!("{:<width$}{}", " ", buffer, width = left_indent);
buffer.clear();
width = 0;
}
width = width + wl + 1;
buffer.push_str(word);
buffer.push(' ');
}
if !buffer.is_empty() {
println!("{:<width$}{}", " ", buffer, width = left_indent);
}
println!("");
}
}
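/// Asks a yes/no/quit question, returning the default on an empty answer and exiting the process on quit.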
fn prompt_yes_no(question: &str, default: Option<bool>) -> Result<bool> {
let choice = match default {
Some(yes) => {
if yes {
format!("{}{}{}",
White.paint("["),
White.bold().paint("Yes"),
White.paint("/no/quit]"))
} else {
format!("{}{}{}",
White.paint("[yes/"),
White.bold().paint("No"),
White.paint("/quit]"))
}
}
None => format!("{}", White.paint("[yes/no/quit]")),
};
loop {
try!(io::stdout().flush());
print!("{} {} ", Cyan.paint(question), choice);
try!(io::stdout().flush());
let mut response = String::new();
try!(io::stdin().read_line(&mut response));
match response.trim().chars().next().unwrap_or('\n') {
'y' | 'Y' => return Ok(true),
'n' | 'N' => return Ok(false),
'q' | 'Q' => process::exit(0),
'\n' => {
match default {
Some(default) => return Ok(default),
None => continue,
}
}
_ => continue,
}
}
}
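/// Prompts for a free-form answer, returning `default` when the response is empty.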
fn prompt_ask(question: &str, default: Option<&str>) -> Result<String> {
let choice = match default {
Some(d) => {
format!(" {}{}{}",
White.paint("[default: "),
White.bold().paint(d),
White.paint("]"))
}
None => "".to_string(),
};
loop {
try!(io::stdout().flush());
print!("{}{} ", Cyan.paint(format!("{}:", question)), choice);
try!(io::stdout().flush());
let mut response = String::new();
try!(io::stdin().read_line(&mut response));
if response.trim().is_empty() {
match default {
Some(d) => return Ok(d.to_string()),
None => continue,
}
}
return Ok(response.trim().to_string());
}
}
}
// main.go
package main
import (
"crypto/sha1"
"encoding/hex"
"flag"
"fmt"
"github.com/amyangfei/dynamicmq-go/chord"
dmq "github.com/amyangfei/dynamicmq-go/dynamicmq"
"github.com/op/go-logging"
"github.com/rakyll/globalconf"
"os"
"os/signal"
"runtime"
"strconv"
"strings"
"syscall"
)
// Server basic configuration
var Config *SrvConfig
// Etcd client pool
var EtcdCliPool *dmq.EtcdClientPool
// Meta redis client pool
var MetaRCPool *dmq.RedisCliPool
// common log
var log = logging.MustGetLogger("dynamicmq-match-datanode")
// logger used for serf daemon in chord node
var serfLog *os.File
// Node in chord topology
var ChordNode *chord.Node
// Mapping from subclient's id to subclient information
// The subclient's id is in BSON format, not hex string
var ClisInfo = dmq.NewConcurrentMap()
// mapping from a dispatcher's id (disp name) to its DispConn struct
var DispConns map[string]*DispConn
// the connected dispatcher at present
var CurDispNode *DispNode
// initSignal creates a channel and registers it for the signals this server handles.
func initSignal() chan os.Signal {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM,
syscall.SIGINT, syscall.SIGSTOP)
return c
}
func handleSignal(c chan os.Signal) {
// Block until a signal is received.
for {
s := <-c
log.Info("get a signal %s", s.String())
switch s {
case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGSTOP, syscall.SIGINT:
shutdownServer()
case syscall.SIGHUP:
// TODO reload
default:
return
}
}
}
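// initConfig parses the configuration file and the flag sets and populates the global Config.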
func initConfig(configFile, entrypoint, starthash string) error {
conf, err := globalconf.NewWithOptions(&globalconf.Options{
Filename: configFile,
})
if err != nil {
return err
}
basicFlagSet := flag.NewFlagSet("basic", flag.PanicOnError)
basicFlagSet.String("bind_ip", "127.0.0.1", "server bind ip")
basicFlagSet.String("workdir", ".", "server working dir")
basicFlagSet.String("log_level", "DEBUG", "log level")
basicFlagSet.String("log_file", "./datanode.log", "log file path")
basicFlagSet.String("pid_file", "./datanode_pid", "pid file")
basicFlagSet.Int("tcp_recvbuf_size", 2048, "tcp receive buffer size")
basicFlagSet.Int("tcp_sendbuf_size", 2048, "tcp send buffer size")
basicFlagSet.Int("tcp_bufio_num", 64, "bufio num for each cache instance")
serfFlagSet := flag.NewFlagSet("serf", flag.PanicOnError)
serfFlagSet.String("bin_path", "/usr/local/bin/serf", "serf bin path")
serfFlagSet.String("node_name", "serf0101", "serf node name")
serfFlagSet.Int("bind_port", 7946, "serf bind port")
serfFlagSet.String("rpc_addr", "127.0.0.1:7373", "serf rpc addr")
serfFlagSet.String("ev_handler", "./serfev_handler.py", "serf event handler")
serfFlagSet.String("log_file", "./serf.log", "serf log file")
chordFlagSet := flag.NewFlagSet("chord", flag.PanicOnError)
chordFlagSet.String("hostname", "chod0101", "chord hostname")
chordFlagSet.Int("bind_port", 5000, "chord bind port")
chordFlagSet.Int("rpc_port", 5500, "chord rpc port")
chordFlagSet.Int("num_vnodes", 16, "chord virtual node numbers")
chordFlagSet.Int("num_successors", 3, "chord successor node numbers")
chordFlagSet.Int("hash_bits", 160, "chord hash bits")
etcdFlagSet := flag.NewFlagSet("etcd", flag.PanicOnError)
etcdFlagSet.String("machines", "http://localhost:4001", "etcd machines")
etcdFlagSet.Int("pool_size", 4, "initial etcd client pool size")
etcdFlagSet.Int("max_pool_size", 64, "max etcd client pool size")
redisFlagSet := flag.NewFlagSet("redis", flag.PanicOnError)
redisFlagSet.String("meta_redis_addr", "tcp@localhost:6379", "meta redis address")
redisFlagSet.String("attr_redis_addr", "tcp@localhost:6479",
"attr redis address list. different group is filtered by ';'")
redisFlagSet.String("max_idle", "50", "redis pool max idle clients")
redisFlagSet.String("max_active", "100", "redis pool max active clients")
redisFlagSet.String("timeout", "3600", "close idle redis client after timeout")
globalconf.Register("basic", basicFlagSet)
globalconf.Register("serf", serfFlagSet)
globalconf.Register("chord", chordFlagSet)
globalconf.Register("etcd", etcdFlagSet)
globalconf.Register("redis", redisFlagSet)
conf.ParseAll()
Config = &SrvConfig{}
Config.BindIP = basicFlagSet.Lookup("bind_ip").Value.String()
Config.Workdir = basicFlagSet.Lookup("workdir").Value.String()
Config.LogLevel = basicFlagSet.Lookup("log_level").Value.String()
Config.LogFile = basicFlagSet.Lookup("log_file").Value.String()
Config.PidFile = basicFlagSet.Lookup("pid_file").Value.String()
Config.TCPRecvBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_recvbuf_size").Value.String())
Config.TCPSendBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_sendbuf_size").Value.String())
Config.TCPBufioNum, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_bufio_num").Value.String())
Config.TCPBufInsNum = runtime.NumCPU()
Config.HashFunc = sha1.New
Config.SerfBinPath = serfFlagSet.Lookup("bin_path").Value.String()
Config.SerfNodeName = serfFlagSet.Lookup("node_name").Value.String()
Config.SerfBindPort, err =
strconv.Atoi(serfFlagSet.Lookup("bind_port").Value.String())
Config.SerfBindAddr = fmt.Sprintf("0.0.0.0:%d", Config.SerfBindPort)
Config.SerfRPCAddr = serfFlagSet.Lookup("rpc_addr").Value.String()
Config.SerfEvHandler = serfFlagSet.Lookup("ev_handler").Value.String()
Config.SerfLogFile = serfFlagSet.Lookup("log_file").Value.String()
Config.Hostname = chordFlagSet.Lookup("hostname").Value.String()
Config.BindPort, err =
strconv.Atoi(chordFlagSet.Lookup("bind_port").Value.String())
Config.BindAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.BindPort)
Config.RPCPort, err =
strconv.Atoi(chordFlagSet.Lookup("rpc_port").Value.String())
Config.RPCAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.RPCPort)
Config.NumVnodes, err =
strconv.Atoi(chordFlagSet.Lookup("num_vnodes").Value.String())
Config.NumSuccessors, err =
strconv.Atoi(chordFlagSet.Lookup("num_successors").Value.String())
Config.HashBits, err =
strconv.Atoi(chordFlagSet.Lookup("hash_bits").Value.String())
machines := etcdFlagSet.Lookup("machines").Value.String()
Config.EtcdMachines = strings.Split(machines, ",")
Config.EtcdPoolSize, err =
strconv.Atoi(etcdFlagSet.Lookup("pool_size").Value.String())
Config.EtcdPoolMaxSize, err =
strconv.Atoi(etcdFlagSet.Lookup("max_pool_size").Value.String())
Config.MetaRedisAddr = redisFlagSet.Lookup("meta_redis_addr").Value.String()
attrAddrs := redisFlagSet.Lookup("attr_redis_addr").Value.String()
Config.AttrRedisAddrs = strings.Split(attrAddrs, ";")
Config.RedisMaxIdle, err =
strconv.Atoi(redisFlagSet.Lookup("max_idle").Value.String())
Config.RedisMaxActive, err =
strconv.Atoi(redisFlagSet.Lookup("max_active").Value.String())
Config.RedisIdleTimeout, err =
strconv.Atoi(redisFlagSet.Lookup("timeout").Value.String())
Config.Entrypoint = entrypoint
if len(starthash)%2 == 1 |
if sh, err := hex.DecodeString(starthash); err != nil {
return err
} else if len(sh) != Config.HashBits/8 {
return fmt.Errorf("error starthash hex string length %d, should be %d",
len(starthash), Config.HashBits/8*2)
} else {
Config.StartHash = sh[:]
}
return nil
}
func initLog(logFile, serfLogFile, logLevel string) error {
var format = logging.MustStringFormatter(
"%{time:2006-01-02 15:04:05.000} [%{level:.4s}] %{id:03x} [%{shortfunc}] %{message}",
)
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
backend1 := logging.NewLogBackend(f, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(dmq.LogLevelMap[logLevel], "")
logging.SetBackend(backend1Leveled)
serfLog, err = os.OpenFile(serfLogFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
return nil
}
func initServer() error {
log.Info("Datanode server is starting...")
EtcdCliPool = dmq.NewEtcdClientPool(
Config.EtcdMachines, Config.EtcdPoolSize, Config.EtcdPoolMaxSize)
DispConns = make(map[string]*DispConn)
var err error
CurDispNode, err = allocateDispNode(EtcdCliPool)
if err != nil {
return err
}
rcfg := dmq.NewRedisConfig(Config.MetaRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
MetaRCPool, err = dmq.NewRedisCliPool(rcfg)
if err != nil {
return err
}
return nil
}
func shutdownServer() {
log.Info("Datanode stop...")
unRegisterDN(Config, ChordNode, EtcdCliPool)
os.Exit(0)
}
func chordRoutine() {
conf := &chord.NodeConfig{
Serf: &chord.SerfConfig{
BinPath: Config.SerfBinPath,
NodeName: Config.SerfNodeName,
BindAddr: Config.SerfBindAddr,
RPCAddr: Config.SerfRPCAddr,
EvHandler: Config.SerfEvHandler,
},
Hostname: Config.Hostname,
HostIP: Config.BindIP,
BindAddr: Config.BindAddr,
RPCAddr: Config.RPCAddr,
NumVnodes: Config.NumVnodes,
NumSuccessors: Config.NumSuccessors,
HashFunc: sha1.New,
HashBits: Config.HashBits,
StartHash: Config.StartHash,
Entrypoint: Config.Entrypoint,
TCPRecvBufSize: Config.TCPRecvBufSize,
TCPSendBufSize: Config.TCPSendBufSize,
TCPBufInsNum: Config.TCPBufInsNum,
TCPBufioNum: Config.TCPBufioNum,
}
c := make(chan chord.Notification)
var err error
ChordNode, err = chord.Create(conf)
if err != nil {
panic(err)
}
ChordNode.SetLogger(log)
ChordNode.StartStatusTcp()
ChordNode.SerfSchdule(c, serfLog)
// FIXME: register datanode and vnodes at a more accurate time
if err := registerDataNode(Config, EtcdCliPool); err != nil {
panic(err)
}
if err := registerVnodes(Config, ChordNode, EtcdCliPool); err != nil {
panic(err)
}
go func() {
for {
notify := <-c
if notify.Err != nil {
log.Error("serf error %v: %s", notify.Err, notify.Msg)
}
}
}()
}
func startChordNode() error {
go chordRoutine()
return nil
}
func notifyService() error {
for _, attrRedisAddr := range Config.AttrRedisAddrs {
rcfg := dmq.NewRedisConfig(attrRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
rcpool, err := dmq.NewRedisCliPool(rcfg)
if err != nil {
return err
}
go attrWatcher(rcpool)
}
return nil
}
func main() {
var configFile string
var printVer bool
var entrypoint string
var starthash string
flag.BoolVar(&printVer, "version", false, "print version")
flag.StringVar(&configFile, "c", "config.ini", "specify config file")
flag.StringVar(&entrypoint, "e", "",
"serf entrypoint used for joining into cluster")
flag.StringVar(&starthash, "s", "", "chord node start hash hex string")
flag.Parse()
if printVer {
dmq.PrintVersion()
os.Exit(0)
}
if starthash == "" {
fmt.Println("Warning: starthash must be provided!")
flag.Usage()
os.Exit(-1)
}
if err := initConfig(configFile, entrypoint, starthash); err != nil {
panic(err)
}
if err := dmq.ProcessInit(Config.Workdir, Config.PidFile); err != nil {
panic(err)
}
if err := initLog(Config.LogFile, Config.SerfLogFile, Config.LogLevel); err != nil {
panic(err)
}
if err := initServer(); err != nil {
panic(err)
}
if err := startChordNode(); err != nil {
panic(err)
}
if err := notifyService(); err != nil {
panic(err)
}
startPubTCP(Config.BindAddr)
signalChan := initSignal()
handleSignal(signalChan)
}
| {
starthash = "0" + starthash
} | conditional_block |
main.go | package main
import (
"crypto/sha1"
"encoding/hex"
"flag"
"fmt"
"github.com/amyangfei/dynamicmq-go/chord"
dmq "github.com/amyangfei/dynamicmq-go/dynamicmq"
"github.com/op/go-logging"
"github.com/rakyll/globalconf"
"os"
"os/signal"
"runtime"
"strconv"
"strings"
"syscall"
)
// Server basic configuration
var Config *SrvConfig
// Etcd client pool
var EtcdCliPool *dmq.EtcdClientPool
// Meta redis client pool
var MetaRCPool *dmq.RedisCliPool
// common log
var log = logging.MustGetLogger("dynamicmq-match-datanode")
// logger used for serf daemon in chord node
var serfLog *os.File
// Node in chord topology
var ChordNode *chord.Node
// Mapping from subclient's id to subclient information
// The subclient's id is in BSON format, not hex string
var ClisInfo = dmq.NewConcurrentMap()
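// Illustrative sketch (not part of the original datanode, added for clarity):
// the keys of ClisInfo are the raw 12-byte BSON ObjectId, while
// hex.EncodeToString would give the 24-character hex form used only where a
// readable string is needed (e.g. logging). The helper name is hypothetical.
func subClientKey(rawID []byte) (key string, readable string) {
// raw bytes serve as the map key; the hex string is for display only
return string(rawID), hex.EncodeToString(rawID)
}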
// mapping from a dispatcher's id (disp name) to its DispConn struct
var DispConns map[string]*DispConn
// the currently connected dispatcher
var CurDispNode *DispNode
// initSignal registers the signal handler.
func initSignal() chan os.Signal {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM,
syscall.SIGINT, syscall.SIGSTOP)
return c
}
func handleSignal(c chan os.Signal) {
// Block until a signal is received.
for {
s := <-c
log.Info("get a signal %s", s.String())
switch s {
case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGSTOP, syscall.SIGINT:
shutdownServer()
case syscall.SIGHUP:
// TODO reload
default:
return
}
}
}
func initConfig(configFile, entrypoint, starthash string) error {
conf, err := globalconf.NewWithOptions(&globalconf.Options{
Filename: configFile,
})
if err != nil {
return err
}
basicFlagSet := flag.NewFlagSet("basic", flag.PanicOnError)
basicFlagSet.String("bind_ip", "127.0.0.1", "server bind ip")
basicFlagSet.String("workdir", ".", "server working dir")
basicFlagSet.String("log_level", "DEBUG", "log level")
basicFlagSet.String("log_file", "./datanode.log", "log file path")
basicFlagSet.String("pid_file", "./datanode_pid", "pid file")
basicFlagSet.Int("tcp_recvbuf_size", 2048, "tcp receive buffer size")
basicFlagSet.Int("tcp_sendbuf_size", 2048, "tcp send buffer size")
basicFlagSet.Int("tcp_bufio_num", 64, "bufio num for each cache instance")
serfFlagSet := flag.NewFlagSet("serf", flag.PanicOnError)
serfFlagSet.String("bin_path", "/usr/local/bin/serf", "serf bin path")
serfFlagSet.String("node_name", "serf0101", "serf node name")
serfFlagSet.Int("bind_port", 7946, "serf bind port")
serfFlagSet.String("rpc_addr", "127.0.0.1:7373", "serf rpc addr")
serfFlagSet.String("ev_handler", "./serfev_handler.py", "serf event handler")
serfFlagSet.String("log_file", "./serf.log", "serf log file")
chordFlagSet := flag.NewFlagSet("chord", flag.PanicOnError)
chordFlagSet.String("hostname", "chod0101", "chord hostname")
chordFlagSet.Int("bind_port", 5000, "chord bind port")
chordFlagSet.Int("rpc_port", 5500, "chord rpc port")
chordFlagSet.Int("num_vnodes", 16, "chord virtual node numbers")
chordFlagSet.Int("num_successors", 3, "chord successor node numbers")
chordFlagSet.Int("hash_bits", 160, "chord hash bits")
etcdFlagSet := flag.NewFlagSet("etcd", flag.PanicOnError)
etcdFlagSet.String("machines", "http://localhost:4001", "etcd machines")
etcdFlagSet.Int("pool_size", 4, "initial etcd client pool size")
etcdFlagSet.Int("max_pool_size", 64, "max etcd client pool size")
redisFlagSet := flag.NewFlagSet("redis", flag.PanicOnError)
redisFlagSet.String("meta_redis_addr", "tcp@localhost:6379", "meta redis address")
redisFlagSet.String("attr_redis_addr", "tcp@localhost:6479",
"attr redis address list. different group is filtered by ';'")
redisFlagSet.String("max_idle", "50", "redis pool max idle clients")
redisFlagSet.String("max_active", "100", "redis pool max active clients")
redisFlagSet.String("timeout", "3600", "close idle redis client after timeout")
globalconf.Register("basic", basicFlagSet)
globalconf.Register("serf", serfFlagSet)
globalconf.Register("chord", chordFlagSet)
globalconf.Register("etcd", etcdFlagSet)
globalconf.Register("redis", redisFlagSet)
conf.ParseAll()
Config = &SrvConfig{}
Config.BindIP = basicFlagSet.Lookup("bind_ip").Value.String()
Config.Workdir = basicFlagSet.Lookup("workdir").Value.String()
Config.LogLevel = basicFlagSet.Lookup("log_level").Value.String()
Config.LogFile = basicFlagSet.Lookup("log_file").Value.String()
Config.PidFile = basicFlagSet.Lookup("pid_file").Value.String()
Config.TCPRecvBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_recvbuf_size").Value.String())
Config.TCPSendBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_sendbuf_size").Value.String())
Config.TCPBufioNum, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_bufio_num").Value.String())
Config.TCPBufInsNum = runtime.NumCPU()
Config.HashFunc = sha1.New
Config.SerfBinPath = serfFlagSet.Lookup("bin_path").Value.String()
Config.SerfNodeName = serfFlagSet.Lookup("node_name").Value.String()
Config.SerfBindPort, err =
strconv.Atoi(serfFlagSet.Lookup("bind_port").Value.String())
Config.SerfBindAddr = fmt.Sprintf("0.0.0.0:%d", Config.SerfBindPort)
Config.SerfRPCAddr = serfFlagSet.Lookup("rpc_addr").Value.String()
Config.SerfEvHandler = serfFlagSet.Lookup("ev_handler").Value.String()
Config.SerfLogFile = serfFlagSet.Lookup("log_file").Value.String()
Config.Hostname = chordFlagSet.Lookup("hostname").Value.String()
Config.BindPort, err =
strconv.Atoi(chordFlagSet.Lookup("bind_port").Value.String())
Config.BindAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.BindPort)
Config.RPCPort, err =
strconv.Atoi(chordFlagSet.Lookup("rpc_port").Value.String())
Config.RPCAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.RPCPort)
Config.NumVnodes, err =
strconv.Atoi(chordFlagSet.Lookup("num_vnodes").Value.String())
Config.NumSuccessors, err =
strconv.Atoi(chordFlagSet.Lookup("num_successors").Value.String())
Config.HashBits, err =
strconv.Atoi(chordFlagSet.Lookup("hash_bits").Value.String())
machines := etcdFlagSet.Lookup("machines").Value.String()
Config.EtcdMachines = strings.Split(machines, ",")
Config.EtcdPoolSize, err =
strconv.Atoi(etcdFlagSet.Lookup("pool_size").Value.String())
Config.EtcdPoolMaxSize, err =
strconv.Atoi(etcdFlagSet.Lookup("max_pool_size").Value.String())
Config.MetaRedisAddr = redisFlagSet.Lookup("meta_redis_addr").Value.String()
attrAddrs := redisFlagSet.Lookup("attr_redis_addr").Value.String()
Config.AttrRedisAddrs = strings.Split(attrAddrs, ";")
Config.RedisMaxIdle, err =
strconv.Atoi(redisFlagSet.Lookup("max_idle").Value.String())
Config.RedisMaxActive, err =
strconv.Atoi(redisFlagSet.Lookup("max_active").Value.String())
Config.RedisIdleTimeout, err =
strconv.Atoi(redisFlagSet.Lookup("timeout").Value.String())
Config.Entrypoint = entrypoint
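// hex.DecodeString rejects odd-length input, so an odd-length start hash is
// padded with a leading "0"; the decoded value must be exactly HashBits/8
// bytes (20 bytes, i.e. 40 hex characters, for the default 160-bit hash).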
if len(starthash)%2 == 1 {
starthash = "0" + starthash
}
if sh, err := hex.DecodeString(starthash); err != nil {
return err
} else if len(sh) != Config.HashBits/8 {
return fmt.Errorf("error starthash hex string length %d, should be %d",
len(starthash), Config.HashBits/8*2)
} else {
Config.StartHash = sh[:]
}
return nil
}
func initLog(logFile, serfLogFile, logLevel string) error {
var format = logging.MustStringFormatter(
"%{time:2006-01-02 15:04:05.000} [%{level:.4s}] %{id:03x} [%{shortfunc}] %{message}",
)
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
backend1 := logging.NewLogBackend(f, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(dmq.LogLevelMap[logLevel], "")
logging.SetBackend(backend1Leveled)
serfLog, err = os.OpenFile(serfLogFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
return nil
}
func initServer() error {
log.Info("Datanode server is starting...")
EtcdCliPool = dmq.NewEtcdClientPool(
Config.EtcdMachines, Config.EtcdPoolSize, Config.EtcdPoolMaxSize)
DispConns = make(map[string]*DispConn)
var err error
CurDispNode, err = allocateDispNode(EtcdCliPool)
if err != nil {
return err
}
rcfg := dmq.NewRedisConfig(Config.MetaRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
MetaRCPool, err = dmq.NewRedisCliPool(rcfg)
if err != nil {
return err
}
return nil
}
func shutdownServer() |
func chordRoutine() {
conf := &chord.NodeConfig{
Serf: &chord.SerfConfig{
BinPath: Config.SerfBinPath,
NodeName: Config.SerfNodeName,
BindAddr: Config.SerfBindAddr,
RPCAddr: Config.SerfRPCAddr,
EvHandler: Config.SerfEvHandler,
},
Hostname: Config.Hostname,
HostIP: Config.BindIP,
BindAddr: Config.BindAddr,
RPCAddr: Config.RPCAddr,
NumVnodes: Config.NumVnodes,
NumSuccessors: Config.NumSuccessors,
HashFunc: sha1.New,
HashBits: Config.HashBits,
StartHash: Config.StartHash,
Entrypoint: Config.Entrypoint,
TCPRecvBufSize: Config.TCPRecvBufSize,
TCPSendBufSize: Config.TCPSendBufSize,
TCPBufInsNum: Config.TCPBufInsNum,
TCPBufioNum: Config.TCPBufioNum,
}
c := make(chan chord.Notification)
var err error
ChordNode, err = chord.Create(conf)
if err != nil {
panic(err)
}
ChordNode.SetLogger(log)
ChordNode.StartStatusTcp()
ChordNode.SerfSchdule(c, serfLog)
// FIXME: register datanode and vnodes at a more accurate time
if err := registerDataNode(Config, EtcdCliPool); err != nil {
panic(err)
}
if err := registerVnodes(Config, ChordNode, EtcdCliPool); err != nil {
panic(err)
}
go func() {
for {
notify := <-c
if notify.Err != nil {
log.Error("serf error %v: %s", notify.Err, notify.Msg)
}
}
}()
}
func startChordNode() error {
go chordRoutine()
return nil
}
func notifyService() error {
for _, attrRedisAddr := range Config.AttrRedisAddrs {
rcfg := dmq.NewRedisConfig(attrRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
rcpool, err := dmq.NewRedisCliPool(rcfg)
if err != nil {
return err
}
go attrWatcher(rcpool)
}
return nil
}
func main() {
var configFile string
var printVer bool
var entrypoint string
var starthash string
flag.BoolVar(&printVer, "version", false, "print version")
flag.StringVar(&configFile, "c", "config.ini", "specify config file")
flag.StringVar(&entrypoint, "e", "",
"serf entrypoint used for joining into cluster")
flag.StringVar(&starthash, "s", "", "chord node start hash hex string")
flag.Parse()
if printVer {
dmq.PrintVersion()
os.Exit(0)
}
if starthash == "" {
fmt.Println("Warning: starthash must be provided!")
flag.Usage()
os.Exit(-1)
}
if err := initConfig(configFile, entrypoint, starthash); err != nil {
panic(err)
}
if err := dmq.ProcessInit(Config.Workdir, Config.PidFile); err != nil {
panic(err)
}
if err := initLog(Config.LogFile, Config.SerfLogFile, Config.LogLevel); err != nil {
panic(err)
}
if err := initServer(); err != nil {
panic(err)
}
if err := startChordNode(); err != nil {
panic(err)
}
if err := notifyService(); err != nil {
panic(err)
}
startPubTCP(Config.BindAddr)
signalChan := initSignal()
handleSignal(signalChan)
}
| {
log.Info("Datanode stop...")
unRegisterDN(Config, ChordNode, EtcdCliPool)
os.Exit(0)
} | identifier_body |
main.go | package main
import (
"crypto/sha1"
"encoding/hex"
"flag"
"fmt"
"github.com/amyangfei/dynamicmq-go/chord"
dmq "github.com/amyangfei/dynamicmq-go/dynamicmq"
"github.com/op/go-logging"
"github.com/rakyll/globalconf"
"os"
"os/signal"
"runtime"
"strconv"
"strings"
"syscall"
)
// Server basic configuration
var Config *SrvConfig
// Etcd client pool
var EtcdCliPool *dmq.EtcdClientPool
// Meta redis client pool
var MetaRCPool *dmq.RedisCliPool
// common log
var log = logging.MustGetLogger("dynamicmq-match-datanode")
// logger used for serf daemon in chord node
var serfLog *os.File
// Node in chord topology
var ChordNode *chord.Node
// Mapping from subclient's id to subclient information
// The subclient's id is in BSON format, not hex string
var ClisInfo = dmq.NewConcurrentMap()
// mapping from a dispatcher's id (disp name) to its DispConn struct
var DispConns map[string]*DispConn
// the currently connected dispatcher
var CurDispNode *DispNode
// initSignal registers the signal handler.
func initSignal() chan os.Signal {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM,
syscall.SIGINT, syscall.SIGSTOP)
return c
}
func handleSignal(c chan os.Signal) {
// Block until a signal is received.
for {
s := <-c
log.Info("get a signal %s", s.String())
switch s {
case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGSTOP, syscall.SIGINT:
shutdownServer()
case syscall.SIGHUP:
// TODO reload
default:
return
}
}
}
func initConfig(configFile, entrypoint, starthash string) error {
conf, err := globalconf.NewWithOptions(&globalconf.Options{
Filename: configFile,
})
if err != nil {
return err
}
basicFlagSet := flag.NewFlagSet("basic", flag.PanicOnError)
basicFlagSet.String("bind_ip", "127.0.0.1", "server bind ip")
basicFlagSet.String("workdir", ".", "server working dir")
basicFlagSet.String("log_level", "DEBUG", "log level")
basicFlagSet.String("log_file", "./datanode.log", "log file path")
basicFlagSet.String("pid_file", "./datanode_pid", "pid file")
basicFlagSet.Int("tcp_recvbuf_size", 2048, "tcp receive buffer size")
basicFlagSet.Int("tcp_sendbuf_size", 2048, "tcp send buffer size")
basicFlagSet.Int("tcp_bufio_num", 64, "bufio num for each cache instance")
serfFlagSet := flag.NewFlagSet("serf", flag.PanicOnError)
serfFlagSet.String("bin_path", "/usr/local/bin/serf", "serf bin path")
serfFlagSet.String("node_name", "serf0101", "serf node name")
serfFlagSet.Int("bind_port", 7946, "serf bind port")
serfFlagSet.String("rpc_addr", "127.0.0.1:7373", "serf rpc addr")
serfFlagSet.String("ev_handler", "./serfev_handler.py", "serf event handler")
serfFlagSet.String("log_file", "./serf.log", "serf log file")
chordFlagSet := flag.NewFlagSet("chord", flag.PanicOnError)
chordFlagSet.String("hostname", "chod0101", "chord hostname")
chordFlagSet.Int("bind_port", 5000, "chord bind port")
chordFlagSet.Int("rpc_port", 5500, "chord rpc port")
chordFlagSet.Int("num_vnodes", 16, "chord virtual node numbers")
chordFlagSet.Int("num_successors", 3, "chord successor node numbers")
chordFlagSet.Int("hash_bits", 160, "chord hash bits")
etcdFlagSet := flag.NewFlagSet("etcd", flag.PanicOnError)
etcdFlagSet.String("machines", "http://localhost:4001", "etcd machines")
etcdFlagSet.Int("pool_size", 4, "initial etcd client pool size")
etcdFlagSet.Int("max_pool_size", 64, "max etcd client pool size")
redisFlagSet := flag.NewFlagSet("redis", flag.PanicOnError)
redisFlagSet.String("meta_redis_addr", "tcp@localhost:6379", "meta redis address")
redisFlagSet.String("attr_redis_addr", "tcp@localhost:6479",
"attr redis address list. different group is filtered by ';'")
redisFlagSet.String("max_idle", "50", "redis pool max idle clients")
redisFlagSet.String("max_active", "100", "redis pool max active clients")
redisFlagSet.String("timeout", "3600", "close idle redis client after timeout")
globalconf.Register("basic", basicFlagSet)
globalconf.Register("serf", serfFlagSet)
globalconf.Register("chord", chordFlagSet)
globalconf.Register("etcd", etcdFlagSet)
globalconf.Register("redis", redisFlagSet)
conf.ParseAll()
Config = &SrvConfig{}
Config.BindIP = basicFlagSet.Lookup("bind_ip").Value.String()
Config.Workdir = basicFlagSet.Lookup("workdir").Value.String()
Config.LogLevel = basicFlagSet.Lookup("log_level").Value.String()
Config.LogFile = basicFlagSet.Lookup("log_file").Value.String()
Config.PidFile = basicFlagSet.Lookup("pid_file").Value.String()
Config.TCPRecvBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_recvbuf_size").Value.String())
Config.TCPSendBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_sendbuf_size").Value.String())
Config.TCPBufioNum, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_bufio_num").Value.String())
Config.TCPBufInsNum = runtime.NumCPU()
Config.HashFunc = sha1.New
Config.SerfBinPath = serfFlagSet.Lookup("bin_path").Value.String()
Config.SerfNodeName = serfFlagSet.Lookup("node_name").Value.String()
Config.SerfBindPort, err =
strconv.Atoi(serfFlagSet.Lookup("bind_port").Value.String())
Config.SerfBindAddr = fmt.Sprintf("0.0.0.0:%d", Config.SerfBindPort)
Config.SerfRPCAddr = serfFlagSet.Lookup("rpc_addr").Value.String()
Config.SerfEvHandler = serfFlagSet.Lookup("ev_handler").Value.String()
Config.SerfLogFile = serfFlagSet.Lookup("log_file").Value.String()
Config.Hostname = chordFlagSet.Lookup("hostname").Value.String()
Config.BindPort, err =
strconv.Atoi(chordFlagSet.Lookup("bind_port").Value.String())
Config.BindAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.BindPort)
Config.RPCPort, err =
strconv.Atoi(chordFlagSet.Lookup("rpc_port").Value.String())
Config.RPCAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.RPCPort)
Config.NumVnodes, err =
strconv.Atoi(chordFlagSet.Lookup("num_vnodes").Value.String())
Config.NumSuccessors, err =
strconv.Atoi(chordFlagSet.Lookup("num_successors").Value.String())
Config.HashBits, err =
strconv.Atoi(chordFlagSet.Lookup("hash_bits").Value.String())
machines := etcdFlagSet.Lookup("machines").Value.String()
Config.EtcdMachines = strings.Split(machines, ",")
Config.EtcdPoolSize, err =
strconv.Atoi(etcdFlagSet.Lookup("pool_size").Value.String())
Config.EtcdPoolMaxSize, err =
strconv.Atoi(etcdFlagSet.Lookup("max_pool_size").Value.String())
Config.MetaRedisAddr = redisFlagSet.Lookup("meta_redis_addr").Value.String()
attrAddrs := redisFlagSet.Lookup("attr_redis_addr").Value.String()
Config.AttrRedisAddrs = strings.Split(attrAddrs, ";")
Config.RedisMaxIdle, err =
strconv.Atoi(redisFlagSet.Lookup("max_idle").Value.String())
Config.RedisMaxActive, err =
strconv.Atoi(redisFlagSet.Lookup("max_active").Value.String())
Config.RedisIdleTimeout, err =
strconv.Atoi(redisFlagSet.Lookup("timeout").Value.String())
Config.Entrypoint = entrypoint
if len(starthash)%2 == 1 {
starthash = "0" + starthash
}
if sh, err := hex.DecodeString(starthash); err != nil {
return err
} else if len(sh) != Config.HashBits/8 {
return fmt.Errorf("error starthash hex string length %d, should be %d",
len(starthash), Config.HashBits/8*2)
} else {
Config.StartHash = sh[:]
}
return nil
}
func | (logFile, serfLogFile, logLevel string) error {
var format = logging.MustStringFormatter(
"%{time:2006-01-02 15:04:05.000} [%{level:.4s}] %{id:03x} [%{shortfunc}] %{message}",
)
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
backend1 := logging.NewLogBackend(f, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(dmq.LogLevelMap[logLevel], "")
logging.SetBackend(backend1Leveled)
serfLog, err = os.OpenFile(serfLogFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
return nil
}
func initServer() error {
log.Info("Datanode server is starting...")
EtcdCliPool = dmq.NewEtcdClientPool(
Config.EtcdMachines, Config.EtcdPoolSize, Config.EtcdPoolMaxSize)
DispConns = make(map[string]*DispConn)
var err error
CurDispNode, err = allocateDispNode(EtcdCliPool)
if err != nil {
return err
}
rcfg := dmq.NewRedisConfig(Config.MetaRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
MetaRCPool, err = dmq.NewRedisCliPool(rcfg)
if err != nil {
return err
}
return nil
}
func shutdownServer() {
log.Info("Datanode stop...")
unRegisterDN(Config, ChordNode, EtcdCliPool)
os.Exit(0)
}
func chordRoutine() {
conf := &chord.NodeConfig{
Serf: &chord.SerfConfig{
BinPath: Config.SerfBinPath,
NodeName: Config.SerfNodeName,
BindAddr: Config.SerfBindAddr,
RPCAddr: Config.SerfRPCAddr,
EvHandler: Config.SerfEvHandler,
},
Hostname: Config.Hostname,
HostIP: Config.BindIP,
BindAddr: Config.BindAddr,
RPCAddr: Config.RPCAddr,
NumVnodes: Config.NumVnodes,
NumSuccessors: Config.NumSuccessors,
HashFunc: sha1.New,
HashBits: Config.HashBits,
StartHash: Config.StartHash,
Entrypoint: Config.Entrypoint,
TCPRecvBufSize: Config.TCPRecvBufSize,
TCPSendBufSize: Config.TCPSendBufSize,
TCPBufInsNum: Config.TCPBufInsNum,
TCPBufioNum: Config.TCPBufioNum,
}
c := make(chan chord.Notification)
var err error
ChordNode, err = chord.Create(conf)
if err != nil {
panic(err)
}
ChordNode.SetLogger(log)
ChordNode.StartStatusTcp()
ChordNode.SerfSchdule(c, serfLog)
// FIXME: register datanode and vnodes at a more accurate time
if err := registerDataNode(Config, EtcdCliPool); err != nil {
panic(err)
}
if err := registerVnodes(Config, ChordNode, EtcdCliPool); err != nil {
panic(err)
}
go func() {
for {
notify := <-c
if notify.Err != nil {
log.Error("serf error %v: %s", notify.Err, notify.Msg)
}
}
}()
}
func startChordNode() error {
go chordRoutine()
return nil
}
func notifyService() error {
for _, attrRedisAddr := range Config.AttrRedisAddrs {
rcfg := dmq.NewRedisConfig(attrRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
rcpool, err := dmq.NewRedisCliPool(rcfg)
if err != nil {
return err
}
go attrWatcher(rcpool)
}
return nil
}
func main() {
var configFile string
var printVer bool
var entrypoint string
var starthash string
flag.BoolVar(&printVer, "version", false, "print version")
flag.StringVar(&configFile, "c", "config.ini", "specify config file")
flag.StringVar(&entrypoint, "e", "",
"serf entrypoint used for joining into cluster")
flag.StringVar(&starthash, "s", "", "chord node start hash hex string")
flag.Parse()
if printVer {
dmq.PrintVersion()
os.Exit(0)
}
if starthash == "" {
fmt.Println("Warning: starthash must be provided!")
flag.Usage()
os.Exit(-1)
}
if err := initConfig(configFile, entrypoint, starthash); err != nil {
panic(err)
}
if err := dmq.ProcessInit(Config.Workdir, Config.PidFile); err != nil {
panic(err)
}
if err := initLog(Config.LogFile, Config.SerfLogFile, Config.LogLevel); err != nil {
panic(err)
}
if err := initServer(); err != nil {
panic(err)
}
if err := startChordNode(); err != nil {
panic(err)
}
if err := notifyService(); err != nil {
panic(err)
}
startPubTCP(Config.BindAddr)
signalChan := initSignal()
handleSignal(signalChan)
}
| initLog | identifier_name |
main.go | package main
import (
"crypto/sha1"
"encoding/hex"
"flag"
"fmt"
"github.com/amyangfei/dynamicmq-go/chord"
dmq "github.com/amyangfei/dynamicmq-go/dynamicmq"
"github.com/op/go-logging"
"github.com/rakyll/globalconf"
"os"
"os/signal"
"runtime"
"strconv"
"strings"
"syscall"
)
// Server basic configuration
var Config *SrvConfig
// Etcd client pool
var EtcdCliPool *dmq.EtcdClientPool
// Meta redis client pool
var MetaRCPool *dmq.RedisCliPool
// common log
var log = logging.MustGetLogger("dynamicmq-match-datanode")
// logger used for serf daemon in chord node
var serfLog *os.File
// Node in chord topology
var ChordNode *chord.Node
// Mapping from subclient's id to subclient information
// The subclient's id is in BSON format, not hex string
var ClisInfo = dmq.NewConcurrentMap()
// mapping from a dispatcher's id (disp name) to its DispConn struct
var DispConns map[string]*DispConn
// the currently connected dispatcher
var CurDispNode *DispNode
// initSignal registers the signal handler.
func initSignal() chan os.Signal {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM,
syscall.SIGINT, syscall.SIGSTOP)
return c
}
func handleSignal(c chan os.Signal) {
// Block until a signal is received.
for {
s := <-c
log.Info("get a signal %s", s.String())
switch s {
case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGSTOP, syscall.SIGINT:
shutdownServer()
case syscall.SIGHUP:
// TODO reload
default:
return
}
}
}
func initConfig(configFile, entrypoint, starthash string) error {
conf, err := globalconf.NewWithOptions(&globalconf.Options{
Filename: configFile,
})
if err != nil {
return err
}
basicFlagSet := flag.NewFlagSet("basic", flag.PanicOnError)
basicFlagSet.String("bind_ip", "127.0.0.1", "server bind ip")
basicFlagSet.String("workdir", ".", "server working dir")
basicFlagSet.String("log_level", "DEBUG", "log level")
basicFlagSet.String("log_file", "./datanode.log", "log file path")
basicFlagSet.String("pid_file", "./datanode_pid", "pid file")
basicFlagSet.Int("tcp_recvbuf_size", 2048, "tcp receive buffer size")
basicFlagSet.Int("tcp_sendbuf_size", 2048, "tcp send buffer size")
basicFlagSet.Int("tcp_bufio_num", 64, "bufio num for each cache instance")
serfFlagSet := flag.NewFlagSet("serf", flag.PanicOnError)
serfFlagSet.String("bin_path", "/usr/local/bin/serf", "serf bin path")
serfFlagSet.String("node_name", "serf0101", "serf node name")
serfFlagSet.Int("bind_port", 7946, "serf bind port")
serfFlagSet.String("rpc_addr", "127.0.0.1:7373", "serf rpc addr")
serfFlagSet.String("ev_handler", "./serfev_handler.py", "serf event handler")
serfFlagSet.String("log_file", "./serf.log", "serf log file")
chordFlagSet := flag.NewFlagSet("chord", flag.PanicOnError)
chordFlagSet.String("hostname", "chod0101", "chord hostname")
chordFlagSet.Int("bind_port", 5000, "chord bind port")
chordFlagSet.Int("rpc_port", 5500, "chord rpc port")
chordFlagSet.Int("num_vnodes", 16, "chord virtual node numbers")
chordFlagSet.Int("num_successors", 3, "chord successor node numbers")
chordFlagSet.Int("hash_bits", 160, "chord hash bits")
etcdFlagSet := flag.NewFlagSet("etcd", flag.PanicOnError)
etcdFlagSet.String("machines", "http://localhost:4001", "etcd machines")
etcdFlagSet.Int("pool_size", 4, "initial etcd client pool size")
etcdFlagSet.Int("max_pool_size", 64, "max etcd client pool size")
redisFlagSet := flag.NewFlagSet("redis", flag.PanicOnError)
redisFlagSet.String("meta_redis_addr", "tcp@localhost:6379", "meta redis address")
redisFlagSet.String("attr_redis_addr", "tcp@localhost:6479",
"attr redis address list. different group is filtered by ';'")
redisFlagSet.String("max_idle", "50", "redis pool max idle clients")
redisFlagSet.String("max_active", "100", "redis pool max active clients")
redisFlagSet.String("timeout", "3600", "close idle redis client after timeout")
globalconf.Register("basic", basicFlagSet)
globalconf.Register("serf", serfFlagSet)
globalconf.Register("chord", chordFlagSet)
globalconf.Register("etcd", etcdFlagSet)
globalconf.Register("redis", redisFlagSet)
conf.ParseAll()
Config = &SrvConfig{}
Config.BindIP = basicFlagSet.Lookup("bind_ip").Value.String()
Config.Workdir = basicFlagSet.Lookup("workdir").Value.String()
Config.LogLevel = basicFlagSet.Lookup("log_level").Value.String()
Config.LogFile = basicFlagSet.Lookup("log_file").Value.String()
Config.PidFile = basicFlagSet.Lookup("pid_file").Value.String()
Config.TCPRecvBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_recvbuf_size").Value.String())
Config.TCPSendBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_sendbuf_size").Value.String())
Config.TCPBufioNum, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_bufio_num").Value.String())
Config.TCPBufInsNum = runtime.NumCPU()
Config.HashFunc = sha1.New
Config.SerfBinPath = serfFlagSet.Lookup("bin_path").Value.String()
Config.SerfNodeName = serfFlagSet.Lookup("node_name").Value.String()
Config.SerfBindPort, err =
strconv.Atoi(serfFlagSet.Lookup("bind_port").Value.String())
Config.SerfBindAddr = fmt.Sprintf("0.0.0.0:%d", Config.SerfBindPort)
Config.SerfRPCAddr = serfFlagSet.Lookup("rpc_addr").Value.String()
Config.SerfEvHandler = serfFlagSet.Lookup("ev_handler").Value.String()
Config.SerfLogFile = serfFlagSet.Lookup("log_file").Value.String()
Config.Hostname = chordFlagSet.Lookup("hostname").Value.String()
Config.BindPort, err =
strconv.Atoi(chordFlagSet.Lookup("bind_port").Value.String())
Config.BindAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.BindPort)
Config.RPCPort, err =
strconv.Atoi(chordFlagSet.Lookup("rpc_port").Value.String())
Config.RPCAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.RPCPort)
Config.NumVnodes, err =
strconv.Atoi(chordFlagSet.Lookup("num_vnodes").Value.String())
Config.NumSuccessors, err =
strconv.Atoi(chordFlagSet.Lookup("num_successors").Value.String())
Config.HashBits, err =
strconv.Atoi(chordFlagSet.Lookup("hash_bits").Value.String())
machines := etcdFlagSet.Lookup("machines").Value.String()
Config.EtcdMachines = strings.Split(machines, ",")
Config.EtcdPoolSize, err =
strconv.Atoi(etcdFlagSet.Lookup("pool_size").Value.String())
Config.EtcdPoolMaxSize, err =
strconv.Atoi(etcdFlagSet.Lookup("max_pool_size").Value.String()) | Config.RedisMaxIdle, err =
strconv.Atoi(redisFlagSet.Lookup("max_idle").Value.String())
Config.RedisMaxActive, err =
strconv.Atoi(redisFlagSet.Lookup("max_active").Value.String())
Config.RedisIdleTimeout, err =
strconv.Atoi(redisFlagSet.Lookup("timeout").Value.String())
Config.Entrypoint = entrypoint
if len(starthash)%2 == 1 {
starthash = "0" + starthash
}
if sh, err := hex.DecodeString(starthash); err != nil {
return err
} else if len(sh) != Config.HashBits/8 {
return fmt.Errorf("error starthash hex string length %d, should be %d",
len(starthash), Config.HashBits/8*2)
} else {
Config.StartHash = sh[:]
}
return nil
}
func initLog(logFile, serfLogFile, logLevel string) error {
var format = logging.MustStringFormatter(
"%{time:2006-01-02 15:04:05.000} [%{level:.4s}] %{id:03x} [%{shortfunc}] %{message}",
)
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
backend1 := logging.NewLogBackend(f, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(dmq.LogLevelMap[logLevel], "")
logging.SetBackend(backend1Leveled)
serfLog, err = os.OpenFile(serfLogFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
return nil
}
func initServer() error {
log.Info("Datanode server is starting...")
EtcdCliPool = dmq.NewEtcdClientPool(
Config.EtcdMachines, Config.EtcdPoolSize, Config.EtcdPoolMaxSize)
DispConns = make(map[string]*DispConn)
var err error
CurDispNode, err = allocateDispNode(EtcdCliPool)
if err != nil {
return err
}
rcfg := dmq.NewRedisConfig(Config.MetaRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
MetaRCPool, err = dmq.NewRedisCliPool(rcfg)
if err != nil {
return err
}
return nil
}
func shutdownServer() {
log.Info("Datanode stop...")
unRegisterDN(Config, ChordNode, EtcdCliPool)
os.Exit(0)
}
func chordRoutine() {
conf := &chord.NodeConfig{
Serf: &chord.SerfConfig{
BinPath: Config.SerfBinPath,
NodeName: Config.SerfNodeName,
BindAddr: Config.SerfBindAddr,
RPCAddr: Config.SerfRPCAddr,
EvHandler: Config.SerfEvHandler,
},
Hostname: Config.Hostname,
HostIP: Config.BindIP,
BindAddr: Config.BindAddr,
RPCAddr: Config.RPCAddr,
NumVnodes: Config.NumVnodes,
NumSuccessors: Config.NumSuccessors,
HashFunc: sha1.New,
HashBits: Config.HashBits,
StartHash: Config.StartHash,
Entrypoint: Config.Entrypoint,
TCPRecvBufSize: Config.TCPRecvBufSize,
TCPSendBufSize: Config.TCPSendBufSize,
TCPBufInsNum: Config.TCPBufInsNum,
TCPBufioNum: Config.TCPBufioNum,
}
c := make(chan chord.Notification)
var err error
ChordNode, err = chord.Create(conf)
if err != nil {
panic(err)
}
ChordNode.SetLogger(log)
ChordNode.StartStatusTcp()
ChordNode.SerfSchdule(c, serfLog)
// FIXME: register datanode and vnodes at a more accurate time
if err := registerDataNode(Config, EtcdCliPool); err != nil {
panic(err)
}
if err := registerVnodes(Config, ChordNode, EtcdCliPool); err != nil {
panic(err)
}
go func() {
for {
notify := <-c
if notify.Err != nil {
log.Error("serf error %v: %s", notify.Err, notify.Msg)
}
}
}()
}
func startChordNode() error {
go chordRoutine()
return nil
}
func notifyService() error {
for _, attrRedisAddr := range Config.AttrRedisAddrs {
rcfg := dmq.NewRedisConfig(attrRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
rcpool, err := dmq.NewRedisCliPool(rcfg)
if err != nil {
return err
}
go attrWatcher(rcpool)
}
return nil
}
func main() {
var configFile string
var printVer bool
var entrypoint string
var starthash string
flag.BoolVar(&printVer, "version", false, "print version")
flag.StringVar(&configFile, "c", "config.ini", "specify config file")
flag.StringVar(&entrypoint, "e", "",
"serf entrypoint used for joining into cluster")
flag.StringVar(&starthash, "s", "", "chord node start hash hex string")
flag.Parse()
if printVer {
dmq.PrintVersion()
os.Exit(0)
}
if starthash == "" {
fmt.Println("Warning: starthash must be provided!")
flag.Usage()
os.Exit(-1)
}
if err := initConfig(configFile, entrypoint, starthash); err != nil {
panic(err)
}
if err := dmq.ProcessInit(Config.Workdir, Config.PidFile); err != nil {
panic(err)
}
if err := initLog(Config.LogFile, Config.SerfLogFile, Config.LogLevel); err != nil {
panic(err)
}
if err := initServer(); err != nil {
panic(err)
}
if err := startChordNode(); err != nil {
panic(err)
}
if err := notifyService(); err != nil {
panic(err)
}
startPubTCP(Config.BindAddr)
signalChan := initSignal()
handleSignal(signalChan)
} |
Config.MetaRedisAddr = redisFlagSet.Lookup("meta_redis_addr").Value.String()
attrAddrs := redisFlagSet.Lookup("attr_redis_addr").Value.String()
Config.AttrRedisAddrs = strings.Split(attrAddrs, ";") | random_line_split |
hypsometry_plots.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 11:57:37 2021
Hypsometry plots
To Do:
select only glaciers whose 'RGIId' matches a 'RGIId' in the filtered dissolved outline
@author: apj
"""
import pandas as pd
import geopandas as gpd
from rasterstats import zonal_stats
import rasterio as rio
import rasterio.mask
import matplotlib.pyplot as plt
import numpy as np
import glob
from shapely.validation import explain_validity
import os
# filepaths
fp_tiff = r'/Users/apj/Documents/_HY/Greenland/dem_diff/vgridshift/filled_ddem/*global*.tif'
fp_outlines = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/final/*final_edit.shp'
fp_diss_outline = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/Dissolved_outline_50s80s2010s_utm_exclude_bin_thresh.shp'
fp_surging_outline = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/Dissolved_outline_50s80s2010s_all_surging.shp'
fp_exclude = r'/Users/apj/Documents/_HY/Greenland/dem_diff/filled_ddem/glaciers_to_exclude_edit.csv'
fp_dem = r'/Users/apj/Documents/_HY/Greenland/DEM_masked/2016_dem_studyarea_3681x3295.tif'
fp_c = r'/Users/apj/Documents/_HY/Greenland/contour/2016_filled_contour.shp'
def checkGeom(geodataframe):
"""
Function to check validity of geometry. Returns message from shapely explain_validity if geometry is not 'Valid Geometry'
Parameters
----------
geodataframe : GeoDataFrame
GeoDataFrame whose geometry column is checked for validity.
Returns
-------
Message.
"""
for geometry in geodataframe.geometry:
if explain_validity(geometry) != 'Valid Geometry':
print(explain_validity(geometry))
def areaDiff(outline, elevation_bin):
"""
Function to calculate area in an elevation bin
Parameters
----------
outline : Polygon
Polygon containing outlines.
elevation_bin : Polygon
Polygon containing elevation ranges
Returns
-------
elev_range_area_sum : float
Sum of areas from outline polygon inside the elevation bin
"""
# clip outlines by selected elevation range
outline_elev_range = gpd.clip(outline, elevation_bin, keep_geom_type=(True))
# check that clipped dataframe is not empty
if outline_elev_range.empty:
return
# compute area in km2
elev_range_area = outline_elev_range.geometry.area / 1000000
# sum areas
elev_range_area_sum = elev_range_area.sum()
return elev_range_area_sum
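# Minimal usage sketch (toy square geometries in a metric CRS, not project
# data): clipping a 2 km x 2 km outline with a 1 km x 2 km elevation-bin
# polygon should return 2.0 km2.
# from shapely.geometry import box
# toy_outline = gpd.GeoDataFrame(geometry=[box(0, 0, 2000, 2000)], crs='EPSG:32624')
# toy_bin = gpd.GeoDataFrame(geometry=[box(0, 0, 1000, 2000)], crs='EPSG:32624')
# print(areaDiff(toy_outline, toy_bin)) # -> 2.0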
# function from pybob
def bin_data(bins, data2bin, bindata, mode='mean', nbinned=False):
|
# function to extract data by mask
def glacierMask(fp_raster, features):
with rasterio.open(fp_raster) as src:
glac_mask, glac_out_transform = rasterio.mask.mask(src, features, crop=False)
glac_nodata = src.nodata
masked = glac_mask[0]
masked[(masked == glac_nodata)] = np.nan
return masked
# function to get elevation bins
def getBins(array, bwidth):
# get elev min and max
elev_min = np.nanmin(array)
elev_max = np.nanmax(array)
# define elevation range
erange = elev_max - elev_min
min_el = elev_min - (elev_min % bwidth)
max_el = elev_max + (bwidth - (elev_max % bwidth))
bins = np.arange(min_el, max_el+1, bwidth)
return bins
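# Example (illustrative values): for elevations between 412 m and 1287 m and a
# 100 m bin width, the edges run from the bin floor below the minimum to the
# bin ceiling above the maximum:
# getBins(np.array([412.0, 1287.0]), 100) # -> array([ 400., 500., ..., 1300.])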
def excludeByID(excluded_list, in_gdf, id_column):
# select all but ones in the list
selection = in_gdf[~in_gdf[id_column].isin(excluded_list)]
return selection
# read files
gdf = gpd.read_file(fp_diss_outline) # glacier outlines, the ones passing the filter in void fill
checkGeom(gdf)
contour = gpd.read_file(fp_c) # filled contours
#exclude = pd.read_csv(fp_exclude) # glaciers to exclude
# rename columns
#exclude = exclude.rename(columns={'Unnamed: 0': 'index', '0': 'ID'})
#remove duplicates and drop extra columns
#exclude = exclude.drop_duplicates(subset='ID')
#exclude = exclude.drop(columns=['index'])
# to list
#excludelist = exclude['ID'].tolist()
# glaciers to select
#sel = ['RGI60-05.02328_1']
sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.01987_1', 'RGI60-05.02303', 'RGI60-05.02126'] # observed surges
#sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.02108', 'RGI60-05.02303', 'RGI60-05.01920', 'RGI60-05.01987_1', 'RGI60-05.02213', 'RGI60-05.02126'] # observed and probable surge
# exclude and select certain glaciers
#gdf = excludeByID(excludelist, gdf, 'RGIId')
# get id's to list, so same selection can be made for edited outlines
gdf_idlist = list(gdf['RGIId'])
#gdf.to_file(fp_diss_outline_exc, driver='ESRI Shapefile')
gdf = gdf[gdf.RGIId.isin(sel)] # subset
#gdf.to_file(fp_surging_outline, driver='ESRI Shapefile')
# read outline dataframes and assign them to dictionary where basename is the key to each dataframe
outlinedict = {} # empty dictionary
for f in glob.glob(fp_outlines):
# get basename
bname = os.path.basename(f)
ol = gpd.read_file(f)
#ol = excludeByID(excludelist, ol, 'RGIId') # exclude certain glaciers
ol = ol[ol.RGIId.isin(gdf_idlist)] # selection based on the id's from the dissolved outlines
ol = ol[ol.RGIId.isin(sel)] # additional dataframe subsetting, ie. for surging glaciers only
# check geometry validity before creating dictionary
for geom in ol.geometry:
if explain_validity(geom) != 'Valid Geometry':
print(bname + ' Geometry has invalid parts')
print(explain_validity(geom))
# add dataframe to dictionary with basename as key
outlinedict[bname] = ol
# test if non-matching id's found
#keyslist = list(outlinedict)
#test = outlinedict.get(keyslist[2])
#testlist = list(test['RGIId'])
#for i in gdf_idlist:
# if i not in testlist:
# print(i)
# check contour columns and elevation ranges
contour.columns
contour['range_cont'].unique()
# exclude NoData from contour ranges
contsel = contour[contour['range_cont'] != '<NoData>']
# dissolve by contour range
contdis = contsel.dissolve(by='range_cont')
contdis = contdis.reset_index()
# read dem
with rio.open(fp_dem) as src:
dem = src.read(1)
dem_nodata = src.nodata
dem[dem == dem_nodata] = np.nan
# get geometries from selected polygons
shapes = gdf.geometry
# bins
bins = getBins(dem, 100)
# dataframe to store results
result = pd.DataFrame(bins, columns=['bins'])
# read tiff files to list
tifflist = []
for t in glob.glob(fp_tiff):
tifflist.append(t)
# get elevation differences for each elevation bin
for tif in tifflist:
bname = os.path.basename(tif)
# read ddem and mask with selected glaciers
ddem = glacierMask(tif, shapes)
# classify dem to bins
digitized = np.digitize(dem, bins)
# calculate average elevation difference per bin
bin_means = bin_data(bins, ddem, dem, mode='mean', nbinned=False)
# parse column name
colname = 'mu_dh_' + bname[0:12]
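# e.g. a ddem file whose name starts with '1953_to_1985' yields the column 'mu_dh_1953_to_1985'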
# update results
for i, _ in enumerate(bins):
result.loc[result['bins'] == bins[i], colname] = bin_means[i]
# update bins column to integer
result['bins'] = result['bins'].astype(int)
# list for area sum strings
asumstr = []
# add area change to new columns
# loop through dictionary keys and values
for x, y in outlinedict.items():
# store the first four characters (the year) from the filename to variable
year = x[:4]
# check total glacierized area
g_area = sum(y.area) / 1000000
unc_a = g_area * 0.1
print('Area in ' + str(year) + ': {:.2f} ± {:.2f} km2'.format(g_area, unc_a)) #str(round(g_area / 1000000, 3)) + ' km2')
areasum_str = str(year) + ': {:.2f} ± {:.2f} $km^2$'.format(g_area, unc_a)
asumstr.append(areasum_str)
# add column for results
result[str(x[:4])+'Akm2'] = ""
# loop through elevation bins and calculate area altitude difference for each bin
for i in bins:
i = i.astype(int)
# selection by contour range before applying functions
elev_bin = contdis[contdis['low_cont'] == i.astype(str)]
# use function
out = areaDiff(y, elev_bin)
if out is None:
out = 0
# store result to dataframe
result.loc[result['bins'] == i, str(x[:4])+'Akm2'] = out
# calculate area differences (e.g. 2016 - 1953, so positive values show area increase and negative decrease)
result['dA53t85'] = result['1985Akm2'] - result['1953Akm2']
result['dA53t16'] = result['2016Akm2'] - result['1953Akm2']
result['dA85t16'] = result['2016Akm2'] - result['1985Akm2']
result = result.dropna(axis=0, how='any')
# figure output
fig_out = r'/Users/apj/Documents/_HY/Greenland/contour/figures/vgridshift/hypsometry_active_surging_glaciers_filled_global_mean.png'
# create hypsometry and area altitude plot
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10,10))
# hypsometry plot
line5385 = axes[0].plot(result['mu_dh_1953_to_1985'], result['bins'], marker='p', color='k', linewidth= 0.9, label='dh 1953 to 1985')
line532016 = axes[0].plot(result['mu_dh_1953_to_2016'], result['bins'], marker='v', color='b', linewidth=0.9, label='dh 1953 to 2016')
line852016 = axes[0].plot(result['mu_dh_1985_to_2016'], result['bins'], marker='s', color='g', linewidth=0.9, label='dh 1985 to 2016')
axes[0].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[0].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[0].axvline(0, color='grey', ls='--')
axes[0].set_ylabel('Elevation bin (m)')
axes[0].set_xlabel('Average elevation difference (m)')
axes[0].legend(loc=2)
axes[0].grid()
# area-altitude plot
area1953 = axes[1].plot(result['1953Akm2'], result['bins'], marker='s', color='k', linewidth= 0.9, label='1953')
area1985 = axes[1].plot(result['1985Akm2'], result['bins'], marker='^', color='#994C00', linewidth=0.9, label='1985')
area2016 = axes[1].plot(result['2016Akm2'], result['bins'], marker='o', color='#006633', linewidth=0.9, label='2016')
axes[1].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[1].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[1].axvline(0, color='grey', ls='--')
axes[1].set_ylabel('Elevation bin (m)')
axes[1].set_xlabel('Area altitude distribution ($km^2$)')
axes[1].legend(loc=1)
axes[1].grid()
textstr = '\n'.join((
'Glacierized area',
asumstr[0],
asumstr[1],
asumstr[2]))
# matplotlib patch properties
props = dict(boxstyle='round', facecolor='white', alpha=1)
# place text box in axes coords
axes[1].text(0.5, 0.05, textstr, transform=axes[1].transAxes, fontsize=10,
verticalalignment='center', bbox=props)
# fig title
fig.suptitle('Active surging glaciers', fontsize=20)
plt.tight_layout(pad=1.5)
plt.savefig(fig_out, dpi=600, format='png')
| """
Place data into bins based on a secondary dataset, and calculate statistics on them.
:param bins: array-like structure indicating the bins into which data should be placed.
:param data2bin: data that should be binned.
:param bindata: secondary dataset that decides how data2bin should be binned. Should have same size/shape
as data2bin.
:param mode: How to calculate statistics of binned data. One of 'mean', 'median', 'std', 'max', or 'min'.
:param nbinned: Return a second array, nbinned, with number of data points that fit into each bin.
Default is False.
:type bins: array-like
:type data2bin: array-like
:type bindata: array-like
:type mode: str
:type nbinned: bool
:returns binned, nbinned: calculated, binned data with same size as bins input. If nbinned is True, returns a second
array with the number of inputs for each bin.
"""
assert mode in ['mean', 'median', 'std', 'max', 'min'], "mode not recognized: {}".format(mode)
digitized = np.digitize(bindata, bins)
binned = np.zeros(len(bins)) * np.nan
if nbinned:
numbinned = np.zeros(len(bins))
if mode == 'mean':
for i, _ in enumerate(bins):
binned[i] = np.nanmean(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'median':
for i, _ in enumerate(bins):
binned[i] = np.nanmedian(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'std':
for i, _ in enumerate(bins):
binned[i] = np.nanstd(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'max':
for i, _ in enumerate(bins):
binned[i] = np.nanmax(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'min':
for i, _ in enumerate(bins):
binned[i] = np.nanmin(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
else:
raise ValueError('mode must be mean, median, std, max, or min')
if nbinned:
return np.array(binned), np.array(numbinned)
else:
return np.array(binned) | identifier_body |
hypsometry_plots.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 11:57:37 2021
Hypsometry plots
To Do:
select only glaciers whose 'RGIId' matches a 'RGIId' in the filtered dissolved outline
@author: apj
"""
import pandas as pd
import geopandas as gpd
from rasterstats import zonal_stats
import rasterio as rio
import rasterio.mask
import matplotlib.pyplot as plt
import numpy as np
import glob
from shapely.validation import explain_validity
import os
# filepaths
fp_tiff = r'/Users/apj/Documents/_HY/Greenland/dem_diff/vgridshift/filled_ddem/*global*.tif'
fp_outlines = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/final/*final_edit.shp'
fp_diss_outline = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/Dissolved_outline_50s80s2010s_utm_exclude_bin_thresh.shp'
fp_surging_outline = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/Dissolved_outline_50s80s2010s_all_surging.shp'
fp_exclude = r'/Users/apj/Documents/_HY/Greenland/dem_diff/filled_ddem/glaciers_to_exclude_edit.csv'
fp_dem = r'/Users/apj/Documents/_HY/Greenland/DEM_masked/2016_dem_studyarea_3681x3295.tif'
fp_c = r'/Users/apj/Documents/_HY/Greenland/contour/2016_filled_contour.shp'
def checkGeom(geodataframe):
"""
Function to check validity of geometry. Returns message from shapely explain_validity if geometry is not 'Valid Geometry'
Parameters
----------
geodataframe : GeoDataFrame
GeoDataFrame whose geometry column is checked for validity.
Returns
-------
Message.
"""
for geometry in geodataframe.geometry:
if explain_validity(geometry) != 'Valid Geometry':
print(explain_validity(geometry))
def areaDiff(outline, elevation_bin):
"""
Function to calculate area in an elevation bin
Parameters
----------
outline : Polygon
Polygon containing outlines.
elevation_bin : Polygon
Polygon containing elevation ranges
Returns
-------
elev_range_area_sum : float
Sum of areas from outline polygon inside the elevation bin
"""
# clip outlines by selected elevation range
outline_elev_range = gpd.clip(outline, elevation_bin, keep_geom_type=(True))
# check that clipped dataframe is not empty
if outline_elev_range.empty:
return
# compute area in km2
elev_range_area = outline_elev_range.geometry.area / 1000000
# sum areas
elev_range_area_sum = elev_range_area.sum()
return elev_range_area_sum
# function from pybob
def bin_data(bins, data2bin, bindata, mode='mean', nbinned=False):
"""
Place data into bins based on a secondary dataset, and calculate statistics on them.
:param bins: array-like structure indicating the bins into which data should be placed.
:param data2bin: data that should be binned.
:param bindata: secondary dataset that decides how data2bin should be binned. Should have same size/shape
as data2bin.
:param mode: How to calculate statistics of binned data. One of 'mean', 'median', 'std', 'max', or 'min'.
:param nbinned: Return a second array, nbinned, with number of data points that fit into each bin.
Default is False.
:type bins: array-like
:type data2bin: array-like
:type bindata: array-like
:type mode: str
:type nbinned: bool
:returns binned, nbinned: calculated, binned data with same size as bins input. If nbinned is True, returns a second
array with the number of inputs for each bin.
"""
assert mode in ['mean', 'median', 'std', 'max', 'min'], "mode not recognized: {}".format(mode)
digitized = np.digitize(bindata, bins)
binned = np.zeros(len(bins)) * np.nan
if nbinned:
numbinned = np.zeros(len(bins))
if mode == 'mean':
for i, _ in enumerate(bins):
binned[i] = np.nanmean(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'median':
for i, _ in enumerate(bins):
binned[i] = np.nanmedian(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'std':
for i, _ in enumerate(bins):
binned[i] = np.nanstd(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'max':
for i, _ in enumerate(bins):
binned[i] = np.nanmax(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'min':
for i, _ in enumerate(bins):
binned[i] = np.nanmin(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
else:
raise ValueError('mode must be mean, median, std, max, or min')
if nbinned:
return np.array(binned), np.array(numbinned)
else:
return np.array(binned)
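# Illustrative usage sketch (names match the call made further below): with 100 m bins,
# bin_means = bin_data(bins, ddem, dem, mode='mean') averages the dDEM values whose
# reference-DEM elevation falls inside each bin.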
# function to extract data by mask
def glacierMask(fp_raster, features):
with rasterio.open(fp_raster) as src:
        glac_mask, glac_out_transform = rasterio.mask.mask(src, features, crop=False)
glac_nodata = src.nodata
masked = glac_mask[0]
masked[(masked == glac_nodata)] = np.nan
return masked
# function to get elevation bins
def getBins(array, bwidth):
# get elev min and max
elev_min = np.nanmin(array)
elev_max = np.nanmax(array)
# define elevation range
erange = elev_max - elev_min
min_el = elev_min - (elev_min % bwidth)
max_el = elev_max + (bwidth - (elev_max % bwidth))
bins = np.arange(min_el, max_el+1, bwidth)
return bins
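# Hypothetical example: for a DEM spanning roughly 240-1760 m, getBins(dem, 100) would
# return bin edges array([200., 300., ..., 1800.]), i.e. edges snapped to the bin width.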
def excludeByID(excluded_list, in_gdf, id_column):
# select all but ones in the list
selection = in_gdf[~in_gdf[id_column].isin(excluded_list)]
return selection
# read files
gdf = gpd.read_file(fp_diss_outline) # glacier outlines, the ones passing the filter in void fill
checkGeom(gdf)
contour = gpd.read_file(fp_c) # filled contours
#exclude = pd.read_csv(fp_exclude) # glaciers to exclude
# rename columns
#exclude = exclude.rename(columns={'Unnamed: 0': 'index', '0': 'ID'})
#remove duplicates and drop extra columns
#exclude = exclude.drop_duplicates(subset='ID')
#exclude = exclude.drop(columns=['index'])
# to list
#excludelist = exclude['ID'].tolist()
# glaciers to select
#sel = ['RGI60-05.02328_1']
sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.01987_1', 'RGI60-05.02303', 'RGI60-05.02126'] # observed surges
#sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.02108', 'RGI60-05.02303', 'RGI60-05.01920', 'RGI60-05.01987_1', 'RGI60-05.02213', 'RGI60-05.02126'] # observed and probable surge
# exclude and select certain glaciers
#gdf = excludeByID(excludelist, gdf, 'RGIId')
# get id's to list, so same selection can be made for edited outlines
gdf_idlist = list(gdf['RGIId'])
#gdf.to_file(fp_diss_outline_exc, driver='ESRI Shapefile')
gdf = gdf[gdf.RGIId.isin(sel)] # subset
#gdf.to_file(fp_surging_outline, driver='ESRI Shapefile')
# read outline dataframes and assign them to dictionary where basename is the key to each dataframe
outlinedict = {} # empty dictionary
for f in glob.glob(fp_outlines):
# get basename
bname = os.path.basename(f)
ol = gpd.read_file(f)
#ol = excludeByID(excludelist, ol, 'RGIId') # exclude certain glaciers
ol = ol[ol.RGIId.isin(gdf_idlist)] # selection based on the id's from the dissolved outlines
    ol = ol[ol.RGIId.isin(sel)] # additional dataframe subsetting, i.e. for surging glaciers only
# check geometry validity before creating dictionary
for geom in ol.geometry:
if explain_validity(geom) != 'Valid Geometry':
print(bname + ' Geometry has invalid parts')
print(explain_validity(geom))
# add dataframe to dictionary with basename as key
outlinedict[bname] = ol
# test if non-matching id's found
#keyslist = list(outlinedict)
#test = outlinedict.get(keyslist[2])
#testlist = list(test['RGIId'])
#for i in gdf_idlist:
# if i not in testlist:
# print(i)
# check contour columns and elevation ranges
contour.columns
contour['range_cont'].unique()
# exclude NoData from contour ranges
contsel = contour[contour['range_cont'] != '<NoData>']
# dissolve by contour range
contdis = contsel.dissolve(by='range_cont')
contdis = contdis.reset_index()
# read dem
with rio.open(fp_dem) as src:
dem = src.read(1)
dem_nodata = src.nodata
dem[dem == dem_nodata] = np.nan
# get geometries from selected polygons
shapes = gdf.geometry
# bins
bins = getBins(dem, 100)
# dataframe to store results
result = pd.DataFrame(bins, columns=['bins'])
# read tiff files to list
tifflist = []
for t in glob.glob(fp_tiff):
tifflist.append(t)
# get elevation differences for each elevation bin
for tif in tifflist:
bname = os.path.basename(tif)
# read ddem and mask with selected glaciers
ddem = glacierMask(tif, shapes)
# classify dem to bins
digitized = np.digitize(dem, bins)
# calculate average elevation difference per bin
bin_means = bin_data(bins, ddem, dem, mode='mean', nbinned=False)
# parse column name
colname = 'mu_dh_' + bname[0:12]
# update results
for i, _ in enumerate(bins):
result.loc[result['bins'] == bins[i], colname] = bin_means[i]
# update bins column to integer
result['bins'] = result['bins'].astype(int)
# list for area sum strings
asumstr = []
# add area change to new columns
# loop through dictionary keys and values
for x, y in outlinedict.items():
# store the first four characters (the year) from the filename to variable
year = x[:4]
# check total glacierized area
g_area = sum(y.area) / 1000000
unc_a = g_area * 0.1
print('Area in ' + str(year) + ': {:.2f} ± {:.2f} km2'.format(g_area, unc_a)) #str(round(g_area / 1000000, 3)) + ' km2')
areasum_str = str(year) + ': {:.2f} ± {:.2f} $km^2$'.format(g_area, unc_a)
asumstr.append(areasum_str)
# add column for results
result[str(x[:4])+'Akm2'] = ""
# loop through elevation bins and calculate area altitude difference for each bin
for i in bins:
i = i.astype(int)
# selection by contour range before applying functions
elev_bin = contdis[contdis['low_cont'] == i.astype(str)]
# use function
out = areaDiff(y, elev_bin)
if out is None:
out = 0
# store result to dataframe
result.loc[result['bins'] == i, str(x[:4])+'Akm2'] = out
# calculate area differences (e.g. 2016 - 1953, so positive values show area increase and negative decrease)
result['dA53t85'] = result['1985Akm2'] - result['1953Akm2']
result['dA53t16'] = result['2016Akm2'] - result['1953Akm2']
result['dA85t16'] = result['2016Akm2'] - result['1985Akm2']
result = result.dropna(axis=0, how='any')
# figure output
fig_out = r'/Users/apj/Documents/_HY/Greenland/contour/figures/vgridshift/hypsometry_active_surging_glaciers_filled_global_mean.png'
# create hypsometry and area altitude plot
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10,10))
# hypsometry plot
line5385 = axes[0].plot(result['mu_dh_1953_to_1985'], result['bins'], marker='p', color='k', linewidth= 0.9, label='dh 1953 to 1985')
line532016 = axes[0].plot(result['mu_dh_1953_to_2016'], result['bins'], marker='v', color='b', linewidth=0.9, label='dh 1953 to 2016')
line852016 = axes[0].plot(result['mu_dh_1985_to_2016'], result['bins'], marker='s', color='g', linewidth=0.9, label='dh 1985 to 2016')
axes[0].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[0].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[0].axvline(0, color='grey', ls='--')
axes[0].set_ylabel('Elevation bin (m)')
axes[0].set_xlabel('Average elevation difference (m)')
axes[0].legend(loc=2)
axes[0].grid()
# area-altitude plot
area1953 = axes[1].plot(result['1953Akm2'], result['bins'], marker='s', color='k', linewidth= 0.9, label='1953')
area1985 = axes[1].plot(result['1985Akm2'], result['bins'], marker='^', color='#994C00', linewidth=0.9, label='1985')
area2016 = axes[1].plot(result['2016Akm2'], result['bins'], marker='o', color='#006633', linewidth=0.9, label='2016')
axes[1].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[1].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[1].axvline(0, color='grey', ls='--')
axes[1].set_ylabel('Elevation bin (m)')
axes[1].set_xlabel('Area altitude distribution ($km^2$)') | axes[1].grid()
textstr = '\n'.join((
'Glacierized area',
asumstr[0],
asumstr[1],
asumstr[2]))
# matplotlib patch properties
props = dict(boxstyle='round', facecolor='white', alpha=1)
# place text box in axes coords
axes[1].text(0.5, 0.05, textstr, transform=axes[1].transAxes, fontsize=10,
verticalalignment='center', bbox=props)
# fig title
fig.suptitle('Active surging glaciers', fontsize=20)
plt.tight_layout(pad=1.5)
plt.savefig(fig_out, dpi=600, format='png') | axes[1].legend(loc=1) | random_line_split |
hypsometry_plots.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 11:57:37 2021
Hypsometry plots
To Do:
select only glaciers where 'RGIId' matches with filtered dissolved outline 'RGIId'
@author: apj
"""
import pandas as pd
import geopandas as gpd
from rasterstats import zonal_stats
import rasterio as rio
import rasterio.mask
import matplotlib.pyplot as plt
import numpy as np
import glob
from shapely.validation import explain_validity
import os
# filepaths
fp_tiff = r'/Users/apj/Documents/_HY/Greenland/dem_diff/vgridshift/filled_ddem/*global*.tif'
fp_outlines = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/final/*final_edit.shp'
fp_diss_outline = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/Dissolved_outline_50s80s2010s_utm_exclude_bin_thresh.shp'
fp_surging_outline = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/Dissolved_outline_50s80s2010s_all_surging.shp'
fp_exclude = r'/Users/apj/Documents/_HY/Greenland/dem_diff/filled_ddem/glaciers_to_exclude_edit.csv'
fp_dem = r'/Users/apj/Documents/_HY/Greenland/DEM_masked/2016_dem_studyarea_3681x3295.tif'
fp_c = r'/Users/apj/Documents/_HY/Greenland/contour/2016_filled_contour.shp'
def checkGeom(geodataframe):
"""
Function to check validity of geometry. Returns message from shapely explain_validity if geometry is not 'Valid Geometry'
Parameters
----------
    geodataframe : geopandas.GeoDataFrame
        GeoDataFrame whose geometry column is checked.
    Returns
    -------
    None. Prints the shapely explain_validity message for each invalid geometry.
"""
for geometry in geodataframe.geometry:
if explain_validity(geometry) != 'Valid Geometry':
print(explain_validity(geometry))
def areaDiff(outline, elevation_bin):
"""
Function to calculate area in an elevation bin
Parameters
----------
outline : Polygon
Polygon containing outlines.
elevation_bin : Polygon
Polygon containing elevation ranges
Returns
-------
elev_range_area_sum : float
Sum of areas from outline polygon inside the elevation bin
"""
# clip outlines by selected elevation range
    outline_elev_range = gpd.clip(outline, elevation_bin, keep_geom_type=True)
    # check that clipped dataframe is not empty
    if outline_elev_range.empty:
        return None
# compute area in km2
elev_range_area = outline_elev_range.geometry.area / 1000000
# sum areas
elev_range_area_sum = elev_range_area.sum()
return elev_range_area_sum
# function from pybob
def bin_data(bins, data2bin, bindata, mode='mean', nbinned=False):
"""
Place data into bins based on a secondary dataset, and calculate statistics on them.
:param bins: array-like structure indicating the bins into which data should be placed.
:param data2bin: data that should be binned.
:param bindata: secondary dataset that decides how data2bin should be binned. Should have same size/shape
as data2bin.
:param mode: How to calculate statistics of binned data. One of 'mean', 'median', 'std', 'max', or 'min'.
:param nbinned: Return a second array, nbinned, with number of data points that fit into each bin.
Default is False.
:type bins: array-like
:type data2bin: array-like
:type bindata: array-like
:type mode: str
:type nbinned: bool
:returns binned, nbinned: calculated, binned data with same size as bins input. If nbinned is True, returns a second
array with the number of inputs for each bin.
"""
assert mode in ['mean', 'median', 'std', 'max', 'min'], "mode not recognized: {}".format(mode)
digitized = np.digitize(bindata, bins)
binned = np.zeros(len(bins)) * np.nan
if nbinned:
numbinned = np.zeros(len(bins))
if mode == 'mean':
for i, _ in enumerate(bins):
binned[i] = np.nanmean(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'median':
for i, _ in enumerate(bins):
binned[i] = np.nanmedian(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'std':
for i, _ in enumerate(bins):
binned[i] = np.nanstd(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'max':
for i, _ in enumerate(bins):
binned[i] = np.nanmax(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'min':
for i, _ in enumerate(bins):
binned[i] = np.nanmin(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
else:
raise ValueError('mode must be mean, median, std, max, or min')
if nbinned:
return np.array(binned), np.array(numbinned)
else:
return np.array(binned)
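# Illustrative usage sketch (names match the call made further below): with 100 m bins,
# bin_means = bin_data(bins, ddem, dem, mode='mean') averages the dDEM values whose
# reference-DEM elevation falls inside each bin.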
# function to extract data by mask
def glacierMask(fp_raster, features):
with rasterio.open(fp_raster) as src:
        glac_mask, glac_out_transform = rasterio.mask.mask(src, features, crop=False)
glac_nodata = src.nodata
masked = glac_mask[0]
masked[(masked == glac_nodata)] = np.nan
return masked
# function to get elevation bins
def getBins(array, bwidth):
# get elev min and max
elev_min = np.nanmin(array)
elev_max = np.nanmax(array)
# define elevation range
erange = elev_max - elev_min
min_el = elev_min - (elev_min % bwidth)
max_el = elev_max + (bwidth - (elev_max % bwidth))
bins = np.arange(min_el, max_el+1, bwidth)
return bins
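# Hypothetical example: for a DEM spanning roughly 240-1760 m, getBins(dem, 100) would
# return bin edges array([200., 300., ..., 1800.]), i.e. edges snapped to the bin width.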
def excludeByID(excluded_list, in_gdf, id_column):
# select all but ones in the list
selection = in_gdf[~in_gdf[id_column].isin(excluded_list)]
return selection
# read files
gdf = gpd.read_file(fp_diss_outline) # glacier outlines, the ones passing the filter in void fill
checkGeom(gdf)
contour = gpd.read_file(fp_c) # filled contours
#exclude = pd.read_csv(fp_exclude) # glaciers to exclude
# rename columns
#exclude = exclude.rename(columns={'Unnamed: 0': 'index', '0': 'ID'})
#remove duplicates and drop extra columns
#exclude = exclude.drop_duplicates(subset='ID')
#exclude = exclude.drop(columns=['index'])
# to list
#excludelist = exclude['ID'].tolist()
# glaciers to select
#sel = ['RGI60-05.02328_1']
sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.01987_1', 'RGI60-05.02303', 'RGI60-05.02126'] # observed surges
#sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.02108', 'RGI60-05.02303', 'RGI60-05.01920', 'RGI60-05.01987_1', 'RGI60-05.02213', 'RGI60-05.02126'] # observed and probable surge
# exclude and select certain glaciers
#gdf = excludeByID(excludelist, gdf, 'RGIId')
# get id's to list, so same selection can be made for edited outlines
gdf_idlist = list(gdf['RGIId'])
#gdf.to_file(fp_diss_outline_exc, driver='ESRI Shapefile')
gdf = gdf[gdf.RGIId.isin(sel)] # subset
#gdf.to_file(fp_surging_outline, driver='ESRI Shapefile')
# read outline dataframes and assign them to dictionary where basename is the key to each dataframe
outlinedict = {} # empty dictionary
for f in glob.glob(fp_outlines):
# get basename
bname = os.path.basename(f)
ol = gpd.read_file(f)
#ol = excludeByID(excludelist, ol, 'RGIId') # exclude certain glaciers
ol = ol[ol.RGIId.isin(gdf_idlist)] # selection based on the id's from the dissolved outlines
    ol = ol[ol.RGIId.isin(sel)] # additional dataframe subsetting, i.e. for surging glaciers only
# check geometry validity before creating dictionary
for geom in ol.geometry:
if explain_validity(geom) != 'Valid Geometry':
print(bname + ' Geometry has invalid parts')
print(explain_validity(geom))
# add dataframe to dictionary with basename as key
outlinedict[bname] = ol
# test if non-matching id's found
#keyslist = list(outlinedict)
#test = outlinedict.get(keyslist[2])
#testlist = list(test['RGIId'])
#for i in gdf_idlist:
# if i not in testlist:
# print(i)
# check contour columns and elevation ranges
contour.columns
contour['range_cont'].unique()
# exclude NoData from contour ranges
contsel = contour[contour['range_cont'] != '<NoData>']
# dissolve by contour range
contdis = contsel.dissolve(by='range_cont')
contdis = contdis.reset_index()
# read dem
with rio.open(fp_dem) as src:
dem = src.read(1)
dem_nodata = src.nodata
dem[dem == dem_nodata] = np.nan
# get geometries from selected polygons
shapes = gdf.geometry
# bins
bins = getBins(dem, 100)
# dataframe to store results
result = pd.DataFrame(bins, columns=['bins'])
# read tiff files to list
tifflist = []
for t in glob.glob(fp_tiff):
|
# get elevation differences for each elevation bin
for tif in tifflist:
bname = os.path.basename(tif)
# read ddem and mask with selected glaciers
ddem = glacierMask(tif, shapes)
# classify dem to bins
digitized = np.digitize(dem, bins)
# calculate average elevation difference per bin
bin_means = bin_data(bins, ddem, dem, mode='mean', nbinned=False)
# parse column name
colname = 'mu_dh_' + bname[0:12]
# update results
for i, _ in enumerate(bins):
result.loc[result['bins'] == bins[i], colname] = bin_means[i]
# update bins column to integer
result['bins'] = result['bins'].astype(int)
# list for area sum strings
asumstr = []
# add area change to new columns
# loop through dictionary keys and values
for x, y in outlinedict.items():
# store the first four characters (the year) from the filename to variable
year = x[:4]
# check total glacierized area
g_area = sum(y.area) / 1000000
unc_a = g_area * 0.1
print('Area in ' + str(year) + ': {:.2f} ± {:.2f} km2'.format(g_area, unc_a)) #str(round(g_area / 1000000, 3)) + ' km2')
areasum_str = str(year) + ': {:.2f} ± {:.2f} $km^2$'.format(g_area, unc_a)
asumstr.append(areasum_str)
# add column for results
result[str(x[:4])+'Akm2'] = ""
# loop through elevation bins and calculate area altitude difference for each bin
for i in bins:
i = i.astype(int)
# selection by contour range before applying functions
elev_bin = contdis[contdis['low_cont'] == i.astype(str)]
# use function
out = areaDiff(y, elev_bin)
if out is None:
out = 0
# store result to dataframe
result.loc[result['bins'] == i, str(x[:4])+'Akm2'] = out
# calculate area differences (e.g. 2016 - 1953, so positive values show area increase and negative decrease)
result['dA53t85'] = result['1985Akm2'] - result['1953Akm2']
result['dA53t16'] = result['2016Akm2'] - result['1953Akm2']
result['dA85t16'] = result['2016Akm2'] - result['1985Akm2']
result = result.dropna(axis=0, how='any')
# figure output
fig_out = r'/Users/apj/Documents/_HY/Greenland/contour/figures/vgridshift/hypsometry_active_surging_glaciers_filled_global_mean.png'
# create hypsometry and area altitude plot
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10,10))
# hypsometry plot
line5385 = axes[0].plot(result['mu_dh_1953_to_1985'], result['bins'], marker='p', color='k', linewidth= 0.9, label='dh 1953 to 1985')
line532016 = axes[0].plot(result['mu_dh_1953_to_2016'], result['bins'], marker='v', color='b', linewidth=0.9, label='dh 1953 to 2016')
line852016 = axes[0].plot(result['mu_dh_1985_to_2016'], result['bins'], marker='s', color='g', linewidth=0.9, label='dh 1985 to 2016')
axes[0].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[0].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[0].axvline(0, color='grey', ls='--')
axes[0].set_ylabel('Elevation bin (m)')
axes[0].set_xlabel('Average elevation difference (m)')
axes[0].legend(loc=2)
axes[0].grid()
# area-altitude plot
area1953 = axes[1].plot(result['1953Akm2'], result['bins'], marker='s', color='k', linewidth= 0.9, label='1953')
area1985 = axes[1].plot(result['1985Akm2'], result['bins'], marker='^', color='#994C00', linewidth=0.9, label='1985')
area2016 = axes[1].plot(result['2016Akm2'], result['bins'], marker='o', color='#006633', linewidth=0.9, label='2016')
axes[1].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[1].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[1].axvline(0, color='grey', ls='--')
axes[1].set_ylabel('Elevation bin (m)')
axes[1].set_xlabel('Area altitude distribution ($km^2$)')
axes[1].legend(loc=1)
axes[1].grid()
textstr = '\n'.join((
'Glacierized area',
asumstr[0],
asumstr[1],
asumstr[2]))
# matplotlib patch properties
props = dict(boxstyle='round', facecolor='white', alpha=1)
# place text box in axes coords
axes[1].text(0.5, 0.05, textstr, transform=axes[1].transAxes, fontsize=10,
verticalalignment='center', bbox=props)
# fig title
fig.suptitle('Active surging glaciers', fontsize=20)
plt.tight_layout(pad=1.5)
plt.savefig(fig_out, dpi=600, format='png')
| tifflist.append(t) | conditional_block |
hypsometry_plots.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 11:57:37 2021
Hypsometry plots
To Do:
select only glaciers where 'RGIId' matches with filtered dissolved outline 'RGIId'
@author: apj
"""
import pandas as pd
import geopandas as gpd
from rasterstats import zonal_stats
import rasterio as rio
import rasterio.mask
import matplotlib.pyplot as plt
import numpy as np
import glob
from shapely.validation import explain_validity
import os
# filepaths
fp_tiff = r'/Users/apj/Documents/_HY/Greenland/dem_diff/vgridshift/filled_ddem/*global*.tif'
fp_outlines = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/final/*final_edit.shp'
fp_diss_outline = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/Dissolved_outline_50s80s2010s_utm_exclude_bin_thresh.shp'
fp_surging_outline = r'/Users/apj/Documents/_HY/Greenland/outlines/edited_glacier_divides/Dissolved_outline_50s80s2010s_all_surging.shp'
fp_exclude = r'/Users/apj/Documents/_HY/Greenland/dem_diff/filled_ddem/glaciers_to_exclude_edit.csv'
fp_dem = r'/Users/apj/Documents/_HY/Greenland/DEM_masked/2016_dem_studyarea_3681x3295.tif'
fp_c = r'/Users/apj/Documents/_HY/Greenland/contour/2016_filled_contour.shp'
def checkGeom(geodataframe):
"""
Function to check validity of geometry. Returns message from shapely explain_validity if geometry is not 'Valid Geometry'
Parameters
----------
    geodataframe : geopandas.GeoDataFrame
        GeoDataFrame whose geometry column is checked.
    Returns
    -------
    None. Prints the shapely explain_validity message for each invalid geometry.
"""
for geometry in geodataframe.geometry:
if explain_validity(geometry) != 'Valid Geometry':
print(explain_validity(geometry))
def | (outline, elevation_bin):
"""
Function to calculate area in an elevation bin
Parameters
----------
outline : Polygon
Polygon containing outlines.
elevation_bin : Polygon
Polygon containing elevation ranges
Returns
-------
elev_range_area_sum : float
Sum of areas from outline polygon inside the elevation bin
"""
# clip outlines by selected elevation range
    outline_elev_range = gpd.clip(outline, elevation_bin, keep_geom_type=True)
    # check that clipped dataframe is not empty
    if outline_elev_range.empty:
        return None
# compute area in km2
elev_range_area = outline_elev_range.geometry.area / 1000000
# sum areas
elev_range_area_sum = elev_range_area.sum()
return elev_range_area_sum
# function from pybob
def bin_data(bins, data2bin, bindata, mode='mean', nbinned=False):
"""
Place data into bins based on a secondary dataset, and calculate statistics on them.
:param bins: array-like structure indicating the bins into which data should be placed.
:param data2bin: data that should be binned.
:param bindata: secondary dataset that decides how data2bin should be binned. Should have same size/shape
as data2bin.
:param mode: How to calculate statistics of binned data. One of 'mean', 'median', 'std', 'max', or 'min'.
:param nbinned: Return a second array, nbinned, with number of data points that fit into each bin.
Default is False.
:type bins: array-like
:type data2bin: array-like
:type bindata: array-like
:type mode: str
:type nbinned: bool
:returns binned, nbinned: calculated, binned data with same size as bins input. If nbinned is True, returns a second
array with the number of inputs for each bin.
"""
assert mode in ['mean', 'median', 'std', 'max', 'min'], "mode not recognized: {}".format(mode)
digitized = np.digitize(bindata, bins)
binned = np.zeros(len(bins)) * np.nan
if nbinned:
numbinned = np.zeros(len(bins))
if mode == 'mean':
for i, _ in enumerate(bins):
binned[i] = np.nanmean(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'median':
for i, _ in enumerate(bins):
binned[i] = np.nanmedian(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'std':
for i, _ in enumerate(bins):
binned[i] = np.nanstd(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'max':
for i, _ in enumerate(bins):
binned[i] = np.nanmax(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'min':
for i, _ in enumerate(bins):
binned[i] = np.nanmin(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
else:
raise ValueError('mode must be mean, median, std, max, or min')
if nbinned:
return np.array(binned), np.array(numbinned)
else:
return np.array(binned)
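# Illustrative usage sketch (names match the call made further below): with 100 m bins,
# bin_means = bin_data(bins, ddem, dem, mode='mean') averages the dDEM values whose
# reference-DEM elevation falls inside each bin.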
# function to extract data by mask
def glacierMask(fp_raster, features):
with rasterio.open(fp_raster) as src:
        glac_mask, glac_out_transform = rasterio.mask.mask(src, features, crop=False)
glac_nodata = src.nodata
masked = glac_mask[0]
masked[(masked == glac_nodata)] = np.nan
return masked
# function to get elevation bins
def getBins(array, bwidth):
# get elev min and max
elev_min = np.nanmin(array)
elev_max = np.nanmax(array)
# define elevation range
erange = elev_max - elev_min
min_el = elev_min - (elev_min % bwidth)
max_el = elev_max + (bwidth - (elev_max % bwidth))
bins = np.arange(min_el, max_el+1, bwidth)
return bins
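# Hypothetical example: for a DEM spanning roughly 240-1760 m, getBins(dem, 100) would
# return bin edges array([200., 300., ..., 1800.]), i.e. edges snapped to the bin width.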
def excludeByID(excluded_list, in_gdf, id_column):
# select all but ones in the list
selection = in_gdf[~in_gdf[id_column].isin(excluded_list)]
return selection
# read files
gdf = gpd.read_file(fp_diss_outline) # glacier outlines, the ones passing the filter in void fill
checkGeom(gdf)
contour = gpd.read_file(fp_c) # filled contours
#exclude = pd.read_csv(fp_exclude) # glaciers to exclude
# rename columns
#exclude = exclude.rename(columns={'Unnamed: 0': 'index', '0': 'ID'})
#remove duplicates and drop extra columns
#exclude = exclude.drop_duplicates(subset='ID')
#exclude = exclude.drop(columns=['index'])
# to list
#excludelist = exclude['ID'].tolist()
# glaciers to select
#sel = ['RGI60-05.02328_1']
sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.01987_1', 'RGI60-05.02303', 'RGI60-05.02126'] # observed surges
#sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.02108', 'RGI60-05.02303', 'RGI60-05.01920', 'RGI60-05.01987_1', 'RGI60-05.02213', 'RGI60-05.02126'] # observed and probable surge
# exclude and select certain glaciers
#gdf = excludeByID(excludelist, gdf, 'RGIId')
# get id's to list, so same selection can be made for edited outlines
gdf_idlist = list(gdf['RGIId'])
#gdf.to_file(fp_diss_outline_exc, driver='ESRI Shapefile')
gdf = gdf[gdf.RGIId.isin(sel)] # subset
#gdf.to_file(fp_surging_outline, driver='ESRI Shapefile')
# read outline dataframes and assign them to dictionary where basename is the key to each dataframe
outlinedict = {} # empty dictionary
for f in glob.glob(fp_outlines):
# get basename
bname = os.path.basename(f)
ol = gpd.read_file(f)
#ol = excludeByID(excludelist, ol, 'RGIId') # exclude certain glaciers
ol = ol[ol.RGIId.isin(gdf_idlist)] # selection based on the id's from the dissolved outlines
    ol = ol[ol.RGIId.isin(sel)] # additional dataframe subsetting, i.e. for surging glaciers only
# check geometry validity before creating dictionary
for geom in ol.geometry:
if explain_validity(geom) != 'Valid Geometry':
print(bname + ' Geometry has invalid parts')
print(explain_validity(geom))
# add dataframe to dictionary with basename as key
outlinedict[bname] = ol
# test if non-matching id's found
#keyslist = list(outlinedict)
#test = outlinedict.get(keyslist[2])
#testlist = list(test['RGIId'])
#for i in gdf_idlist:
# if i not in testlist:
# print(i)
# check contour columns and elevation ranges
contour.columns
contour['range_cont'].unique()
# exclude NoData from contour ranges
contsel = contour[contour['range_cont'] != '<NoData>']
# dissolve by contour range
contdis = contsel.dissolve(by='range_cont')
contdis = contdis.reset_index()
# read dem
with rio.open(fp_dem) as src:
dem = src.read(1)
dem_nodata = src.nodata
dem[dem == dem_nodata] = np.nan
# get geometries from selected polygons
shapes = gdf.geometry
# bins
bins = getBins(dem, 100)
# dataframe to store results
result = pd.DataFrame(bins, columns=['bins'])
# read tiff files to list
tifflist = []
for t in glob.glob(fp_tiff):
tifflist.append(t)
# get elevation differences for each elevation bin
for tif in tifflist:
bname = os.path.basename(tif)
# read ddem and mask with selected glaciers
ddem = glacierMask(tif, shapes)
# classify dem to bins
digitized = np.digitize(dem, bins)
# calculate average elevation difference per bin
bin_means = bin_data(bins, ddem, dem, mode='mean', nbinned=False)
# parse column name
colname = 'mu_dh_' + bname[0:12]
# update results
for i, _ in enumerate(bins):
result.loc[result['bins'] == bins[i], colname] = bin_means[i]
# update bins column to integer
result['bins'] = result['bins'].astype(int)
# list for area sum strings
asumstr = []
# add area change to new columns
# loop through dictionary keys and values
for x, y in outlinedict.items():
# store the first four characters (the year) from the filename to variable
year = x[:4]
# check total glacierized area
g_area = sum(y.area) / 1000000
unc_a = g_area * 0.1
print('Area in ' + str(year) + ': {:.2f} ± {:.2f} km2'.format(g_area, unc_a)) #str(round(g_area / 1000000, 3)) + ' km2')
areasum_str = str(year) + ': {:.2f} ± {:.2f} $km^2$'.format(g_area, unc_a)
asumstr.append(areasum_str)
# add column for results
result[str(x[:4])+'Akm2'] = ""
# loop through elevation bins and calculate area altitude difference for each bin
for i in bins:
i = i.astype(int)
# selection by contour range before applying functions
elev_bin = contdis[contdis['low_cont'] == i.astype(str)]
# use function
out = areaDiff(y, elev_bin)
if out is None:
out = 0
# store result to dataframe
result.loc[result['bins'] == i, str(x[:4])+'Akm2'] = out
# calculate area differences (e.g. 2016 - 1953, so positive values show area increase and negative decrease)
result['dA53t85'] = result['1985Akm2'] - result['1953Akm2']
result['dA53t16'] = result['2016Akm2'] - result['1953Akm2']
result['dA85t16'] = result['2016Akm2'] - result['1985Akm2']
result = result.dropna(axis=0, how='any')
# figure output
fig_out = r'/Users/apj/Documents/_HY/Greenland/contour/figures/vgridshift/hypsometry_active_surging_glaciers_filled_global_mean.png'
# create hypsometry and area altitude plot
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10,10))
# hypsometry plot
line5385 = axes[0].plot(result['mu_dh_1953_to_1985'], result['bins'], marker='p', color='k', linewidth= 0.9, label='dh 1953 to 1985')
line532016 = axes[0].plot(result['mu_dh_1953_to_2016'], result['bins'], marker='v', color='b', linewidth=0.9, label='dh 1953 to 2016')
line852016 = axes[0].plot(result['mu_dh_1985_to_2016'], result['bins'], marker='s', color='g', linewidth=0.9, label='dh 1985 to 2016')
axes[0].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[0].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[0].axvline(0, color='grey', ls='--')
axes[0].set_ylabel('Elevation bin (m)')
axes[0].set_xlabel('Average elevation difference (m)')
axes[0].legend(loc=2)
axes[0].grid()
# area-altitude plot
area1953 = axes[1].plot(result['1953Akm2'], result['bins'], marker='s', color='k', linewidth= 0.9, label='1953')
area1985 = axes[1].plot(result['1985Akm2'], result['bins'], marker='^', color='#994C00', linewidth=0.9, label='1985')
area2016 = axes[1].plot(result['2016Akm2'], result['bins'], marker='o', color='#006633', linewidth=0.9, label='2016')
axes[1].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[1].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[1].axvline(0, color='grey', ls='--')
axes[1].set_ylabel('Elevation bin (m)')
axes[1].set_xlabel('Area altitude distribution ($km^2$)')
axes[1].legend(loc=1)
axes[1].grid()
textstr = '\n'.join((
'Glacierized area',
asumstr[0],
asumstr[1],
asumstr[2]))
# matplotlib patch properties
props = dict(boxstyle='round', facecolor='white', alpha=1)
# place text box in axes coords
axes[1].text(0.5, 0.05, textstr, transform=axes[1].transAxes, fontsize=10,
verticalalignment='center', bbox=props)
# fig title
fig.suptitle('Active surging glaciers', fontsize=20)
plt.tight_layout(pad=1.5)
plt.savefig(fig_out, dpi=600, format='png')
| areaDiff | identifier_name |
keras_spark_rossmann_estimator.py | # %% [markdown]
# (spark_horovod_keras)=
#
# # Data-Parallel Distributed Training Using Horovod on Spark
#
# When time- and compute-intensive deep learning workloads need to be trained efficiently, data-parallel distributed training comes to the rescue.
# This technique parallelizes the data across worker nodes and synchronizes their gradient updates during training, which ensures that all worker nodes train a consistent model.
# Overall, data-parallel distributed training can help speed up the execution time.
#
# In this tutorial, we will understand how data-parallel distributed training works with Flyte, Horovod, and Spark.
#
# We will forecast sales using the Rossmann store sales dataset. As the data preparation step, we will process the data using Spark, a data processing engine. To improve the speed and ease of distributed training, we will use Horovod, a distributed deep learning training framework.
# Lastly, we will build a Keras model and perform distributed training using Horovod's [KerasEstimator API](https://github.com/horovod/horovod/blob/8d34c85ce7ec76e81fb3be99418b0e4d35204dc3/horovod/spark/keras/estimator.py#L88).
#
# Before executing the code, create `work_dir`, an s3 bucket.
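# For example, with the AWS CLI (assuming it is installed and configured): `aws s3 mb s3://<your-bucket>`.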
#
# Let's get started with the example!
# %% [markdown]
# First, let's import the required packages into the environment.
# %%
import datetime
import os
import pathlib
import subprocess
import sys
from dataclasses import dataclass
from distutils.version import LooseVersion
from typing import Any, Dict, List, Tuple
import flytekit
import horovod.spark.keras as hvd
import pyspark
import pyspark.sql.functions as F
import pyspark.sql.types as T
import tensorflow as tf
import tensorflow.keras.backend as K
from dataclasses_json import dataclass_json
from flytekit import Resources, task, workflow
from flytekit.types.directory import FlyteDirectory
from flytekitplugins.spark import Spark
from horovod.spark.common.backend import SparkBackend
from horovod.spark.common.store import Store
from horovod.tensorflow.keras.callbacks import BestModelCheckpoint
from pyspark import Row
from tensorflow.keras.layers import BatchNormalization, Concatenate, Dense, Dropout, Embedding, Flatten, Input, Reshape
# %% [markdown]
# We define two variables to represent categorical and continuous columns in the dataset.
# %%
CATEGORICAL_COLS = [
"Store",
"State",
"DayOfWeek",
"Year",
"Month",
"Day",
"Week",
"CompetitionMonthsOpen",
"Promo2Weeks",
"StoreType",
"Assortment",
"PromoInterval",
"CompetitionOpenSinceYear",
"Promo2SinceYear",
"Events",
"Promo",
"StateHoliday",
"SchoolHoliday",
]
CONTINUOUS_COLS = [
"CompetitionDistance",
"Max_TemperatureC",
"Mean_TemperatureC",
"Min_TemperatureC",
"Max_Humidity",
"Mean_Humidity",
"Min_Humidity",
"Max_Wind_SpeedKm_h",
"Mean_Wind_SpeedKm_h",
"CloudCover",
"trend",
"trend_de",
"BeforePromo",
"AfterPromo",
"AfterStateHoliday",
"BeforeStateHoliday",
"BeforeSchoolHoliday",
"AfterSchoolHoliday",
]
# %% [markdown]
# Next, let's initialize a data class to store the hyperparameters that will be used with the model (`epochs`, `learning_rate`, `batch_size`, etc.).
# %%
@dataclass_json
@dataclass
class Hyperparameters:
|
# %% [markdown]
# ## Downloading the Data
#
# We define a task to download the data into a `FlyteDirectory`.
# %%
@task(
cache=True,
cache_version="0.1",
)
def download_data(dataset: str) -> FlyteDirectory:
# create a directory named 'data'
print("==============")
print("Downloading data")
print("==============")
working_dir = flytekit.current_context().working_directory
data_dir = pathlib.Path(os.path.join(working_dir, "data"))
data_dir.mkdir(exist_ok=True)
# download the dataset
download_subprocess = subprocess.run(
[
"curl",
dataset,
],
check=True,
capture_output=True,
)
# untar the data
subprocess.run(
[
"tar",
"-xz",
"-C",
data_dir,
],
input=download_subprocess.stdout,
)
# return the directory populated with Rossmann data files
return FlyteDirectory(path=str(data_dir))
# %% [markdown]
# ## Data Preprocessing
#
# 1. Let's start with cleaning and preparing the Google trend data. We create new 'Date' and 'State' columns using PySpark's `withColumn`. These columns, in addition to other features, will contribute to the prediction of sales.
# %%
def prepare_google_trend(
google_trend_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
google_trend_all = google_trend_csv.withColumn(
"Date", F.regexp_extract(google_trend_csv.week, "(.*?) -", 1)
).withColumn("State", F.regexp_extract(google_trend_csv.file, "Rossmann_DE_(.*)", 1))
# map state NI -> HB,NI to align with other data sources
google_trend_all = google_trend_all.withColumn(
"State",
F.when(google_trend_all.State == "NI", "HB,NI").otherwise(google_trend_all.State),
)
# expand dates
return expand_date(google_trend_all)
# %% [markdown]
# 2. Next, we set a few date-specific values in the DataFrame to analyze the seasonal effects on sales.
# %%
def expand_date(df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
df = df.withColumn("Date", df.Date.cast(T.DateType()))
return (
df.withColumn("Year", F.year(df.Date))
.withColumn("Month", F.month(df.Date))
.withColumn("Week", F.weekofyear(df.Date))
.withColumn("Day", F.dayofmonth(df.Date))
)
# %% [markdown]
# 3. We retrieve the number of days before/after a special event (such as a promo or holiday). This data helps analyze how the sales may vary before/after a special event.
# %%
def add_elapsed(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
def add_elapsed_column(col, asc):
def fn(rows):
last_store, last_date = None, None
for r in rows:
if last_store != r.Store:
last_store = r.Store
last_date = r.Date
if r[col]:
last_date = r.Date
fields = r.asDict().copy()
fields[("After" if asc else "Before") + col] = (r.Date - last_date).days
yield Row(**fields)
return fn
# repartition: rearrange the rows in the DataFrame based on the partitioning expression
# sortWithinPartitions: sort every partition in the DataFrame based on specific columns
# mapPartitions: apply the 'add_elapsed_column' method to each partition in the dataset, and convert the partitions into a DataFrame
df = df.repartition(df.Store)
for asc in [False, True]:
sort_col = df.Date.asc() if asc else df.Date.desc()
rdd = df.sortWithinPartitions(df.Store.asc(), sort_col).rdd
for col in cols:
rdd = rdd.mapPartitions(add_elapsed_column(col, asc))
df = rdd.toDF()
return df
# %% [markdown]
# 4. We define a function to merge several Spark DataFrames into a single DataFrame to create training and test data.
# %%
def prepare_df(
df: pyspark.sql.DataFrame,
store_csv: pyspark.sql.DataFrame,
store_states_csv: pyspark.sql.DataFrame,
state_names_csv: pyspark.sql.DataFrame,
google_trend_csv: pyspark.sql.DataFrame,
weather_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
num_rows = df.count()
# expand dates
df = expand_date(df)
    # convert the flag columns (Open, Promo, StateHoliday, SchoolHoliday) from strings to booleans; a value of '0' means the store was closed or there was no promo/holiday
df = (
df.withColumn("Open", df.Open != "0")
.withColumn("Promo", df.Promo != "0")
.withColumn("StateHoliday", df.StateHoliday != "0")
.withColumn("SchoolHoliday", df.SchoolHoliday != "0")
)
# merge store information
store = store_csv.join(store_states_csv, "Store")
df = df.join(store, "Store")
# merge Google Trend information
google_trend_all = prepare_google_trend(google_trend_csv)
df = df.join(google_trend_all, ["State", "Year", "Week"]).select(df["*"], google_trend_all.trend)
# merge in Google Trend for whole Germany
google_trend_de = google_trend_all[google_trend_all.file == "Rossmann_DE"].withColumnRenamed("trend", "trend_de")
df = df.join(google_trend_de, ["Year", "Week"]).select(df["*"], google_trend_de.trend_de)
# merge weather
weather = weather_csv.join(state_names_csv, weather_csv.file == state_names_csv.StateName)
df = df.join(weather, ["State", "Date"])
# fix null values
df = (
df.withColumn(
"CompetitionOpenSinceYear",
F.coalesce(df.CompetitionOpenSinceYear, F.lit(1900)),
)
.withColumn(
"CompetitionOpenSinceMonth",
F.coalesce(df.CompetitionOpenSinceMonth, F.lit(1)),
)
.withColumn("Promo2SinceYear", F.coalesce(df.Promo2SinceYear, F.lit(1900)))
.withColumn("Promo2SinceWeek", F.coalesce(df.Promo2SinceWeek, F.lit(1)))
)
# days and months since the competition has been open, cap it to 2 years
df = df.withColumn(
"CompetitionOpenSince",
F.to_date(F.format_string("%s-%s-15", df.CompetitionOpenSinceYear, df.CompetitionOpenSinceMonth)),
)
df = df.withColumn(
"CompetitionDaysOpen",
F.when(
df.CompetitionOpenSinceYear > 1900,
F.greatest(
F.lit(0),
F.least(F.lit(360 * 2), F.datediff(df.Date, df.CompetitionOpenSince)),
),
).otherwise(0),
)
df = df.withColumn("CompetitionMonthsOpen", (df.CompetitionDaysOpen / 30).cast(T.IntegerType()))
# days and weeks of promotion, cap it to 25 weeks
df = df.withColumn(
"Promo2Since",
F.expr('date_add(format_string("%s-01-01", Promo2SinceYear), (cast(Promo2SinceWeek as int) - 1) * 7)'),
)
df = df.withColumn(
"Promo2Days",
F.when(
df.Promo2SinceYear > 1900,
F.greatest(F.lit(0), F.least(F.lit(25 * 7), F.datediff(df.Date, df.Promo2Since))),
).otherwise(0),
)
df = df.withColumn("Promo2Weeks", (df.Promo2Days / 7).cast(T.IntegerType()))
# ensure that no row was lost through inner joins
assert num_rows == df.count(), "lost rows in joins"
return df
# %% [markdown]
# 5. We build a dictionary of sorted, distinct categorical variables to create an embedding layer in our Keras model.
# %%
def build_vocabulary(df: pyspark.sql.DataFrame) -> Dict[str, List[Any]]:
vocab = {}
for col in CATEGORICAL_COLS:
values = [r[0] for r in df.select(col).distinct().collect()]
col_type = type([x for x in values if x is not None][0])
default_value = col_type()
vocab[col] = sorted(values, key=lambda x: x or default_value)
return vocab
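# Hypothetical example of the result: vocab["StoreType"] could be ["a", "b", "c", "d"],
# i.e. the sorted distinct values of that categorical column across the train and test data.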
# %% [markdown]
# 6. Next, we cast continuous columns to float as part of data preprocessing.
# %%
def cast_columns(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
for col in cols:
df = df.withColumn(col, F.coalesce(df[col].cast(T.FloatType()), F.lit(0.0)))
return df
# %% [markdown]
# 7. Lastly, define a function that returns a list of values based on a key.
# %%
def lookup_columns(df: pyspark.sql.DataFrame, vocab: Dict[str, List[Any]]) -> pyspark.sql.DataFrame:
def lookup(mapping):
def fn(v):
return mapping.index(v)
return F.udf(fn, returnType=T.IntegerType())
for col, mapping in vocab.items():
df = df.withColumn(col, lookup(mapping)(df[col]))
return df
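# Hypothetical example: with vocab["StoreType"] == ["a", "b", "c", "d"], a cell value "c"
# is replaced by its index 2, producing the integer inputs expected by the embedding layers.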
# %% [markdown]
# The `data_preparation` function consolidates all the aforementioned data processing functions.
# %%
def data_preparation(
data_dir: FlyteDirectory, hp: Hyperparameters
) -> Tuple[float, Dict[str, List[Any]], pyspark.sql.DataFrame, pyspark.sql.DataFrame]:
print("================")
print("Data preparation")
print("================")
# 'current_context' gives the handle of specific parameters in ``data_preparation`` task
spark = flytekit.current_context().spark_session
data_dir_path = data_dir.remote_source
# read the CSV data into Spark DataFrame
train_csv = spark.read.csv("%s/train.csv" % data_dir_path, header=True)
test_csv = spark.read.csv("%s/test.csv" % data_dir_path, header=True)
store_csv = spark.read.csv("%s/store.csv" % data_dir_path, header=True)
store_states_csv = spark.read.csv("%s/store_states.csv" % data_dir_path, header=True)
state_names_csv = spark.read.csv("%s/state_names.csv" % data_dir_path, header=True)
google_trend_csv = spark.read.csv("%s/googletrend.csv" % data_dir_path, header=True)
weather_csv = spark.read.csv("%s/weather.csv" % data_dir_path, header=True)
# retrieve a sampled subset of the train and test data
if hp.sample_rate:
train_csv = train_csv.sample(withReplacement=False, fraction=hp.sample_rate)
test_csv = test_csv.sample(withReplacement=False, fraction=hp.sample_rate)
# prepare the DataFrames from the CSV files
train_df = prepare_df(
train_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
test_df = prepare_df(
test_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
# add elapsed times from the data spanning training & test datasets
elapsed_cols = ["Promo", "StateHoliday", "SchoolHoliday"]
elapsed = add_elapsed(
train_df.select("Date", "Store", *elapsed_cols).unionAll(test_df.select("Date", "Store", *elapsed_cols)),
elapsed_cols,
)
# join with the elapsed times
train_df = train_df.join(elapsed, ["Date", "Store"]).select(
train_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
test_df = test_df.join(elapsed, ["Date", "Store"]).select(
test_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
# filter out zero sales
train_df = train_df.filter(train_df.Sales > 0)
print("===================")
print("Prepared data frame")
print("===================")
train_df.show()
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
# select features
train_df = train_df.select(*(all_cols + ["Sales", "Date"])).cache()
test_df = test_df.select(*(all_cols + ["Id", "Date"])).cache()
# build a vocabulary of categorical columns
vocab = build_vocabulary(
train_df.select(*CATEGORICAL_COLS).unionAll(test_df.select(*CATEGORICAL_COLS)).cache(),
)
# cast continuous columns to float
train_df = cast_columns(train_df, CONTINUOUS_COLS + ["Sales"])
# search for a key and return a list of values based on a key
train_df = lookup_columns(train_df, vocab)
test_df = cast_columns(test_df, CONTINUOUS_COLS)
test_df = lookup_columns(test_df, vocab)
# split into training & validation
# test set is in 2015, use the same period in 2014 from the training set as a validation set
test_min_date = test_df.agg(F.min(test_df.Date)).collect()[0][0]
test_max_date = test_df.agg(F.max(test_df.Date)).collect()[0][0]
one_year = datetime.timedelta(365)
train_df = train_df.withColumn(
"Validation",
(train_df.Date > test_min_date - one_year) & (train_df.Date <= test_max_date - one_year),
)
# determine max Sales number
max_sales = train_df.agg(F.max(train_df.Sales)).collect()[0][0]
# convert Sales to log domain
train_df = train_df.withColumn("Sales", F.log(train_df.Sales))
print("===================================")
print("Data frame with transformed columns")
print("===================================")
train_df.show()
print("================")
print("Data frame sizes")
print("================")
# filter out column validation from the DataFrame, and get the count
train_rows = train_df.filter(~train_df.Validation).count()
val_rows = train_df.filter(train_df.Validation).count()
test_rows = test_df.count()
# print the number of rows in training, validation and test data
print("Training: %d" % train_rows)
print("Validation: %d" % val_rows)
print("Test: %d" % test_rows)
return max_sales, vocab, train_df, test_df
# %% [markdown]
# ## Training
#
# We use `KerasEstimator` in Horovod to train our Keras model on an existing pre-processed Spark DataFrame.
# The Estimator leverages Horovod's ability to scale across multiple workers, thereby eliminating any specialized code to perform distributed training.
# %%
def train(
max_sales: float,
vocab: Dict[str, List[Any]],
hp: Hyperparameters,
work_dir: FlyteDirectory,
train_df: pyspark.sql.DataFrame,
working_dir: FlyteDirectory,
):
print("==============")
print("Model training")
print("==============")
# a method to determine root mean square percentage error of exponential of predictions
def exp_rmspe(y_true, y_pred):
"""Competition evaluation metric, expects logarmithic inputs."""
pct = tf.square((tf.exp(y_true) - tf.exp(y_pred)) / tf.exp(y_true))
# compute mean excluding stores with zero denominator
x = tf.reduce_sum(tf.where(y_true > 0.001, pct, tf.zeros_like(pct)))
y = tf.reduce_sum(tf.where(y_true > 0.001, tf.ones_like(pct), tf.zeros_like(pct)))
return tf.sqrt(x / y)
def act_sigmoid_scaled(x):
"""Sigmoid scaled to logarithm of maximum sales scaled by 20%."""
return tf.nn.sigmoid(x) * tf.math.log(max_sales) * 1.2
    # NOTE: exp_rmspe and act_sigmoid_scaled functions are not placed at the module level
# this is because we cannot explicitly send max_sales as an argument to act_sigmoid_scaled since it is an activation function
# two of them are custom objects, and placing one at the module level and the other within the function doesn't really add up
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
CUSTOM_OBJECTS = {"exp_rmspe": exp_rmspe, "act_sigmoid_scaled": act_sigmoid_scaled}
# disable GPUs when building the model to prevent memory leaks
if LooseVersion(tf.__version__) >= LooseVersion("2.0.0"):
# See https://github.com/tensorflow/tensorflow/issues/33168
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
else:
K.set_session(tf.Session(config=tf.ConfigProto(device_count={"GPU": 0})))
# build the Keras model
inputs = {col: Input(shape=(1,), name=col) for col in all_cols}
embeddings = [
Embedding(len(vocab[col]), 10, input_length=1, name="emb_" + col)(inputs[col]) for col in CATEGORICAL_COLS
]
continuous_bn = Concatenate()([Reshape((1, 1), name="reshape_" + col)(inputs[col]) for col in CONTINUOUS_COLS])
continuous_bn = BatchNormalization()(continuous_bn)
x = Concatenate()(embeddings + [continuous_bn])
x = Flatten()(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(500, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dropout(0.5)(x)
# specify element-wise activation
output = Dense(1, activation=act_sigmoid_scaled)(x)
model = tf.keras.Model([inputs[f] for f in all_cols], output)
# display the details of the Keras model
model.summary()
opt = tf.keras.optimizers.Adam(lr=hp.learning_rate, epsilon=1e-3)
# checkpoint callback to specify the options for the returned Keras model
ckpt_callback = BestModelCheckpoint(monitor="val_loss", mode="auto", save_freq="epoch")
# create an object of Store class
store = Store.create(work_dir.remote_source)
# 'SparkBackend' uses `horovod.spark.run` to execute the distributed training function, and
# returns a list of results by running 'train' on every worker in the cluster
backend = SparkBackend(
num_proc=hp.num_proc,
stdout=sys.stdout,
stderr=sys.stderr,
prefix_output_with_timestamp=True,
)
# define a Spark Estimator that fits Keras models to a DataFrame
keras_estimator = hvd.KerasEstimator(
backend=backend,
store=store,
model=model,
optimizer=opt,
loss="mae",
metrics=[exp_rmspe],
custom_objects=CUSTOM_OBJECTS,
feature_cols=all_cols,
label_cols=["Sales"],
validation="Validation",
batch_size=hp.batch_size,
epochs=hp.epochs,
verbose=2,
checkpoint_callback=ckpt_callback,
)
# The Estimator hides the following details:
# 1. Binding Spark DataFrames to a deep learning training script
# 2. Reading data into a format that can be interpreted by the training framework
# 3. Distributed training using Horovod
# the user would provide a Keras model to the `KerasEstimator``
# this `KerasEstimator`` will fit the data and store it in a Spark DataFrame
keras_model = keras_estimator.fit(train_df).setOutputCols(["Sales_output"])
# retrieve the model training history
history = keras_model.getHistory()
best_val_rmspe = min(history["val_exp_rmspe"])
print("Best RMSPE: %f" % best_val_rmspe)
# save the trained model
keras_model.save(os.path.join(working_dir, hp.local_checkpoint_file))
print("Written checkpoint to %s" % os.path.join(working_dir, hp.local_checkpoint_file))
# the Estimator returns a Transformer representation of the trained model once training is complete
return keras_model
# %% [markdown]
# ## Evaluation
#
# We use the model transformer to forecast sales.
# %%
def test(
keras_model,
working_dir: FlyteDirectory,
test_df: pyspark.sql.DataFrame,
hp: Hyperparameters,
) -> FlyteDirectory:
print("================")
print("Final prediction")
print("================")
pred_df = keras_model.transform(test_df)
pred_df.printSchema()
pred_df.show(5)
# convert from log domain to real Sales numbers
pred_df = pred_df.withColumn("Sales_pred", F.exp(pred_df.Sales_output))
submission_df = pred_df.select(pred_df.Id.cast(T.IntegerType()), pred_df.Sales_pred).toPandas()
submission_df.sort_values(by=["Id"]).to_csv(os.path.join(working_dir, hp.local_submission_csv), index=False)
# predictions are saved to a CSV file.
print("Saved predictions to %s" % hp.local_submission_csv)
return working_dir
# %% [markdown]
# ## Defining the Spark Task
#
# Flyte provides an easy-to-use interface to specify Spark-related attributes.
# The Spark attributes need to be attached to a specific task, and just like that, Flyte can run Spark jobs natively on Kubernetes clusters!
# Within the task, let's call the data pre-processing, training, and evaluation functions.
#
# :::{note}
# To set up Spark, refer to {ref}`flyte-and-spark`.
# :::
# %%
@task(
task_config=Spark(
# the below configuration is applied to the Spark cluster
spark_conf={
"spark.driver.memory": "2000M",
"spark.executor.memory": "2000M",
"spark.executor.cores": "1",
"spark.executor.instances": "2",
"spark.driver.cores": "1",
"spark.sql.shuffle.partitions": "16",
"spark.worker.timeout": "120",
}
),
cache=True,
cache_version="0.2",
requests=Resources(mem="1Gi"),
limits=Resources(mem="1Gi"),
)
def horovod_spark_task(data_dir: FlyteDirectory, hp: Hyperparameters, work_dir: FlyteDirectory) -> FlyteDirectory:
max_sales, vocab, train_df, test_df = data_preparation(data_dir, hp)
# working directory will have the model and predictions as separate files
working_dir = flytekit.current_context().working_directory
keras_model = train(
max_sales,
vocab,
hp,
work_dir,
train_df,
working_dir,
)
# generate predictions
return test(keras_model, working_dir, test_df, hp)
# %% [markdown]
# Lastly, we define a workflow to run the pipeline.
# %%
@workflow
def horovod_spark_wf(
dataset: str = "https://cdn.discordapp.com/attachments/545481172399030272/886952942903627786/rossmann.tgz",
hp: Hyperparameters = Hyperparameters(),
work_dir: FlyteDirectory = "s3://flyte-demo/horovod-tmp/",
) -> FlyteDirectory:
data_dir = download_data(dataset=dataset)
# work_dir corresponds to the Horovod-Spark store
return horovod_spark_task(data_dir=data_dir, hp=hp, work_dir=work_dir)
# %% [markdown]
# ## Running the Model Locally
#
# We can run the code locally too, provided Spark is enabled and the plugin is set up in the environment.
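# For instance (assuming Spark is installed locally and the required plugins are present, e.g. `pip install flytekitplugins-spark horovod[spark]`), the script can be run directly with `python keras_spark_rossmann_estimator.py`.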
#
# %%
if __name__ == "__main__":
metrics_directory = horovod_spark_wf()
print(f"Find the model and predictions at {metrics_directory}")
| batch_size: int = 100
sample_rate: float = 0.01
learning_rate: float = 0.0001
num_proc: int = 2
epochs: int = 100
local_checkpoint_file: str = "checkpoint.h5"
local_submission_csv: str = "submission.csv" | identifier_body |
keras_spark_rossmann_estimator.py | # %% [markdown]
# (spark_horovod_keras)=
#
# # Data-Parallel Distributed Training Using Horovod on Spark
#
# When time- and compute-intensive deep learning workloads need to be trained efficiently, data-parallel distributed training comes to the rescue.
# This technique partitions the training data across worker nodes; the workers then synchronize their model updates with one another during training, which ensures that every worker trains a consistent model.
# Overall, data-parallel distributed training can help speed up the execution time.
#
# In this tutorial, we will understand how data-parallel distributed training works with Flyte, Horovod, and Spark.
#
# We will forecast sales using the Rossmann store sales dataset. As the data preparation step, we will process the data using Spark, a data processing engine. To improve the speed and ease of distributed training, we will use Horovod, a distributed deep learning training framework.
# Lastly, we will build a Keras model and perform distributed training using Horovod's [KerasEstimator API](https://github.com/horovod/horovod/blob/8d34c85ce7ec76e81fb3be99418b0e4d35204dc3/horovod/spark/keras/estimator.py#L88).
#
# Before executing the code, create `work_dir`, an s3 bucket.
#
# Let's get started with the example!
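# %% [markdown]
# Before diving in, here is a minimal, self-contained sketch (purely illustrative, not part of the pipeline) of what "data-parallel" means: every worker holds a replica of the model, computes gradients on its own shard of the data, and an allreduce-style average keeps all replicas in sync. Horovod implements this step efficiently (ring-allreduce); the NumPy version below only conveys the idea.
# %%
import numpy as np
def allreduce_mean(per_worker_grads):
    # average the gradients contributed by each worker (what Horovod's allreduce computes)
    return np.mean(np.stack(per_worker_grads), axis=0)
# two hypothetical workers share identical weights but see different data shards
replica_weights = np.zeros(3)
grads_from_shards = [np.array([0.2, -0.1, 0.4]), np.array([0.0, -0.3, 0.2])]
# every replica applies the same averaged update, so the replicas stay consistent
replica_weights -= 0.1 * allreduce_mean(grads_from_shards)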
# %% [markdown]
# First, let's import the required packages into the environment.
# %%
import datetime
import os
import pathlib
import subprocess
import sys
from dataclasses import dataclass
from distutils.version import LooseVersion
from typing import Any, Dict, List, Tuple
import flytekit
import horovod.spark.keras as hvd
import pyspark
import pyspark.sql.functions as F
import pyspark.sql.types as T
import tensorflow as tf
import tensorflow.keras.backend as K
from dataclasses_json import dataclass_json
from flytekit import Resources, task, workflow
from flytekit.types.directory import FlyteDirectory
from flytekitplugins.spark import Spark
from horovod.spark.common.backend import SparkBackend
from horovod.spark.common.store import Store
from horovod.tensorflow.keras.callbacks import BestModelCheckpoint
from pyspark import Row
from tensorflow.keras.layers import BatchNormalization, Concatenate, Dense, Dropout, Embedding, Flatten, Input, Reshape
# %% [markdown]
# We define two variables to represent categorical and continuous columns in the dataset.
# %%
CATEGORICAL_COLS = [
"Store",
"State",
"DayOfWeek",
"Year",
"Month",
"Day",
"Week",
"CompetitionMonthsOpen",
"Promo2Weeks",
"StoreType",
"Assortment",
"PromoInterval",
"CompetitionOpenSinceYear",
"Promo2SinceYear",
"Events",
"Promo",
"StateHoliday",
"SchoolHoliday",
]
CONTINUOUS_COLS = [
"CompetitionDistance",
"Max_TemperatureC",
"Mean_TemperatureC",
"Min_TemperatureC",
"Max_Humidity",
"Mean_Humidity",
"Min_Humidity",
"Max_Wind_SpeedKm_h",
"Mean_Wind_SpeedKm_h",
"CloudCover",
"trend",
"trend_de",
"BeforePromo",
"AfterPromo",
"AfterStateHoliday",
"BeforeStateHoliday",
"BeforeSchoolHoliday",
"AfterSchoolHoliday",
]
# %% [markdown]
# Next, let's initialize a data class to store the hyperparameters that will be used with the model (`epochs`, `learning_rate`, `batch_size`, etc.).
# %%
@dataclass_json
@dataclass
class Hyperparameters:
batch_size: int = 100
sample_rate: float = 0.01
learning_rate: float = 0.0001
num_proc: int = 2
epochs: int = 100
local_checkpoint_file: str = "checkpoint.h5"
local_submission_csv: str = "submission.csv"
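# %% [markdown]
# A quick aside (illustrative only): the `dataclass_json` decorator adds JSON (de)serialization to the class, which is what lets the hyperparameters be passed between Flyte tasks as a single value.
# %%
# hypothetical round-trip to show the serialization behaviour; not used by the pipeline itself
hp_example = Hyperparameters(epochs=10)
hp_as_json = hp_example.to_json()
assert Hyperparameters.from_json(hp_as_json) == hp_example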
# %% [markdown]
# ## Downloading the Data
#
# We define a task to download the data into a `FlyteDirectory`.
# %%
@task(
cache=True,
cache_version="0.1",
)
def download_data(dataset: str) -> FlyteDirectory:
# create a directory named 'data'
print("==============")
print("Downloading data")
print("==============")
working_dir = flytekit.current_context().working_directory
data_dir = pathlib.Path(os.path.join(working_dir, "data"))
data_dir.mkdir(exist_ok=True)
# download the dataset
download_subprocess = subprocess.run(
[
"curl",
dataset,
],
check=True,
capture_output=True,
)
# untar the data
subprocess.run(
[
"tar",
"-xz",
"-C",
data_dir,
],
input=download_subprocess.stdout,
)
# return the directory populated with Rossmann data files
return FlyteDirectory(path=str(data_dir))
# %% [markdown]
# ## Data Preprocessing
#
# 1. Let's start with cleaning and preparing the Google trend data. We create new 'Date' and 'State' columns using PySpark's `withColumn`. These columns, in addition to other features, will contribute to the prediction of sales.
# %%
def prepare_google_trend(
google_trend_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
google_trend_all = google_trend_csv.withColumn(
"Date", F.regexp_extract(google_trend_csv.week, "(.*?) -", 1)
).withColumn("State", F.regexp_extract(google_trend_csv.file, "Rossmann_DE_(.*)", 1))
# map state NI -> HB,NI to align with other data sources
google_trend_all = google_trend_all.withColumn(
"State",
F.when(google_trend_all.State == "NI", "HB,NI").otherwise(google_trend_all.State),
)
# expand dates
return expand_date(google_trend_all)
# %% [markdown]
# 2. Next, we set a few date-specific values in the DataFrame to analyze the seasonal effects on sales.
# %%
def expand_date(df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
df = df.withColumn("Date", df.Date.cast(T.DateType()))
return (
df.withColumn("Year", F.year(df.Date))
.withColumn("Month", F.month(df.Date))
.withColumn("Week", F.weekofyear(df.Date))
.withColumn("Day", F.dayofmonth(df.Date))
)
# %% [markdown]
# 3. We retrieve the number of days before/after a special event (such as a promo or holiday). This data helps analyze how the sales may vary before/after a special event.
# %%
def add_elapsed(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
def add_elapsed_column(col, asc):
def fn(rows):
last_store, last_date = None, None
for r in rows:
if last_store != r.Store:
last_store = r.Store
last_date = r.Date
if r[col]:
last_date = r.Date
fields = r.asDict().copy()
fields[("After" if asc else "Before") + col] = (r.Date - last_date).days
yield Row(**fields)
return fn
# repartition: rearrange the rows in the DataFrame based on the partitioning expression
# sortWithinPartitions: sort every partition in the DataFrame based on specific columns
# mapPartitions: apply the 'add_elapsed_column' method to each partition in the dataset, and convert the partitions into a DataFrame
df = df.repartition(df.Store)
for asc in [False, True]:
sort_col = df.Date.asc() if asc else df.Date.desc()
rdd = df.sortWithinPartitions(df.Store.asc(), sort_col).rdd
for col in cols:
rdd = rdd.mapPartitions(add_elapsed_column(col, asc))
df = rdd.toDF()
return df
# %% [markdown]
# 4. We define a function to merge several Spark DataFrames into a single DataFrame to create training and test data.
# %%
def prepare_df(
df: pyspark.sql.DataFrame,
store_csv: pyspark.sql.DataFrame,
store_states_csv: pyspark.sql.DataFrame,
state_names_csv: pyspark.sql.DataFrame,
google_trend_csv: pyspark.sql.DataFrame,
weather_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
num_rows = df.count()
# expand dates
df = expand_date(df)
    # convert the flag columns (Open, Promo, StateHoliday, SchoolHoliday) from strings to booleans
df = (
df.withColumn("Open", df.Open != "0")
.withColumn("Promo", df.Promo != "0")
.withColumn("StateHoliday", df.StateHoliday != "0")
.withColumn("SchoolHoliday", df.SchoolHoliday != "0")
)
# merge store information
store = store_csv.join(store_states_csv, "Store")
df = df.join(store, "Store")
# merge Google Trend information
google_trend_all = prepare_google_trend(google_trend_csv)
df = df.join(google_trend_all, ["State", "Year", "Week"]).select(df["*"], google_trend_all.trend)
# merge in Google Trend for whole Germany
google_trend_de = google_trend_all[google_trend_all.file == "Rossmann_DE"].withColumnRenamed("trend", "trend_de")
df = df.join(google_trend_de, ["Year", "Week"]).select(df["*"], google_trend_de.trend_de)
# merge weather
weather = weather_csv.join(state_names_csv, weather_csv.file == state_names_csv.StateName)
df = df.join(weather, ["State", "Date"])
# fix null values
df = (
df.withColumn(
"CompetitionOpenSinceYear",
F.coalesce(df.CompetitionOpenSinceYear, F.lit(1900)),
)
.withColumn(
"CompetitionOpenSinceMonth",
F.coalesce(df.CompetitionOpenSinceMonth, F.lit(1)),
)
.withColumn("Promo2SinceYear", F.coalesce(df.Promo2SinceYear, F.lit(1900)))
.withColumn("Promo2SinceWeek", F.coalesce(df.Promo2SinceWeek, F.lit(1)))
)
# days and months since the competition has been open, cap it to 2 years
df = df.withColumn(
"CompetitionOpenSince",
F.to_date(F.format_string("%s-%s-15", df.CompetitionOpenSinceYear, df.CompetitionOpenSinceMonth)),
)
df = df.withColumn(
"CompetitionDaysOpen",
F.when(
df.CompetitionOpenSinceYear > 1900,
F.greatest(
F.lit(0),
F.least(F.lit(360 * 2), F.datediff(df.Date, df.CompetitionOpenSince)),
),
).otherwise(0),
)
df = df.withColumn("CompetitionMonthsOpen", (df.CompetitionDaysOpen / 30).cast(T.IntegerType()))
# days and weeks of promotion, cap it to 25 weeks
df = df.withColumn(
"Promo2Since",
F.expr('date_add(format_string("%s-01-01", Promo2SinceYear), (cast(Promo2SinceWeek as int) - 1) * 7)'),
)
df = df.withColumn(
"Promo2Days",
F.when(
df.Promo2SinceYear > 1900,
F.greatest(F.lit(0), F.least(F.lit(25 * 7), F.datediff(df.Date, df.Promo2Since))),
).otherwise(0),
)
df = df.withColumn("Promo2Weeks", (df.Promo2Days / 7).cast(T.IntegerType()))
# ensure that no row was lost through inner joins
assert num_rows == df.count(), "lost rows in joins"
return df
# %% [markdown]
# 5. We build a dictionary of sorted, distinct categorical variables to create an embedding layer in our Keras model.
# %%
def build_vocabulary(df: pyspark.sql.DataFrame) -> Dict[str, List[Any]]:
vocab = {}
for col in CATEGORICAL_COLS:
values = [r[0] for r in df.select(col).distinct().collect()]
col_type = type([x for x in values if x is not None][0])
default_value = col_type()
vocab[col] = sorted(values, key=lambda x: x or default_value)
return vocab
# %% [markdown]
# 6. Next, we cast continuous columns to float as part of data preprocessing.
# %%
def cast_columns(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
for col in cols:
df = df.withColumn(col, F.coalesce(df[col].cast(T.FloatType()), F.lit(0.0)))
return df
# %% [markdown]
# 7. Lastly, we define a function that replaces each categorical value with its index in the vocabulary.
# %%
def lookup_columns(df: pyspark.sql.DataFrame, vocab: Dict[str, List[Any]]) -> pyspark.sql.DataFrame:
def lookup(mapping):
def fn(v):
return mapping.index(v)
return F.udf(fn, returnType=T.IntegerType())
for col, mapping in vocab.items():
df = df.withColumn(col, lookup(mapping)(df[col]))
return df
# %% [markdown]
# The `data_preparation` function consolidates all the aforementioned data processing functions.
# %%
def data_preparation(
data_dir: FlyteDirectory, hp: Hyperparameters
) -> Tuple[float, Dict[str, List[Any]], pyspark.sql.DataFrame, pyspark.sql.DataFrame]:
print("================")
print("Data preparation")
print("================")
    # 'current_context' exposes the Spark session that Flyte provisions for the ``data_preparation`` task
spark = flytekit.current_context().spark_session
data_dir_path = data_dir.remote_source
# read the CSV data into Spark DataFrame
train_csv = spark.read.csv("%s/train.csv" % data_dir_path, header=True)
test_csv = spark.read.csv("%s/test.csv" % data_dir_path, header=True)
store_csv = spark.read.csv("%s/store.csv" % data_dir_path, header=True)
store_states_csv = spark.read.csv("%s/store_states.csv" % data_dir_path, header=True)
state_names_csv = spark.read.csv("%s/state_names.csv" % data_dir_path, header=True)
google_trend_csv = spark.read.csv("%s/googletrend.csv" % data_dir_path, header=True)
weather_csv = spark.read.csv("%s/weather.csv" % data_dir_path, header=True)
# retrieve a sampled subset of the train and test data
if hp.sample_rate:
train_csv = train_csv.sample(withReplacement=False, fraction=hp.sample_rate)
test_csv = test_csv.sample(withReplacement=False, fraction=hp.sample_rate)
# prepare the DataFrames from the CSV files
train_df = prepare_df(
train_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
test_df = prepare_df(
test_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
# add elapsed times from the data spanning training & test datasets
elapsed_cols = ["Promo", "StateHoliday", "SchoolHoliday"]
elapsed = add_elapsed(
train_df.select("Date", "Store", *elapsed_cols).unionAll(test_df.select("Date", "Store", *elapsed_cols)),
elapsed_cols,
)
# join with the elapsed times
train_df = train_df.join(elapsed, ["Date", "Store"]).select(
train_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
test_df = test_df.join(elapsed, ["Date", "Store"]).select(
test_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
# filter out zero sales
train_df = train_df.filter(train_df.Sales > 0)
print("===================")
print("Prepared data frame")
print("===================")
train_df.show()
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
# select features
train_df = train_df.select(*(all_cols + ["Sales", "Date"])).cache()
test_df = test_df.select(*(all_cols + ["Id", "Date"])).cache()
# build a vocabulary of categorical columns
vocab = build_vocabulary(
train_df.select(*CATEGORICAL_COLS).unionAll(test_df.select(*CATEGORICAL_COLS)).cache(),
)
# cast continuous columns to float
train_df = cast_columns(train_df, CONTINUOUS_COLS + ["Sales"])
    # replace categorical values with their vocabulary indices
train_df = lookup_columns(train_df, vocab)
test_df = cast_columns(test_df, CONTINUOUS_COLS)
test_df = lookup_columns(test_df, vocab)
# split into training & validation
# test set is in 2015, use the same period in 2014 from the training set as a validation set
test_min_date = test_df.agg(F.min(test_df.Date)).collect()[0][0]
test_max_date = test_df.agg(F.max(test_df.Date)).collect()[0][0]
one_year = datetime.timedelta(365)
train_df = train_df.withColumn(
"Validation",
(train_df.Date > test_min_date - one_year) & (train_df.Date <= test_max_date - one_year),
)
# determine max Sales number
max_sales = train_df.agg(F.max(train_df.Sales)).collect()[0][0]
# convert Sales to log domain
train_df = train_df.withColumn("Sales", F.log(train_df.Sales))
print("===================================")
print("Data frame with transformed columns")
print("===================================")
train_df.show()
print("================")
print("Data frame sizes")
print("================")
    # use the Validation flag to count the rows in the training and validation splits
train_rows = train_df.filter(~train_df.Validation).count()
val_rows = train_df.filter(train_df.Validation).count()
test_rows = test_df.count()
# print the number of rows in training, validation and test data
print("Training: %d" % train_rows)
print("Validation: %d" % val_rows)
print("Test: %d" % test_rows)
return max_sales, vocab, train_df, test_df
# %% [markdown]
# ## Training
#
# We use `KerasEstimator` in Horovod to train our Keras model on an existing pre-processed Spark DataFrame.
# The Estimator leverages Horovod's ability to scale across multiple workers, thereby eliminating any specialized code to perform distributed training.
# %%
def train(
max_sales: float,
vocab: Dict[str, List[Any]],
hp: Hyperparameters,
work_dir: FlyteDirectory,
train_df: pyspark.sql.DataFrame,
working_dir: FlyteDirectory,
):
print("==============")
print("Model training")
print("==============")
# a method to determine root mean square percentage error of exponential of predictions
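    # RMSPE = sqrt( mean( ((y - y_hat) / y)^2 ) ); the model works in the log domain,
    # so targets and predictions are exponentiated back to real sales before the percentage error is taken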
def exp_rmspe(y_true, y_pred):
"""Competition evaluation metric, expects logarmithic inputs."""
pct = tf.square((tf.exp(y_true) - tf.exp(y_pred)) / tf.exp(y_true))
# compute mean excluding stores with zero denominator
x = tf.reduce_sum(tf.where(y_true > 0.001, pct, tf.zeros_like(pct)))
y = tf.reduce_sum(tf.where(y_true > 0.001, tf.ones_like(pct), tf.zeros_like(pct)))
return tf.sqrt(x / y)
def act_sigmoid_scaled(x):
"""Sigmoid scaled to logarithm of maximum sales scaled by 20%."""
return tf.nn.sigmoid(x) * tf.math.log(max_sales) * 1.2
    # NOTE: exp_rmspe and act_sigmoid_scaled are defined inside this function rather than at the module level
    # because act_sigmoid_scaled must close over max_sales (an activation function cannot take extra arguments),
    # and both are registered together as custom objects so the saved model can be reloaded later.
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
CUSTOM_OBJECTS = {"exp_rmspe": exp_rmspe, "act_sigmoid_scaled": act_sigmoid_scaled}
# disable GPUs when building the model to prevent memory leaks
if LooseVersion(tf.__version__) >= LooseVersion("2.0.0"):
# See https://github.com/tensorflow/tensorflow/issues/33168
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
else:
K.set_session(tf.Session(config=tf.ConfigProto(device_count={"GPU": 0})))
# build the Keras model | continuous_bn = Concatenate()([Reshape((1, 1), name="reshape_" + col)(inputs[col]) for col in CONTINUOUS_COLS])
continuous_bn = BatchNormalization()(continuous_bn)
x = Concatenate()(embeddings + [continuous_bn])
x = Flatten()(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(500, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dropout(0.5)(x)
# specify element-wise activation
output = Dense(1, activation=act_sigmoid_scaled)(x)
model = tf.keras.Model([inputs[f] for f in all_cols], output)
# display the details of the Keras model
model.summary()
    opt = tf.keras.optimizers.Adam(learning_rate=hp.learning_rate, epsilon=1e-3)
# checkpoint callback to specify the options for the returned Keras model
ckpt_callback = BestModelCheckpoint(monitor="val_loss", mode="auto", save_freq="epoch")
# create an object of Store class
store = Store.create(work_dir.remote_source)
# 'SparkBackend' uses `horovod.spark.run` to execute the distributed training function, and
# returns a list of results by running 'train' on every worker in the cluster
backend = SparkBackend(
num_proc=hp.num_proc,
stdout=sys.stdout,
stderr=sys.stderr,
prefix_output_with_timestamp=True,
)
# define a Spark Estimator that fits Keras models to a DataFrame
keras_estimator = hvd.KerasEstimator(
backend=backend,
store=store,
model=model,
optimizer=opt,
loss="mae",
metrics=[exp_rmspe],
custom_objects=CUSTOM_OBJECTS,
feature_cols=all_cols,
label_cols=["Sales"],
validation="Validation",
batch_size=hp.batch_size,
epochs=hp.epochs,
verbose=2,
checkpoint_callback=ckpt_callback,
)
# The Estimator hides the following details:
# 1. Binding Spark DataFrames to a deep learning training script
# 2. Reading data into a format that can be interpreted by the training framework
# 3. Distributed training using Horovod
    # we pass a Keras model to the `KerasEstimator`;
    # fitting it on the Spark DataFrame returns a Transformer that wraps the trained model
keras_model = keras_estimator.fit(train_df).setOutputCols(["Sales_output"])
# retrieve the model training history
history = keras_model.getHistory()
best_val_rmspe = min(history["val_exp_rmspe"])
print("Best RMSPE: %f" % best_val_rmspe)
# save the trained model
keras_model.save(os.path.join(working_dir, hp.local_checkpoint_file))
print("Written checkpoint to %s" % os.path.join(working_dir, hp.local_checkpoint_file))
# the Estimator returns a Transformer representation of the trained model once training is complete
return keras_model
# %% [markdown]
# ## Evaluation
#
# We use the model transformer to forecast sales.
# %%
def test(
keras_model,
working_dir: FlyteDirectory,
test_df: pyspark.sql.DataFrame,
hp: Hyperparameters,
) -> FlyteDirectory:
print("================")
print("Final prediction")
print("================")
pred_df = keras_model.transform(test_df)
pred_df.printSchema()
pred_df.show(5)
# convert from log domain to real Sales numbers
pred_df = pred_df.withColumn("Sales_pred", F.exp(pred_df.Sales_output))
submission_df = pred_df.select(pred_df.Id.cast(T.IntegerType()), pred_df.Sales_pred).toPandas()
submission_df.sort_values(by=["Id"]).to_csv(os.path.join(working_dir, hp.local_submission_csv), index=False)
# predictions are saved to a CSV file.
print("Saved predictions to %s" % hp.local_submission_csv)
return working_dir
# %% [markdown]
# ## Defining the Spark Task
#
# Flyte provides an easy-to-use interface to specify Spark-related attributes.
# The Spark attributes need to be attached to a specific task, and just like that, Flyte can run Spark jobs natively on Kubernetes clusters!
# Within the task, let's call the data pre-processing, training, and evaluation functions.
#
# :::{note}
# To set up Spark, refer to {ref}`flyte-and-spark`.
# :::
# %%
@task(
task_config=Spark(
# the below configuration is applied to the Spark cluster
spark_conf={
"spark.driver.memory": "2000M",
"spark.executor.memory": "2000M",
"spark.executor.cores": "1",
"spark.executor.instances": "2",
"spark.driver.cores": "1",
"spark.sql.shuffle.partitions": "16",
"spark.worker.timeout": "120",
}
),
cache=True,
cache_version="0.2",
requests=Resources(mem="1Gi"),
limits=Resources(mem="1Gi"),
)
def horovod_spark_task(data_dir: FlyteDirectory, hp: Hyperparameters, work_dir: FlyteDirectory) -> FlyteDirectory:
max_sales, vocab, train_df, test_df = data_preparation(data_dir, hp)
# working directory will have the model and predictions as separate files
working_dir = flytekit.current_context().working_directory
keras_model = train(
max_sales,
vocab,
hp,
work_dir,
train_df,
working_dir,
)
# generate predictions
return test(keras_model, working_dir, test_df, hp)
# %% [markdown]
# Lastly, we define a workflow to run the pipeline.
# %%
@workflow
def horovod_spark_wf(
dataset: str = "https://cdn.discordapp.com/attachments/545481172399030272/886952942903627786/rossmann.tgz",
hp: Hyperparameters = Hyperparameters(),
work_dir: FlyteDirectory = "s3://flyte-demo/horovod-tmp/",
) -> FlyteDirectory:
data_dir = download_data(dataset=dataset)
# work_dir corresponds to the Horovod-Spark store
return horovod_spark_task(data_dir=data_dir, hp=hp, work_dir=work_dir)
# %% [markdown]
# ## Running the Model Locally
#
# We can run the code locally too, provided Spark is enabled and the plugin is set up in the environment.
#
# %%
if __name__ == "__main__":
metrics_directory = horovod_spark_wf()
print(f"Find the model and predictions at {metrics_directory}") | inputs = {col: Input(shape=(1,), name=col) for col in all_cols}
embeddings = [
Embedding(len(vocab[col]), 10, input_length=1, name="emb_" + col)(inputs[col]) for col in CATEGORICAL_COLS
] | random_line_split |
keras_spark_rossmann_estimator.py | # %% [markdown]
# (spark_horovod_keras)=
#
# # Data-Parallel Distributed Training Using Horovod on Spark
#
# When time- and compute-intensive deep learning workloads need to be trained efficiently, data-parallel distributed training comes to the rescue.
# This technique partitions the training data across worker nodes; the workers then synchronize their model updates with one another during training, which ensures that every worker trains a consistent model.
# Overall, data-parallel distributed training can help speed up the execution time.
#
# In this tutorial, we will understand how data-parallel distributed training works with Flyte, Horovod, and Spark.
#
# We will forecast sales using the Rossmann store sales dataset. As the data preparation step, we will process the data using Spark, a data processing engine. To improve the speed and ease of distributed training, we will use Horovod, a distributed deep learning training framework.
# Lastly, we will build a Keras model and perform distributed training using Horovod's [KerasEstimator API](https://github.com/horovod/horovod/blob/8d34c85ce7ec76e81fb3be99418b0e4d35204dc3/horovod/spark/keras/estimator.py#L88).
#
# Before executing the code, create `work_dir`, an s3 bucket.
#
# Let's get started with the example!
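# %% [markdown]
# Before diving in, here is a minimal, self-contained sketch (purely illustrative, not part of the pipeline) of what "data-parallel" means: every worker holds a replica of the model, computes gradients on its own shard of the data, and an allreduce-style average keeps all replicas in sync. Horovod implements this step efficiently (ring-allreduce); the NumPy version below only conveys the idea.
# %%
import numpy as np
def allreduce_mean(per_worker_grads):
    # average the gradients contributed by each worker (what Horovod's allreduce computes)
    return np.mean(np.stack(per_worker_grads), axis=0)
# two hypothetical workers share identical weights but see different data shards
replica_weights = np.zeros(3)
grads_from_shards = [np.array([0.2, -0.1, 0.4]), np.array([0.0, -0.3, 0.2])]
# every replica applies the same averaged update, so the replicas stay consistent
replica_weights -= 0.1 * allreduce_mean(grads_from_shards)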
# %% [markdown]
# First, let's import the required packages into the environment.
# %%
import datetime
import os
import pathlib
import subprocess
import sys
from dataclasses import dataclass
from distutils.version import LooseVersion
from typing import Any, Dict, List, Tuple
import flytekit
import horovod.spark.keras as hvd
import pyspark
import pyspark.sql.functions as F
import pyspark.sql.types as T
import tensorflow as tf
import tensorflow.keras.backend as K
from dataclasses_json import dataclass_json
from flytekit import Resources, task, workflow
from flytekit.types.directory import FlyteDirectory
from flytekitplugins.spark import Spark
from horovod.spark.common.backend import SparkBackend
from horovod.spark.common.store import Store
from horovod.tensorflow.keras.callbacks import BestModelCheckpoint
from pyspark import Row
from tensorflow.keras.layers import BatchNormalization, Concatenate, Dense, Dropout, Embedding, Flatten, Input, Reshape
# %% [markdown]
# We define two variables to represent categorical and continuous columns in the dataset.
# %%
CATEGORICAL_COLS = [
"Store",
"State",
"DayOfWeek",
"Year",
"Month",
"Day",
"Week",
"CompetitionMonthsOpen",
"Promo2Weeks",
"StoreType",
"Assortment",
"PromoInterval",
"CompetitionOpenSinceYear",
"Promo2SinceYear",
"Events",
"Promo",
"StateHoliday",
"SchoolHoliday",
]
CONTINUOUS_COLS = [
"CompetitionDistance",
"Max_TemperatureC",
"Mean_TemperatureC",
"Min_TemperatureC",
"Max_Humidity",
"Mean_Humidity",
"Min_Humidity",
"Max_Wind_SpeedKm_h",
"Mean_Wind_SpeedKm_h",
"CloudCover",
"trend",
"trend_de",
"BeforePromo",
"AfterPromo",
"AfterStateHoliday",
"BeforeStateHoliday",
"BeforeSchoolHoliday",
"AfterSchoolHoliday",
]
# %% [markdown]
# Next, let's initialize a data class to store the hyperparameters that will be used with the model (`epochs`, `learning_rate`, `batch_size`, etc.).
# %%
@dataclass_json
@dataclass
class Hyperparameters:
batch_size: int = 100
sample_rate: float = 0.01
learning_rate: float = 0.0001
num_proc: int = 2
epochs: int = 100
local_checkpoint_file: str = "checkpoint.h5"
local_submission_csv: str = "submission.csv"
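# %% [markdown]
# A quick aside (illustrative only): the `dataclass_json` decorator adds JSON (de)serialization to the class, which is what lets the hyperparameters be passed between Flyte tasks as a single value.
# %%
# hypothetical round-trip to show the serialization behaviour; not used by the pipeline itself
hp_example = Hyperparameters(epochs=10)
hp_as_json = hp_example.to_json()
assert Hyperparameters.from_json(hp_as_json) == hp_example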
# %% [markdown]
# ## Downloading the Data
#
# We define a task to download the data into a `FlyteDirectory`.
# %%
@task(
cache=True,
cache_version="0.1",
)
def download_data(dataset: str) -> FlyteDirectory:
# create a directory named 'data'
print("==============")
print("Downloading data")
print("==============")
working_dir = flytekit.current_context().working_directory
data_dir = pathlib.Path(os.path.join(working_dir, "data"))
data_dir.mkdir(exist_ok=True)
# download the dataset
download_subprocess = subprocess.run(
[
"curl",
dataset,
],
check=True,
capture_output=True,
)
# untar the data
subprocess.run(
[
"tar",
"-xz",
"-C",
data_dir,
],
input=download_subprocess.stdout,
)
# return the directory populated with Rossmann data files
return FlyteDirectory(path=str(data_dir))
# %% [markdown]
# ## Data Preprocessing
#
# 1. Let's start with cleaning and preparing the Google trend data. We create new 'Date' and 'State' columns using PySpark's `withColumn`. These columns, in addition to other features, will contribute to the prediction of sales.
# %%
def prepare_google_trend(
google_trend_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
google_trend_all = google_trend_csv.withColumn(
"Date", F.regexp_extract(google_trend_csv.week, "(.*?) -", 1)
).withColumn("State", F.regexp_extract(google_trend_csv.file, "Rossmann_DE_(.*)", 1))
# map state NI -> HB,NI to align with other data sources
google_trend_all = google_trend_all.withColumn(
"State",
F.when(google_trend_all.State == "NI", "HB,NI").otherwise(google_trend_all.State),
)
# expand dates
return expand_date(google_trend_all)
# %% [markdown]
# 2. Next, we set a few date-specific values in the DataFrame to analyze the seasonal effects on sales.
# %%
def expand_date(df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
df = df.withColumn("Date", df.Date.cast(T.DateType()))
return (
df.withColumn("Year", F.year(df.Date))
.withColumn("Month", F.month(df.Date))
.withColumn("Week", F.weekofyear(df.Date))
.withColumn("Day", F.dayofmonth(df.Date))
)
# %% [markdown]
# 3. We retrieve the number of days before/after a special event (such as a promo or holiday). This data helps analyze how the sales may vary before/after a special event.
# %%
def add_elapsed(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
def add_elapsed_column(col, asc):
def fn(rows):
last_store, last_date = None, None
for r in rows:
if last_store != r.Store:
last_store = r.Store
last_date = r.Date
if r[col]:
last_date = r.Date
fields = r.asDict().copy()
fields[("After" if asc else "Before") + col] = (r.Date - last_date).days
yield Row(**fields)
return fn
# repartition: rearrange the rows in the DataFrame based on the partitioning expression
# sortWithinPartitions: sort every partition in the DataFrame based on specific columns
# mapPartitions: apply the 'add_elapsed_column' method to each partition in the dataset, and convert the partitions into a DataFrame
df = df.repartition(df.Store)
for asc in [False, True]:
sort_col = df.Date.asc() if asc else df.Date.desc()
rdd = df.sortWithinPartitions(df.Store.asc(), sort_col).rdd
for col in cols:
rdd = rdd.mapPartitions(add_elapsed_column(col, asc))
df = rdd.toDF()
return df
# %% [markdown]
# 4. We define a function to merge several Spark DataFrames into a single DataFrame to create training and test data.
# %%
def prepare_df(
df: pyspark.sql.DataFrame,
store_csv: pyspark.sql.DataFrame,
store_states_csv: pyspark.sql.DataFrame,
state_names_csv: pyspark.sql.DataFrame,
google_trend_csv: pyspark.sql.DataFrame,
weather_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
num_rows = df.count()
# expand dates
df = expand_date(df)
    # convert the flag columns (Open, Promo, StateHoliday, SchoolHoliday) from strings to booleans
df = (
df.withColumn("Open", df.Open != "0")
.withColumn("Promo", df.Promo != "0")
.withColumn("StateHoliday", df.StateHoliday != "0")
.withColumn("SchoolHoliday", df.SchoolHoliday != "0")
)
# merge store information
store = store_csv.join(store_states_csv, "Store")
df = df.join(store, "Store")
# merge Google Trend information
google_trend_all = prepare_google_trend(google_trend_csv)
df = df.join(google_trend_all, ["State", "Year", "Week"]).select(df["*"], google_trend_all.trend)
# merge in Google Trend for whole Germany
google_trend_de = google_trend_all[google_trend_all.file == "Rossmann_DE"].withColumnRenamed("trend", "trend_de")
df = df.join(google_trend_de, ["Year", "Week"]).select(df["*"], google_trend_de.trend_de)
# merge weather
weather = weather_csv.join(state_names_csv, weather_csv.file == state_names_csv.StateName)
df = df.join(weather, ["State", "Date"])
# fix null values
df = (
df.withColumn(
"CompetitionOpenSinceYear",
F.coalesce(df.CompetitionOpenSinceYear, F.lit(1900)),
)
.withColumn(
"CompetitionOpenSinceMonth",
F.coalesce(df.CompetitionOpenSinceMonth, F.lit(1)),
)
.withColumn("Promo2SinceYear", F.coalesce(df.Promo2SinceYear, F.lit(1900)))
.withColumn("Promo2SinceWeek", F.coalesce(df.Promo2SinceWeek, F.lit(1)))
)
# days and months since the competition has been open, cap it to 2 years
df = df.withColumn(
"CompetitionOpenSince",
F.to_date(F.format_string("%s-%s-15", df.CompetitionOpenSinceYear, df.CompetitionOpenSinceMonth)),
)
df = df.withColumn(
"CompetitionDaysOpen",
F.when(
df.CompetitionOpenSinceYear > 1900,
F.greatest(
F.lit(0),
F.least(F.lit(360 * 2), F.datediff(df.Date, df.CompetitionOpenSince)),
),
).otherwise(0),
)
df = df.withColumn("CompetitionMonthsOpen", (df.CompetitionDaysOpen / 30).cast(T.IntegerType()))
# days and weeks of promotion, cap it to 25 weeks
df = df.withColumn(
"Promo2Since",
F.expr('date_add(format_string("%s-01-01", Promo2SinceYear), (cast(Promo2SinceWeek as int) - 1) * 7)'),
)
df = df.withColumn(
"Promo2Days",
F.when(
df.Promo2SinceYear > 1900,
F.greatest(F.lit(0), F.least(F.lit(25 * 7), F.datediff(df.Date, df.Promo2Since))),
).otherwise(0),
)
df = df.withColumn("Promo2Weeks", (df.Promo2Days / 7).cast(T.IntegerType()))
# ensure that no row was lost through inner joins
assert num_rows == df.count(), "lost rows in joins"
return df
# %% [markdown]
# 5. We build a dictionary of sorted, distinct categorical variables to create an embedding layer in our Keras model.
# %%
def build_vocabulary(df: pyspark.sql.DataFrame) -> Dict[str, List[Any]]:
vocab = {}
for col in CATEGORICAL_COLS:
values = [r[0] for r in df.select(col).distinct().collect()]
col_type = type([x for x in values if x is not None][0])
default_value = col_type()
vocab[col] = sorted(values, key=lambda x: x or default_value)
return vocab
# %% [markdown]
# 6. Next, we cast continuous columns to float as part of data preprocessing.
# %%
def cast_columns(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
for col in cols:
df = df.withColumn(col, F.coalesce(df[col].cast(T.FloatType()), F.lit(0.0)))
return df
# %% [markdown]
# 7. Lastly, we define a function that replaces each categorical value with its index in the vocabulary.
# %%
def lookup_columns(df: pyspark.sql.DataFrame, vocab: Dict[str, List[Any]]) -> pyspark.sql.DataFrame:
def lookup(mapping):
def fn(v):
return mapping.index(v)
return F.udf(fn, returnType=T.IntegerType())
for col, mapping in vocab.items():
|
return df
# %% [markdown]
# The `data_preparation` function consolidates all the aforementioned data processing functions.
# %%
def data_preparation(
data_dir: FlyteDirectory, hp: Hyperparameters
) -> Tuple[float, Dict[str, List[Any]], pyspark.sql.DataFrame, pyspark.sql.DataFrame]:
print("================")
print("Data preparation")
print("================")
    # 'current_context' exposes the Spark session that Flyte provisions for the ``data_preparation`` task
spark = flytekit.current_context().spark_session
data_dir_path = data_dir.remote_source
# read the CSV data into Spark DataFrame
train_csv = spark.read.csv("%s/train.csv" % data_dir_path, header=True)
test_csv = spark.read.csv("%s/test.csv" % data_dir_path, header=True)
store_csv = spark.read.csv("%s/store.csv" % data_dir_path, header=True)
store_states_csv = spark.read.csv("%s/store_states.csv" % data_dir_path, header=True)
state_names_csv = spark.read.csv("%s/state_names.csv" % data_dir_path, header=True)
google_trend_csv = spark.read.csv("%s/googletrend.csv" % data_dir_path, header=True)
weather_csv = spark.read.csv("%s/weather.csv" % data_dir_path, header=True)
# retrieve a sampled subset of the train and test data
if hp.sample_rate:
train_csv = train_csv.sample(withReplacement=False, fraction=hp.sample_rate)
test_csv = test_csv.sample(withReplacement=False, fraction=hp.sample_rate)
# prepare the DataFrames from the CSV files
train_df = prepare_df(
train_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
test_df = prepare_df(
test_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
# add elapsed times from the data spanning training & test datasets
elapsed_cols = ["Promo", "StateHoliday", "SchoolHoliday"]
elapsed = add_elapsed(
train_df.select("Date", "Store", *elapsed_cols).unionAll(test_df.select("Date", "Store", *elapsed_cols)),
elapsed_cols,
)
# join with the elapsed times
train_df = train_df.join(elapsed, ["Date", "Store"]).select(
train_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
test_df = test_df.join(elapsed, ["Date", "Store"]).select(
test_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
# filter out zero sales
train_df = train_df.filter(train_df.Sales > 0)
print("===================")
print("Prepared data frame")
print("===================")
train_df.show()
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
# select features
train_df = train_df.select(*(all_cols + ["Sales", "Date"])).cache()
test_df = test_df.select(*(all_cols + ["Id", "Date"])).cache()
# build a vocabulary of categorical columns
vocab = build_vocabulary(
train_df.select(*CATEGORICAL_COLS).unionAll(test_df.select(*CATEGORICAL_COLS)).cache(),
)
# cast continuous columns to float
train_df = cast_columns(train_df, CONTINUOUS_COLS + ["Sales"])
    # replace categorical values with their vocabulary indices
train_df = lookup_columns(train_df, vocab)
test_df = cast_columns(test_df, CONTINUOUS_COLS)
test_df = lookup_columns(test_df, vocab)
# split into training & validation
# test set is in 2015, use the same period in 2014 from the training set as a validation set
test_min_date = test_df.agg(F.min(test_df.Date)).collect()[0][0]
test_max_date = test_df.agg(F.max(test_df.Date)).collect()[0][0]
one_year = datetime.timedelta(365)
train_df = train_df.withColumn(
"Validation",
(train_df.Date > test_min_date - one_year) & (train_df.Date <= test_max_date - one_year),
)
# determine max Sales number
max_sales = train_df.agg(F.max(train_df.Sales)).collect()[0][0]
# convert Sales to log domain
train_df = train_df.withColumn("Sales", F.log(train_df.Sales))
print("===================================")
print("Data frame with transformed columns")
print("===================================")
train_df.show()
print("================")
print("Data frame sizes")
print("================")
    # use the Validation flag to count the rows in the training and validation splits
train_rows = train_df.filter(~train_df.Validation).count()
val_rows = train_df.filter(train_df.Validation).count()
test_rows = test_df.count()
# print the number of rows in training, validation and test data
print("Training: %d" % train_rows)
print("Validation: %d" % val_rows)
print("Test: %d" % test_rows)
return max_sales, vocab, train_df, test_df
# %% [markdown]
# ## Training
#
# We use `KerasEstimator` in Horovod to train our Keras model on an existing pre-processed Spark DataFrame.
# The Estimator leverages Horovod's ability to scale across multiple workers, thereby eliminating any specialized code to perform distributed training.
# %%
def train(
max_sales: float,
vocab: Dict[str, List[Any]],
hp: Hyperparameters,
work_dir: FlyteDirectory,
train_df: pyspark.sql.DataFrame,
working_dir: FlyteDirectory,
):
print("==============")
print("Model training")
print("==============")
# a method to determine root mean square percentage error of exponential of predictions
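    # RMSPE = sqrt( mean( ((y - y_hat) / y)^2 ) ); the model works in the log domain,
    # so targets and predictions are exponentiated back to real sales before the percentage error is taken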
def exp_rmspe(y_true, y_pred):
"""Competition evaluation metric, expects logarmithic inputs."""
pct = tf.square((tf.exp(y_true) - tf.exp(y_pred)) / tf.exp(y_true))
# compute mean excluding stores with zero denominator
x = tf.reduce_sum(tf.where(y_true > 0.001, pct, tf.zeros_like(pct)))
y = tf.reduce_sum(tf.where(y_true > 0.001, tf.ones_like(pct), tf.zeros_like(pct)))
return tf.sqrt(x / y)
def act_sigmoid_scaled(x):
"""Sigmoid scaled to logarithm of maximum sales scaled by 20%."""
return tf.nn.sigmoid(x) * tf.math.log(max_sales) * 1.2
    # NOTE: exp_rmspe and act_sigmoid_scaled are defined inside this function rather than at the module level
    # because act_sigmoid_scaled must close over max_sales (an activation function cannot take extra arguments),
    # and both are registered together as custom objects so the saved model can be reloaded later.
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
CUSTOM_OBJECTS = {"exp_rmspe": exp_rmspe, "act_sigmoid_scaled": act_sigmoid_scaled}
# disable GPUs when building the model to prevent memory leaks
if LooseVersion(tf.__version__) >= LooseVersion("2.0.0"):
# See https://github.com/tensorflow/tensorflow/issues/33168
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
else:
K.set_session(tf.Session(config=tf.ConfigProto(device_count={"GPU": 0})))
# build the Keras model
inputs = {col: Input(shape=(1,), name=col) for col in all_cols}
embeddings = [
Embedding(len(vocab[col]), 10, input_length=1, name="emb_" + col)(inputs[col]) for col in CATEGORICAL_COLS
]
continuous_bn = Concatenate()([Reshape((1, 1), name="reshape_" + col)(inputs[col]) for col in CONTINUOUS_COLS])
continuous_bn = BatchNormalization()(continuous_bn)
x = Concatenate()(embeddings + [continuous_bn])
x = Flatten()(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(500, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dropout(0.5)(x)
# specify element-wise activation
output = Dense(1, activation=act_sigmoid_scaled)(x)
model = tf.keras.Model([inputs[f] for f in all_cols], output)
# display the details of the Keras model
model.summary()
    opt = tf.keras.optimizers.Adam(learning_rate=hp.learning_rate, epsilon=1e-3)
# checkpoint callback to specify the options for the returned Keras model
ckpt_callback = BestModelCheckpoint(monitor="val_loss", mode="auto", save_freq="epoch")
# create an object of Store class
store = Store.create(work_dir.remote_source)
# 'SparkBackend' uses `horovod.spark.run` to execute the distributed training function, and
# returns a list of results by running 'train' on every worker in the cluster
backend = SparkBackend(
num_proc=hp.num_proc,
stdout=sys.stdout,
stderr=sys.stderr,
prefix_output_with_timestamp=True,
)
# define a Spark Estimator that fits Keras models to a DataFrame
keras_estimator = hvd.KerasEstimator(
backend=backend,
store=store,
model=model,
optimizer=opt,
loss="mae",
metrics=[exp_rmspe],
custom_objects=CUSTOM_OBJECTS,
feature_cols=all_cols,
label_cols=["Sales"],
validation="Validation",
batch_size=hp.batch_size,
epochs=hp.epochs,
verbose=2,
checkpoint_callback=ckpt_callback,
)
# The Estimator hides the following details:
# 1. Binding Spark DataFrames to a deep learning training script
# 2. Reading data into a format that can be interpreted by the training framework
# 3. Distributed training using Horovod
    # we pass a Keras model to the `KerasEstimator`;
    # fitting it on the Spark DataFrame returns a Transformer that wraps the trained model
keras_model = keras_estimator.fit(train_df).setOutputCols(["Sales_output"])
# retrieve the model training history
history = keras_model.getHistory()
best_val_rmspe = min(history["val_exp_rmspe"])
print("Best RMSPE: %f" % best_val_rmspe)
# save the trained model
keras_model.save(os.path.join(working_dir, hp.local_checkpoint_file))
print("Written checkpoint to %s" % os.path.join(working_dir, hp.local_checkpoint_file))
# the Estimator returns a Transformer representation of the trained model once training is complete
return keras_model
# %% [markdown]
# ## Evaluation
#
# We use the model transformer to forecast sales.
# %%
def test(
keras_model,
working_dir: FlyteDirectory,
test_df: pyspark.sql.DataFrame,
hp: Hyperparameters,
) -> FlyteDirectory:
print("================")
print("Final prediction")
print("================")
pred_df = keras_model.transform(test_df)
pred_df.printSchema()
pred_df.show(5)
# convert from log domain to real Sales numbers
pred_df = pred_df.withColumn("Sales_pred", F.exp(pred_df.Sales_output))
submission_df = pred_df.select(pred_df.Id.cast(T.IntegerType()), pred_df.Sales_pred).toPandas()
submission_df.sort_values(by=["Id"]).to_csv(os.path.join(working_dir, hp.local_submission_csv), index=False)
# predictions are saved to a CSV file.
print("Saved predictions to %s" % hp.local_submission_csv)
return working_dir
# %% [markdown]
# ## Defining the Spark Task
#
# Flyte provides an easy-to-use interface to specify Spark-related attributes.
# The Spark attributes need to be attached to a specific task, and just like that, Flyte can run Spark jobs natively on Kubernetes clusters!
# Within the task, let's call the data pre-processing, training, and evaluation functions.
#
# :::{note}
# To set up Spark, refer to {ref}`flyte-and-spark`.
# :::
# %%
@task(
task_config=Spark(
# the below configuration is applied to the Spark cluster
spark_conf={
"spark.driver.memory": "2000M",
"spark.executor.memory": "2000M",
"spark.executor.cores": "1",
"spark.executor.instances": "2",
"spark.driver.cores": "1",
"spark.sql.shuffle.partitions": "16",
"spark.worker.timeout": "120",
}
),
cache=True,
cache_version="0.2",
requests=Resources(mem="1Gi"),
limits=Resources(mem="1Gi"),
)
def horovod_spark_task(data_dir: FlyteDirectory, hp: Hyperparameters, work_dir: FlyteDirectory) -> FlyteDirectory:
max_sales, vocab, train_df, test_df = data_preparation(data_dir, hp)
# working directory will have the model and predictions as separate files
working_dir = flytekit.current_context().working_directory
keras_model = train(
max_sales,
vocab,
hp,
work_dir,
train_df,
working_dir,
)
# generate predictions
return test(keras_model, working_dir, test_df, hp)
# %% [markdown]
# Lastly, we define a workflow to run the pipeline.
# %%
@workflow
def horovod_spark_wf(
dataset: str = "https://cdn.discordapp.com/attachments/545481172399030272/886952942903627786/rossmann.tgz",
hp: Hyperparameters = Hyperparameters(),
work_dir: FlyteDirectory = "s3://flyte-demo/horovod-tmp/",
) -> FlyteDirectory:
data_dir = download_data(dataset=dataset)
# work_dir corresponds to the Horovod-Spark store
return horovod_spark_task(data_dir=data_dir, hp=hp, work_dir=work_dir)
# %% [markdown]
# ## Running the Model Locally
#
# We can run the code locally too, provided Spark is enabled and the plugin is set up in the environment.
#
# %%
if __name__ == "__main__":
metrics_directory = horovod_spark_wf()
print(f"Find the model and predictions at {metrics_directory}")
| df = df.withColumn(col, lookup(mapping)(df[col])) | conditional_block |
keras_spark_rossmann_estimator.py | # %% [markdown]
# (spark_horovod_keras)=
#
# # Data-Parallel Distributed Training Using Horovod on Spark
#
# When time- and compute-intensive deep learning workloads need to be trained efficiently, data-parallel distributed training comes to the rescue.
# This technique partitions the training data across worker nodes; the workers then synchronize their model updates with one another during training, which ensures that every worker trains a consistent model.
# Overall, data-parallel distributed training can help speed up the execution time.
#
# In this tutorial, we will understand how data-parallel distributed training works with Flyte, Horovod, and Spark.
#
# We will forecast sales using the Rossmann store sales dataset. As the data preparation step, we will process the data using Spark, a data processing engine. To improve the speed and ease of distributed training, we will use Horovod, a distributed deep learning training framework.
# Lastly, we will build a Keras model and perform distributed training using Horovod's [KerasEstimator API](https://github.com/horovod/horovod/blob/8d34c85ce7ec76e81fb3be99418b0e4d35204dc3/horovod/spark/keras/estimator.py#L88).
#
# Before executing the code, create `work_dir`, an s3 bucket.
#
# Let's get started with the example!
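# %% [markdown]
# Before diving in, here is a minimal, self-contained sketch (purely illustrative, not part of the pipeline) of what "data-parallel" means: every worker holds a replica of the model, computes gradients on its own shard of the data, and an allreduce-style average keeps all replicas in sync. Horovod implements this step efficiently (ring-allreduce); the NumPy version below only conveys the idea.
# %%
import numpy as np
def allreduce_mean(per_worker_grads):
    # average the gradients contributed by each worker (what Horovod's allreduce computes)
    return np.mean(np.stack(per_worker_grads), axis=0)
# two hypothetical workers share identical weights but see different data shards
replica_weights = np.zeros(3)
grads_from_shards = [np.array([0.2, -0.1, 0.4]), np.array([0.0, -0.3, 0.2])]
# every replica applies the same averaged update, so the replicas stay consistent
replica_weights -= 0.1 * allreduce_mean(grads_from_shards)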
# %% [markdown]
# First, let's import the required packages into the environment.
# %%
import datetime
import os
import pathlib
import subprocess
import sys
from dataclasses import dataclass
from distutils.version import LooseVersion
from typing import Any, Dict, List, Tuple
import flytekit
import horovod.spark.keras as hvd
import pyspark
import pyspark.sql.functions as F
import pyspark.sql.types as T
import tensorflow as tf
import tensorflow.keras.backend as K
from dataclasses_json import dataclass_json
from flytekit import Resources, task, workflow
from flytekit.types.directory import FlyteDirectory
from flytekitplugins.spark import Spark
from horovod.spark.common.backend import SparkBackend
from horovod.spark.common.store import Store
from horovod.tensorflow.keras.callbacks import BestModelCheckpoint
from pyspark import Row
from tensorflow.keras.layers import BatchNormalization, Concatenate, Dense, Dropout, Embedding, Flatten, Input, Reshape
# %% [markdown]
# We define two variables to represent categorical and continuous columns in the dataset.
# %%
CATEGORICAL_COLS = [
"Store",
"State",
"DayOfWeek",
"Year",
"Month",
"Day",
"Week",
"CompetitionMonthsOpen",
"Promo2Weeks",
"StoreType",
"Assortment",
"PromoInterval",
"CompetitionOpenSinceYear",
"Promo2SinceYear",
"Events",
"Promo",
"StateHoliday",
"SchoolHoliday",
]
CONTINUOUS_COLS = [
"CompetitionDistance",
"Max_TemperatureC",
"Mean_TemperatureC",
"Min_TemperatureC",
"Max_Humidity",
"Mean_Humidity",
"Min_Humidity",
"Max_Wind_SpeedKm_h",
"Mean_Wind_SpeedKm_h",
"CloudCover",
"trend",
"trend_de",
"BeforePromo",
"AfterPromo",
"AfterStateHoliday",
"BeforeStateHoliday",
"BeforeSchoolHoliday",
"AfterSchoolHoliday",
]
# %% [markdown]
# Next, let's initialize a data class to store the hyperparameters that will be used with the model (`epochs`, `learning_rate`, `batch_size`, etc.).
# %%
@dataclass_json
@dataclass
class Hyperparameters:
batch_size: int = 100
sample_rate: float = 0.01
learning_rate: float = 0.0001
num_proc: int = 2
epochs: int = 100
local_checkpoint_file: str = "checkpoint.h5"
local_submission_csv: str = "submission.csv"
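# %% [markdown]
# A quick aside (illustrative only): the `dataclass_json` decorator adds JSON (de)serialization to the class, which is what lets the hyperparameters be passed between Flyte tasks as a single value.
# %%
# hypothetical round-trip to show the serialization behaviour; not used by the pipeline itself
hp_example = Hyperparameters(epochs=10)
hp_as_json = hp_example.to_json()
assert Hyperparameters.from_json(hp_as_json) == hp_example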
# %% [markdown]
# ## Downloading the Data
#
# We define a task to download the data into a `FlyteDirectory`.
# %%
@task(
cache=True,
cache_version="0.1",
)
def download_data(dataset: str) -> FlyteDirectory:
# create a directory named 'data'
print("==============")
print("Downloading data")
print("==============")
working_dir = flytekit.current_context().working_directory
data_dir = pathlib.Path(os.path.join(working_dir, "data"))
data_dir.mkdir(exist_ok=True)
# download the dataset
download_subprocess = subprocess.run(
[
"curl",
dataset,
],
check=True,
capture_output=True,
)
# untar the data
subprocess.run(
[
"tar",
"-xz",
"-C",
data_dir,
],
input=download_subprocess.stdout,
)
# return the directory populated with Rossmann data files
return FlyteDirectory(path=str(data_dir))
# %% [markdown]
# ## Data Preprocessing
#
# 1. Let's start with cleaning and preparing the Google trend data. We create new 'Date' and 'State' columns using PySpark's `withColumn`. These columns, in addition to other features, will contribute to the prediction of sales.
# %%
def prepare_google_trend(
google_trend_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
google_trend_all = google_trend_csv.withColumn(
"Date", F.regexp_extract(google_trend_csv.week, "(.*?) -", 1)
).withColumn("State", F.regexp_extract(google_trend_csv.file, "Rossmann_DE_(.*)", 1))
# map state NI -> HB,NI to align with other data sources
google_trend_all = google_trend_all.withColumn(
"State",
F.when(google_trend_all.State == "NI", "HB,NI").otherwise(google_trend_all.State),
)
# expand dates
return expand_date(google_trend_all)
# %% [markdown]
# 2. Next, we set a few date-specific values in the DataFrame to analyze the seasonal effects on sales.
# %%
def expand_date(df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
df = df.withColumn("Date", df.Date.cast(T.DateType()))
return (
df.withColumn("Year", F.year(df.Date))
.withColumn("Month", F.month(df.Date))
.withColumn("Week", F.weekofyear(df.Date))
.withColumn("Day", F.dayofmonth(df.Date))
)
# %% [markdown]
# 3. We retrieve the number of days before/after a special event (such as a promo or holiday). This data helps analyze how the sales may vary before/after a special event.
# %%
def add_elapsed(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
def add_elapsed_column(col, asc):
def | (rows):
last_store, last_date = None, None
for r in rows:
if last_store != r.Store:
last_store = r.Store
last_date = r.Date
if r[col]:
last_date = r.Date
fields = r.asDict().copy()
fields[("After" if asc else "Before") + col] = (r.Date - last_date).days
yield Row(**fields)
return fn
# repartition: rearrange the rows in the DataFrame based on the partitioning expression
# sortWithinPartitions: sort every partition in the DataFrame based on specific columns
# mapPartitions: apply the 'add_elapsed_column' method to each partition in the dataset, and convert the partitions into a DataFrame
df = df.repartition(df.Store)
for asc in [False, True]:
sort_col = df.Date.asc() if asc else df.Date.desc()
rdd = df.sortWithinPartitions(df.Store.asc(), sort_col).rdd
for col in cols:
rdd = rdd.mapPartitions(add_elapsed_column(col, asc))
df = rdd.toDF()
return df
# %% [markdown]
# 4. We define a function to merge several Spark DataFrames into a single DataFrame to create training and test data.
# %%
def prepare_df(
df: pyspark.sql.DataFrame,
store_csv: pyspark.sql.DataFrame,
store_states_csv: pyspark.sql.DataFrame,
state_names_csv: pyspark.sql.DataFrame,
google_trend_csv: pyspark.sql.DataFrame,
weather_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
num_rows = df.count()
# expand dates
df = expand_date(df)
    # convert the flag columns (Open, Promo, StateHoliday, SchoolHoliday) from strings to booleans
df = (
df.withColumn("Open", df.Open != "0")
.withColumn("Promo", df.Promo != "0")
.withColumn("StateHoliday", df.StateHoliday != "0")
.withColumn("SchoolHoliday", df.SchoolHoliday != "0")
)
# merge store information
store = store_csv.join(store_states_csv, "Store")
df = df.join(store, "Store")
# merge Google Trend information
google_trend_all = prepare_google_trend(google_trend_csv)
df = df.join(google_trend_all, ["State", "Year", "Week"]).select(df["*"], google_trend_all.trend)
# merge in Google Trend for whole Germany
google_trend_de = google_trend_all[google_trend_all.file == "Rossmann_DE"].withColumnRenamed("trend", "trend_de")
df = df.join(google_trend_de, ["Year", "Week"]).select(df["*"], google_trend_de.trend_de)
# merge weather
weather = weather_csv.join(state_names_csv, weather_csv.file == state_names_csv.StateName)
df = df.join(weather, ["State", "Date"])
# fix null values
df = (
df.withColumn(
"CompetitionOpenSinceYear",
F.coalesce(df.CompetitionOpenSinceYear, F.lit(1900)),
)
.withColumn(
"CompetitionOpenSinceMonth",
F.coalesce(df.CompetitionOpenSinceMonth, F.lit(1)),
)
.withColumn("Promo2SinceYear", F.coalesce(df.Promo2SinceYear, F.lit(1900)))
.withColumn("Promo2SinceWeek", F.coalesce(df.Promo2SinceWeek, F.lit(1)))
)
# days and months since the competition has been open, cap it to 2 years
df = df.withColumn(
"CompetitionOpenSince",
F.to_date(F.format_string("%s-%s-15", df.CompetitionOpenSinceYear, df.CompetitionOpenSinceMonth)),
)
df = df.withColumn(
"CompetitionDaysOpen",
F.when(
df.CompetitionOpenSinceYear > 1900,
F.greatest(
F.lit(0),
F.least(F.lit(360 * 2), F.datediff(df.Date, df.CompetitionOpenSince)),
),
).otherwise(0),
)
df = df.withColumn("CompetitionMonthsOpen", (df.CompetitionDaysOpen / 30).cast(T.IntegerType()))
# days and weeks of promotion, cap it to 25 weeks
df = df.withColumn(
"Promo2Since",
F.expr('date_add(format_string("%s-01-01", Promo2SinceYear), (cast(Promo2SinceWeek as int) - 1) * 7)'),
)
df = df.withColumn(
"Promo2Days",
F.when(
df.Promo2SinceYear > 1900,
F.greatest(F.lit(0), F.least(F.lit(25 * 7), F.datediff(df.Date, df.Promo2Since))),
).otherwise(0),
)
df = df.withColumn("Promo2Weeks", (df.Promo2Days / 7).cast(T.IntegerType()))
# ensure that no row was lost through inner joins
assert num_rows == df.count(), "lost rows in joins"
return df
# %% [markdown]
# 5. We build a dictionary of sorted, distinct categorical variables to create an embedding layer in our Keras model.
# %%
def build_vocabulary(df: pyspark.sql.DataFrame) -> Dict[str, List[Any]]:
vocab = {}
for col in CATEGORICAL_COLS:
values = [r[0] for r in df.select(col).distinct().collect()]
col_type = type([x for x in values if x is not None][0])
default_value = col_type()
vocab[col] = sorted(values, key=lambda x: x or default_value)
return vocab
# %% [markdown]
# 6. Next, we cast continuous columns to float as part of data preprocessing.
# %%
def cast_columns(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
for col in cols:
df = df.withColumn(col, F.coalesce(df[col].cast(T.FloatType()), F.lit(0.0)))
return df
# %% [markdown]
# 7. Lastly, we define a function that replaces each categorical value with its index in the vocabulary.
# %%
def lookup_columns(df: pyspark.sql.DataFrame, vocab: Dict[str, List[Any]]) -> pyspark.sql.DataFrame:
def lookup(mapping):
def fn(v):
return mapping.index(v)
return F.udf(fn, returnType=T.IntegerType())
for col, mapping in vocab.items():
df = df.withColumn(col, lookup(mapping)(df[col]))
return df
# %% [markdown]
# The `data_preparation` function consolidates all the aforementioned data processing functions.
# %%
def data_preparation(
data_dir: FlyteDirectory, hp: Hyperparameters
) -> Tuple[float, Dict[str, List[Any]], pyspark.sql.DataFrame, pyspark.sql.DataFrame]:
print("================")
print("Data preparation")
print("================")
    # 'current_context' exposes the Spark session that Flyte provisions for the ``data_preparation`` task
spark = flytekit.current_context().spark_session
data_dir_path = data_dir.remote_source
# read the CSV data into Spark DataFrame
train_csv = spark.read.csv("%s/train.csv" % data_dir_path, header=True)
test_csv = spark.read.csv("%s/test.csv" % data_dir_path, header=True)
store_csv = spark.read.csv("%s/store.csv" % data_dir_path, header=True)
store_states_csv = spark.read.csv("%s/store_states.csv" % data_dir_path, header=True)
state_names_csv = spark.read.csv("%s/state_names.csv" % data_dir_path, header=True)
google_trend_csv = spark.read.csv("%s/googletrend.csv" % data_dir_path, header=True)
weather_csv = spark.read.csv("%s/weather.csv" % data_dir_path, header=True)
# retrieve a sampled subset of the train and test data
if hp.sample_rate:
train_csv = train_csv.sample(withReplacement=False, fraction=hp.sample_rate)
test_csv = test_csv.sample(withReplacement=False, fraction=hp.sample_rate)
# prepare the DataFrames from the CSV files
train_df = prepare_df(
train_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
test_df = prepare_df(
test_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
# add elapsed times from the data spanning training & test datasets
elapsed_cols = ["Promo", "StateHoliday", "SchoolHoliday"]
elapsed = add_elapsed(
train_df.select("Date", "Store", *elapsed_cols).unionAll(test_df.select("Date", "Store", *elapsed_cols)),
elapsed_cols,
)
# join with the elapsed times
train_df = train_df.join(elapsed, ["Date", "Store"]).select(
train_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
test_df = test_df.join(elapsed, ["Date", "Store"]).select(
test_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
# filter out zero sales
train_df = train_df.filter(train_df.Sales > 0)
print("===================")
print("Prepared data frame")
print("===================")
train_df.show()
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
# select features
train_df = train_df.select(*(all_cols + ["Sales", "Date"])).cache()
test_df = test_df.select(*(all_cols + ["Id", "Date"])).cache()
# build a vocabulary of categorical columns
vocab = build_vocabulary(
train_df.select(*CATEGORICAL_COLS).unionAll(test_df.select(*CATEGORICAL_COLS)).cache(),
)
# cast continuous columns to float
train_df = cast_columns(train_df, CONTINUOUS_COLS + ["Sales"])
# replace categorical values with their vocabulary indices
train_df = lookup_columns(train_df, vocab)
test_df = cast_columns(test_df, CONTINUOUS_COLS)
test_df = lookup_columns(test_df, vocab)
# split into training & validation
# test set is in 2015, use the same period in 2014 from the training set as a validation set
test_min_date = test_df.agg(F.min(test_df.Date)).collect()[0][0]
test_max_date = test_df.agg(F.max(test_df.Date)).collect()[0][0]
one_year = datetime.timedelta(365)
train_df = train_df.withColumn(
"Validation",
(train_df.Date > test_min_date - one_year) & (train_df.Date <= test_max_date - one_year),
)
# determine max Sales number
max_sales = train_df.agg(F.max(train_df.Sales)).collect()[0][0]
# convert Sales to log domain
train_df = train_df.withColumn("Sales", F.log(train_df.Sales))
print("===================================")
print("Data frame with transformed columns")
print("===================================")
train_df.show()
print("================")
print("Data frame sizes")
print("================")
# count the rows on each side of the Validation flag
train_rows = train_df.filter(~train_df.Validation).count()
val_rows = train_df.filter(train_df.Validation).count()
test_rows = test_df.count()
# print the number of rows in training, validation and test data
print("Training: %d" % train_rows)
print("Validation: %d" % val_rows)
print("Test: %d" % test_rows)
return max_sales, vocab, train_df, test_df
# %% [markdown]
# ## Training
#
# We use `KerasEstimator` in Horovod to train our Keras model on an existing pre-processed Spark DataFrame.
# The Estimator leverages Horovod's ability to scale across multiple workers, thereby eliminating any specialized code to perform distributed training.
# %%
def train(
max_sales: float,
vocab: Dict[str, List[Any]],
hp: Hyperparameters,
work_dir: FlyteDirectory,
train_df: pyspark.sql.DataFrame,
working_dir: FlyteDirectory,
):
print("==============")
print("Model training")
print("==============")
# a helper to compute the root mean square percentage error on the exponentiated predictions
def exp_rmspe(y_true, y_pred):
"""Competition evaluation metric, expects logarmithic inputs."""
pct = tf.square((tf.exp(y_true) - tf.exp(y_pred)) / tf.exp(y_true))
# compute mean excluding stores with zero denominator
x = tf.reduce_sum(tf.where(y_true > 0.001, pct, tf.zeros_like(pct)))
y = tf.reduce_sum(tf.where(y_true > 0.001, tf.ones_like(pct), tf.zeros_like(pct)))
return tf.sqrt(x / y)
def act_sigmoid_scaled(x):
"""Sigmoid scaled to logarithm of maximum sales scaled by 20%."""
return tf.nn.sigmoid(x) * tf.math.log(max_sales) * 1.2
# NOTE: the exp_rmspe and act_sigmoid_scaled functions are defined inside train() rather than at the module level
# because act_sigmoid_scaled closes over max_sales, which cannot be passed explicitly to an activation function;
# both are registered as Keras custom objects, so they are kept together in the same scope
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
CUSTOM_OBJECTS = {"exp_rmspe": exp_rmspe, "act_sigmoid_scaled": act_sigmoid_scaled}
# disable GPUs when building the model to prevent memory leaks
if LooseVersion(tf.__version__) >= LooseVersion("2.0.0"):
# See https://github.com/tensorflow/tensorflow/issues/33168
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
else:
K.set_session(tf.Session(config=tf.ConfigProto(device_count={"GPU": 0})))
# build the Keras model
inputs = {col: Input(shape=(1,), name=col) for col in all_cols}
embeddings = [
Embedding(len(vocab[col]), 10, input_length=1, name="emb_" + col)(inputs[col]) for col in CATEGORICAL_COLS
]
continuous_bn = Concatenate()([Reshape((1, 1), name="reshape_" + col)(inputs[col]) for col in CONTINUOUS_COLS])
continuous_bn = BatchNormalization()(continuous_bn)
x = Concatenate()(embeddings + [continuous_bn])
x = Flatten()(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(1000, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dense(500, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
x = Dropout(0.5)(x)
# output layer with the custom scaled-sigmoid activation
output = Dense(1, activation=act_sigmoid_scaled)(x)
model = tf.keras.Model([inputs[f] for f in all_cols], output)
# display the details of the Keras model
model.summary()
opt = tf.keras.optimizers.Adam(lr=hp.learning_rate, epsilon=1e-3)
# checkpoint callback that keeps the best model by validation loss for the returned Keras model
ckpt_callback = BestModelCheckpoint(monitor="val_loss", mode="auto", save_freq="epoch")
# create an object of Store class
store = Store.create(work_dir.remote_source)
# 'SparkBackend' uses `horovod.spark.run` to execute the distributed training function, and
# returns a list of results by running 'train' on every worker in the cluster
backend = SparkBackend(
num_proc=hp.num_proc,
stdout=sys.stdout,
stderr=sys.stderr,
prefix_output_with_timestamp=True,
)
# define a Spark Estimator that fits Keras models to a DataFrame
keras_estimator = hvd.KerasEstimator(
backend=backend,
store=store,
model=model,
optimizer=opt,
loss="mae",
metrics=[exp_rmspe],
custom_objects=CUSTOM_OBJECTS,
feature_cols=all_cols,
label_cols=["Sales"],
validation="Validation",
batch_size=hp.batch_size,
epochs=hp.epochs,
verbose=2,
checkpoint_callback=ckpt_callback,
)
# The Estimator hides the following details:
# 1. Binding Spark DataFrames to a deep learning training script
# 2. Reading data into a format that can be interpreted by the training framework
# 3. Distributed training using Horovod
# the user provides a Keras model to the `KerasEstimator`,
# which fits it to the Spark DataFrame and returns the trained model wrapped as a Transformer
keras_model = keras_estimator.fit(train_df).setOutputCols(["Sales_output"])
# retrieve the model training history
history = keras_model.getHistory()
best_val_rmspe = min(history["val_exp_rmspe"])
print("Best RMSPE: %f" % best_val_rmspe)
# save the trained model
keras_model.save(os.path.join(working_dir, hp.local_checkpoint_file))
print("Written checkpoint to %s" % os.path.join(working_dir, hp.local_checkpoint_file))
# the Estimator returns a Transformer representation of the trained model once training is complete
return keras_model
# %% [markdown]
# ## Evaluation
#
# We use the model transformer to forecast sales.
# %%
def test(
keras_model,
working_dir: FlyteDirectory,
test_df: pyspark.sql.DataFrame,
hp: Hyperparameters,
) -> FlyteDirectory:
print("================")
print("Final prediction")
print("================")
pred_df = keras_model.transform(test_df)
pred_df.printSchema()
pred_df.show(5)
# convert from log domain to real Sales numbers
pred_df = pred_df.withColumn("Sales_pred", F.exp(pred_df.Sales_output))
submission_df = pred_df.select(pred_df.Id.cast(T.IntegerType()), pred_df.Sales_pred).toPandas()
submission_df.sort_values(by=["Id"]).to_csv(os.path.join(working_dir, hp.local_submission_csv), index=False)
# predictions are saved to a CSV file.
print("Saved predictions to %s" % hp.local_submission_csv)
return working_dir
# %% [markdown]
# ## Defining the Spark Task
#
# Flyte provides an easy-to-use interface to specify Spark-related attributes.
# The Spark attributes need to be attached to a specific task, and just like that, Flyte can run Spark jobs natively on Kubernetes clusters!
# Within the task, let's call the data pre-processing, training, and evaluation functions.
#
# :::{note}
# To set up Spark, refer to {ref}`flyte-and-spark`.
# :::
# %%
@task(
task_config=Spark(
# the below configuration is applied to the Spark cluster
spark_conf={
"spark.driver.memory": "2000M",
"spark.executor.memory": "2000M",
"spark.executor.cores": "1",
"spark.executor.instances": "2",
"spark.driver.cores": "1",
"spark.sql.shuffle.partitions": "16",
"spark.worker.timeout": "120",
}
),
cache=True,
cache_version="0.2",
requests=Resources(mem="1Gi"),
limits=Resources(mem="1Gi"),
)
def horovod_spark_task(data_dir: FlyteDirectory, hp: Hyperparameters, work_dir: FlyteDirectory) -> FlyteDirectory:
max_sales, vocab, train_df, test_df = data_preparation(data_dir, hp)
# working directory will have the model and predictions as separate files
working_dir = flytekit.current_context().working_directory
keras_model = train(
max_sales,
vocab,
hp,
work_dir,
train_df,
working_dir,
)
# generate predictions
return test(keras_model, working_dir, test_df, hp)
# %% [markdown]
# Lastly, we define a workflow to run the pipeline.
# %%
@workflow
def horovod_spark_wf(
dataset: str = "https://cdn.discordapp.com/attachments/545481172399030272/886952942903627786/rossmann.tgz",
hp: Hyperparameters = Hyperparameters(),
work_dir: FlyteDirectory = "s3://flyte-demo/horovod-tmp/",
) -> FlyteDirectory:
data_dir = download_data(dataset=dataset)
# work_dir corresponds to the Horovod-Spark store
return horovod_spark_task(data_dir=data_dir, hp=hp, work_dir=work_dir)
# %% [markdown]
# ## Running the Model Locally
#
# We can run the code locally too, provided Spark is enabled and the plugin is set up in the environment.
#
# %%
if __name__ == "__main__":
metrics_directory = horovod_spark_wf()
print(f"Find the model and predictions at {metrics_directory}")
| fn | identifier_name |
container.go | package runtime
import (
"context"
"fmt"
"io"
"regexp"
"time"
"code.cloudfoundry.org/garden"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
uuid "github.com/nu7hatch/gouuid"
"github.com/opencontainers/runtime-spec/specs-go"
)
const (
SuperuserPath = "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Path = "PATH=/usr/local/bin:/usr/bin:/bin"
GraceTimeKey = "garden.grace-time"
)
type UserNotFoundError struct {
User string
}
func (u UserNotFoundError) Error() string {
return fmt.Sprintf("user '%s' not found: no matching entries in /etc/passwd", u.User)
}
type Container struct {
container containerd.Container
killer Killer
rootfsManager RootfsManager
}
func NewContainer(
container containerd.Container,
killer Killer,
rootfsManager RootfsManager,
) *Container {
return &Container{
container: container,
killer: killer,
rootfsManager: rootfsManager,
}
}
var _ garden.Container = (*Container)(nil)
func (c *Container) Handle() string {
return c.container.ID()
}
// Stop stops a container.
//
func (c *Container) Stop(kill bool) error {
ctx := context.Background()
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return fmt.Errorf("task lookup: %w", err)
}
behaviour := KillGracefully
if kill {
behaviour = KillUngracefully
}
err = c.killer.Kill(ctx, task, behaviour)
if err != nil {
return fmt.Errorf("kill: %w", err)
}
return nil
}
// Run a process inside the container.
//
func (c *Container) Run(
spec garden.ProcessSpec,
processIO garden.ProcessIO,
) (garden.Process, error) {
ctx := context.Background()
containerSpec, err := c.container.Spec(ctx)
if err != nil {
return nil, fmt.Errorf("container spec: %w", err)
}
procSpec, err := c.setupContainerdProcSpec(spec, *containerSpec)
if err != nil {
return nil, err
}
err = c.rootfsManager.SetupCwd(containerSpec.Root.Path, procSpec.Cwd)
if err != nil {
return nil, fmt.Errorf("setup cwd: %w", err)
}
task, err := c.container.Task(ctx, nil)
if err != nil {
return nil, fmt.Errorf("task retrieval: %w", err)
}
id := procID(spec)
cioOpts := containerdCIO(processIO, spec.TTY != nil)
proc, err := task.Exec(ctx, id, &procSpec, cio.NewCreator(cioOpts...))
if err != nil {
return nil, fmt.Errorf("task exec: %w", err)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
err = proc.Start(ctx)
if err != nil {
if isNoSuchExecutable(err) {
return nil, garden.ExecutableNotFoundError{Message: err.Error()}
}
return nil, fmt.Errorf("proc start: %w", err)
}
// If there is no TTY allocated for the process, we can call CloseIO right
// away. The reason we don't do this when there is a TTY is that runc
// signals such processes with SIGHUP when stdin is closed and we have
// called CloseIO (which doesn't actually close the stdin stream for the
// container - it just marks the stream as "closable").
//
// If we were to call CloseIO immediately on processes with a TTY, if the
// Stdin stream ever receives an error (e.g. an io.EOF due to worker
// rebalancing, or the worker restarting gracefully), runc will kill the
// process with SIGHUP (because we would have marked the stream as
// closable).
//
// Note: resource containers are the only ones without a TTY - task and
// hijack processes have a TTY enabled.
if spec.TTY == nil {
err = proc.CloseIO(ctx, containerd.WithStdinCloser)
if err != nil {
return nil, fmt.Errorf("proc closeio: %w", err)
}
}
return NewProcess(proc, exitStatusC), nil
}
// Attach starts streaming the output back to the client from a specified process.
//
func (c *Container) Attach(pid string, processIO garden.ProcessIO) (process garden.Process, err error) {
ctx := context.Background()
if pid == "" {
return nil, ErrInvalidInput("empty pid")
}
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return nil, fmt.Errorf("task: %w", err)
}
cioOpts := containerdCIO(processIO, false)
proc, err := task.LoadProcess(ctx, pid, cio.NewAttach(cioOpts...))
if err != nil {
return nil, fmt.Errorf("load proc: %w", err)
}
status, err := proc.Status(ctx)
if err != nil {
return nil, fmt.Errorf("proc status: %w", err)
}
if status.Status != containerd.Running {
return nil, fmt.Errorf("proc not running: status = %s", status.Status)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
return NewProcess(proc, exitStatusC), nil
}
// Properties returns the current set of properties
//
func (c *Container) Properties() (garden.Properties, error) {
ctx := context.Background()
labels, err := c.container.Labels(ctx)
if err != nil {
return garden.Properties{}, fmt.Errorf("labels retrieval: %w", err)
}
return labelsToProperties(labels), nil
}
// Property returns the value of the property with the specified name.
//
func (c *Container) Property(name string) (string, error) {
properties, err := c.Properties()
if err != nil {
return "", err
}
v, found := properties[name]
if !found {
return "", ErrNotFound(name)
}
return v, nil
}
// Set a named property on a container to a specified value.
//
func (c *Container) SetProperty(name string, value string) error {
labelSet, err := propertiesToLabels(garden.Properties{name: value})
if err != nil {
return err
}
_, err = c.container.SetLabels(context.Background(), labelSet)
if err != nil |
return nil
}
// RemoveProperty - Not Implemented
func (c *Container) RemoveProperty(name string) (err error) {
err = ErrNotImplemented
return
}
// Info - Not Implemented
func (c *Container) Info() (info garden.ContainerInfo, err error) {
err = ErrNotImplemented
return
}
// Metrics - Not Implemented
func (c *Container) Metrics() (metrics garden.Metrics, err error) {
err = ErrNotImplemented
return
}
// StreamIn - Not Implemented
func (c *Container) StreamIn(spec garden.StreamInSpec) (err error) {
err = ErrNotImplemented
return
}
// StreamOut - Not Implemented
func (c *Container) StreamOut(spec garden.StreamOutSpec) (readCloser io.ReadCloser, err error) {
err = ErrNotImplemented
return
}
// SetGraceTime stores the grace time as a containerd label with key "garden.grace-time"
//
func (c *Container) SetGraceTime(graceTime time.Duration) error {
err := c.SetProperty(GraceTimeKey, fmt.Sprintf("%d", graceTime))
if err != nil {
return fmt.Errorf("set grace time: %w", err)
}
return nil
}
// CurrentBandwidthLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentBandwidthLimits() (garden.BandwidthLimits, error) {
return garden.BandwidthLimits{}, nil
}
// CurrentCPULimits returns the CPU shares allocated to the container
func (c *Container) CurrentCPULimits() (garden.CPULimits, error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.CPULimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.CPU == nil ||
spec.Linux.Resources.CPU.Shares == nil {
return garden.CPULimits{}, nil
}
return garden.CPULimits{
Weight: *spec.Linux.Resources.CPU.Shares,
}, nil
}
// CurrentDiskLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentDiskLimits() (garden.DiskLimits, error) {
return garden.DiskLimits{}, nil
}
// CurrentMemoryLimits returns the memory limit in bytes allocated to the container
func (c *Container) CurrentMemoryLimits() (limits garden.MemoryLimits, err error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.MemoryLimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.Memory == nil ||
spec.Linux.Resources.Memory.Limit == nil {
return garden.MemoryLimits{}, nil
}
return garden.MemoryLimits{
LimitInBytes: uint64(*spec.Linux.Resources.Memory.Limit),
}, nil
}
// NetIn - Not Implemented
func (c *Container) NetIn(hostPort, containerPort uint32) (a, b uint32, err error) {
err = ErrNotImplemented
return
}
// NetOut - Not Implemented
func (c *Container) NetOut(netOutRule garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
// BulkNetOut - Not Implemented
func (c *Container) BulkNetOut(netOutRules []garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
func procID(gdnProcSpec garden.ProcessSpec) string {
id := gdnProcSpec.ID
if id == "" {
uuid, err := uuid.NewV4()
if err != nil {
panic(fmt.Errorf("uuid gen: %w", err))
}
id = uuid.String()
}
return id
}
func (c *Container) setupContainerdProcSpec(gdnProcSpec garden.ProcessSpec, containerSpec specs.Spec) (specs.Process, error) {
procSpec := containerSpec.Process
procSpec.Args = append([]string{gdnProcSpec.Path}, gdnProcSpec.Args...)
procSpec.Env = append(procSpec.Env, gdnProcSpec.Env...)
cwd := gdnProcSpec.Dir
if cwd == "" {
cwd = "/"
}
procSpec.Cwd = cwd
if gdnProcSpec.TTY != nil {
procSpec.Terminal = true
if gdnProcSpec.TTY.WindowSize != nil {
procSpec.ConsoleSize = &specs.Box{
Width: uint(gdnProcSpec.TTY.WindowSize.Columns),
Height: uint(gdnProcSpec.TTY.WindowSize.Rows),
}
}
}
if gdnProcSpec.User != "" {
var ok bool
var err error
procSpec.User, ok, err = c.rootfsManager.LookupUser(containerSpec.Root.Path, gdnProcSpec.User)
if err != nil {
return specs.Process{}, fmt.Errorf("lookup user: %w", err)
}
if !ok {
return specs.Process{}, UserNotFoundError{User: gdnProcSpec.User}
}
setUserEnv := fmt.Sprintf("USER=%s", gdnProcSpec.User)
procSpec.Env = append(procSpec.Env, setUserEnv)
}
if pathEnv := envWithDefaultPath(procSpec.User.UID, procSpec.Env); pathEnv != "" {
procSpec.Env = append(procSpec.Env, pathEnv)
}
return *procSpec, nil
}
// Set a default path based on the UID if no existing PATH is found
//
func envWithDefaultPath(uid uint32, currentEnv []string) string {
pathRegexp := regexp.MustCompile("^PATH=.*$")
for _, envVar := range currentEnv {
if pathRegexp.MatchString(envVar) {
return ""
}
}
if uid == 0 {
return SuperuserPath
}
return Path
}
func containerdCIO(gdnProcIO garden.ProcessIO, tty bool) []cio.Opt {
if !tty {
return []cio.Opt{
cio.WithStreams(
gdnProcIO.Stdin,
gdnProcIO.Stdout,
gdnProcIO.Stderr,
),
}
}
cioOpts := []cio.Opt{
cio.WithStreams(
gdnProcIO.Stdin,
gdnProcIO.Stdout,
gdnProcIO.Stderr,
),
cio.WithTerminal,
}
return cioOpts
}
func isNoSuchExecutable(err error) bool {
noSuchFile := regexp.MustCompile(`starting container process caused: exec: .*: stat .*: no such file or directory`)
executableNotFound := regexp.MustCompile(`starting container process caused: exec: .*: executable file not found in \$PATH`)
return noSuchFile.MatchString(err.Error()) || executableNotFound.MatchString(err.Error())
}
| {
return fmt.Errorf("set label: %w", err)
} | conditional_block |
container.go | package runtime
import (
"context"
"fmt"
"io"
"regexp"
"time"
"code.cloudfoundry.org/garden"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
uuid "github.com/nu7hatch/gouuid"
"github.com/opencontainers/runtime-spec/specs-go"
)
const (
SuperuserPath = "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Path = "PATH=/usr/local/bin:/usr/bin:/bin"
GraceTimeKey = "garden.grace-time"
)
type UserNotFoundError struct {
User string
}
func (u UserNotFoundError) Error() string {
return fmt.Sprintf("user '%s' not found: no matching entries in /etc/passwd", u.User)
}
type Container struct {
container containerd.Container
killer Killer
rootfsManager RootfsManager
}
func NewContainer(
container containerd.Container,
killer Killer,
rootfsManager RootfsManager,
) *Container {
return &Container{
container: container,
killer: killer,
rootfsManager: rootfsManager,
}
}
var _ garden.Container = (*Container)(nil)
func (c *Container) Handle() string {
return c.container.ID()
}
// Stop stops a container.
//
func (c *Container) Stop(kill bool) error {
ctx := context.Background()
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return fmt.Errorf("task lookup: %w", err)
}
behaviour := KillGracefully
if kill {
behaviour = KillUngracefully
}
err = c.killer.Kill(ctx, task, behaviour)
if err != nil {
return fmt.Errorf("kill: %w", err)
}
return nil
}
// Run a process inside the container.
//
func (c *Container) Run(
spec garden.ProcessSpec,
processIO garden.ProcessIO,
) (garden.Process, error) {
ctx := context.Background()
containerSpec, err := c.container.Spec(ctx)
if err != nil {
return nil, fmt.Errorf("container spec: %w", err)
}
procSpec, err := c.setupContainerdProcSpec(spec, *containerSpec)
if err != nil {
return nil, err
}
err = c.rootfsManager.SetupCwd(containerSpec.Root.Path, procSpec.Cwd)
if err != nil {
return nil, fmt.Errorf("setup cwd: %w", err)
}
task, err := c.container.Task(ctx, nil)
if err != nil {
return nil, fmt.Errorf("task retrieval: %w", err)
}
id := procID(spec)
cioOpts := containerdCIO(processIO, spec.TTY != nil)
proc, err := task.Exec(ctx, id, &procSpec, cio.NewCreator(cioOpts...))
if err != nil {
return nil, fmt.Errorf("task exec: %w", err)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
err = proc.Start(ctx)
if err != nil {
if isNoSuchExecutable(err) {
return nil, garden.ExecutableNotFoundError{Message: err.Error()}
}
return nil, fmt.Errorf("proc start: %w", err)
}
// If there is no TTY allocated for the process, we can call CloseIO right
// away. The reason we don't do this when there is a TTY is that runc
// signals such processes with SIGHUP when stdin is closed and we have
// called CloseIO (which doesn't actually close the stdin stream for the
// container - it just marks the stream as "closable").
//
// If we were to call CloseIO immediately on processes with a TTY, if the
// Stdin stream ever receives an error (e.g. an io.EOF due to worker
// rebalancing, or the worker restarting gracefully), runc will kill the
// process with SIGHUP (because we would have marked the stream as
// closable).
//
// Note: resource containers are the only ones without a TTY - task and
// hijack processes have a TTY enabled.
if spec.TTY == nil {
err = proc.CloseIO(ctx, containerd.WithStdinCloser)
if err != nil {
return nil, fmt.Errorf("proc closeio: %w", err)
}
}
return NewProcess(proc, exitStatusC), nil
}
// Attach starts streaming the output back to the client from a specified process.
//
func (c *Container) Attach(pid string, processIO garden.ProcessIO) (process garden.Process, err error) {
ctx := context.Background()
if pid == "" {
return nil, ErrInvalidInput("empty pid")
}
task, err := c.container.Task(ctx, cio.Load)
if err != nil { |
proc, err := task.LoadProcess(ctx, pid, cio.NewAttach(cioOpts...))
if err != nil {
return nil, fmt.Errorf("load proc: %w", err)
}
status, err := proc.Status(ctx)
if err != nil {
return nil, fmt.Errorf("proc status: %w", err)
}
if status.Status != containerd.Running {
return nil, fmt.Errorf("proc not running: status = %s", status.Status)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
return NewProcess(proc, exitStatusC), nil
}
// Properties returns the current set of properties
//
func (c *Container) Properties() (garden.Properties, error) {
ctx := context.Background()
labels, err := c.container.Labels(ctx)
if err != nil {
return garden.Properties{}, fmt.Errorf("labels retrieval: %w", err)
}
return labelsToProperties(labels), nil
}
// Property returns the value of the property with the specified name.
//
func (c *Container) Property(name string) (string, error) {
properties, err := c.Properties()
if err != nil {
return "", err
}
v, found := properties[name]
if !found {
return "", ErrNotFound(name)
}
return v, nil
}
// Set a named property on a container to a specified value.
//
func (c *Container) SetProperty(name string, value string) error {
labelSet, err := propertiesToLabels(garden.Properties{name: value})
if err != nil {
return err
}
_, err = c.container.SetLabels(context.Background(), labelSet)
if err != nil {
return fmt.Errorf("set label: %w", err)
}
return nil
}
// RemoveProperty - Not Implemented
func (c *Container) RemoveProperty(name string) (err error) {
err = ErrNotImplemented
return
}
// Info - Not Implemented
func (c *Container) Info() (info garden.ContainerInfo, err error) {
err = ErrNotImplemented
return
}
// Metrics - Not Implemented
func (c *Container) Metrics() (metrics garden.Metrics, err error) {
err = ErrNotImplemented
return
}
// StreamIn - Not Implemented
func (c *Container) StreamIn(spec garden.StreamInSpec) (err error) {
err = ErrNotImplemented
return
}
// StreamOut - Not Implemented
func (c *Container) StreamOut(spec garden.StreamOutSpec) (readCloser io.ReadCloser, err error) {
err = ErrNotImplemented
return
}
// SetGraceTime stores the grace time as a containerd label with key "garden.grace-time"
//
func (c *Container) SetGraceTime(graceTime time.Duration) error {
err := c.SetProperty(GraceTimeKey, fmt.Sprintf("%d", graceTime))
if err != nil {
return fmt.Errorf("set grace time: %w", err)
}
return nil
}
// CurrentBandwidthLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentBandwidthLimits() (garden.BandwidthLimits, error) {
return garden.BandwidthLimits{}, nil
}
// CurrentCPULimits returns the CPU shares allocated to the container
func (c *Container) CurrentCPULimits() (garden.CPULimits, error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.CPULimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.CPU == nil ||
spec.Linux.Resources.CPU.Shares == nil {
return garden.CPULimits{}, nil
}
return garden.CPULimits{
Weight: *spec.Linux.Resources.CPU.Shares,
}, nil
}
// CurrentDiskLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentDiskLimits() (garden.DiskLimits, error) {
return garden.DiskLimits{}, nil
}
// CurrentMemoryLimits returns the memory limit in bytes allocated to the container
func (c *Container) CurrentMemoryLimits() (limits garden.MemoryLimits, err error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.MemoryLimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.Memory == nil ||
spec.Linux.Resources.Memory.Limit == nil {
return garden.MemoryLimits{}, nil
}
return garden.MemoryLimits{
LimitInBytes: uint64(*spec.Linux.Resources.Memory.Limit),
}, nil
}
// NetIn - Not Implemented
func (c *Container) NetIn(hostPort, containerPort uint32) (a, b uint32, err error) {
err = ErrNotImplemented
return
}
// NetOut - Not Implemented
func (c *Container) NetOut(netOutRule garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
// BulkNetOut - Not Implemented
func (c *Container) BulkNetOut(netOutRules []garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
func procID(gdnProcSpec garden.ProcessSpec) string {
id := gdnProcSpec.ID
if id == "" {
uuid, err := uuid.NewV4()
if err != nil {
panic(fmt.Errorf("uuid gen: %w", err))
}
id = uuid.String()
}
return id
}
func (c *Container) setupContainerdProcSpec(gdnProcSpec garden.ProcessSpec, containerSpec specs.Spec) (specs.Process, error) {
procSpec := containerSpec.Process
procSpec.Args = append([]string{gdnProcSpec.Path}, gdnProcSpec.Args...)
procSpec.Env = append(procSpec.Env, gdnProcSpec.Env...)
cwd := gdnProcSpec.Dir
if cwd == "" {
cwd = "/"
}
procSpec.Cwd = cwd
if gdnProcSpec.TTY != nil {
procSpec.Terminal = true
if gdnProcSpec.TTY.WindowSize != nil {
procSpec.ConsoleSize = &specs.Box{
Width: uint(gdnProcSpec.TTY.WindowSize.Columns),
Height: uint(gdnProcSpec.TTY.WindowSize.Rows),
}
}
}
if gdnProcSpec.User != "" {
var ok bool
var err error
procSpec.User, ok, err = c.rootfsManager.LookupUser(containerSpec.Root.Path, gdnProcSpec.User)
if err != nil {
return specs.Process{}, fmt.Errorf("lookup user: %w", err)
}
if !ok {
return specs.Process{}, UserNotFoundError{User: gdnProcSpec.User}
}
setUserEnv := fmt.Sprintf("USER=%s", gdnProcSpec.User)
procSpec.Env = append(procSpec.Env, setUserEnv)
}
if pathEnv := envWithDefaultPath(procSpec.User.UID, procSpec.Env); pathEnv != "" {
procSpec.Env = append(procSpec.Env, pathEnv)
}
return *procSpec, nil
}
// Set a default path based on the UID if no existing PATH is found
//
func envWithDefaultPath(uid uint32, currentEnv []string) string {
pathRegexp := regexp.MustCompile("^PATH=.*$")
for _, envVar := range currentEnv {
if pathRegexp.MatchString(envVar) {
return ""
}
}
if uid == 0 {
return SuperuserPath
}
return Path
}
func containerdCIO(gdnProcIO garden.ProcessIO, tty bool) []cio.Opt {
if !tty {
return []cio.Opt{
cio.WithStreams(
gdnProcIO.Stdin,
gdnProcIO.Stdout,
gdnProcIO.Stderr,
),
}
}
cioOpts := []cio.Opt{
cio.WithStreams(
gdnProcIO.Stdin,
gdnProcIO.Stdout,
gdnProcIO.Stderr,
),
cio.WithTerminal,
}
return cioOpts
}
func isNoSuchExecutable(err error) bool {
noSuchFile := regexp.MustCompile(`starting container process caused: exec: .*: stat .*: no such file or directory`)
executableNotFound := regexp.MustCompile(`starting container process caused: exec: .*: executable file not found in \$PATH`)
return noSuchFile.MatchString(err.Error()) || executableNotFound.MatchString(err.Error())
} | return nil, fmt.Errorf("task: %w", err)
}
cioOpts := containerdCIO(processIO, false) | random_line_split |
container.go | package runtime
import (
"context"
"fmt"
"io"
"regexp"
"time"
"code.cloudfoundry.org/garden"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
uuid "github.com/nu7hatch/gouuid"
"github.com/opencontainers/runtime-spec/specs-go"
)
const (
SuperuserPath = "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Path = "PATH=/usr/local/bin:/usr/bin:/bin"
GraceTimeKey = "garden.grace-time"
)
type UserNotFoundError struct {
User string
}
func (u UserNotFoundError) Error() string {
return fmt.Sprintf("user '%s' not found: no matching entries in /etc/passwd", u.User)
}
type Container struct {
container containerd.Container
killer Killer
rootfsManager RootfsManager
}
func NewContainer(
container containerd.Container,
killer Killer,
rootfsManager RootfsManager,
) *Container {
return &Container{
container: container,
killer: killer,
rootfsManager: rootfsManager,
}
}
var _ garden.Container = (*Container)(nil)
func (c *Container) Handle() string {
return c.container.ID()
}
// Stop stops a container.
//
func (c *Container) Stop(kill bool) error {
ctx := context.Background()
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return fmt.Errorf("task lookup: %w", err)
}
behaviour := KillGracefully
if kill {
behaviour = KillUngracefully
}
err = c.killer.Kill(ctx, task, behaviour)
if err != nil {
return fmt.Errorf("kill: %w", err)
}
return nil
}
// Run a process inside the container.
//
func (c *Container) Run(
spec garden.ProcessSpec,
processIO garden.ProcessIO,
) (garden.Process, error) {
ctx := context.Background()
containerSpec, err := c.container.Spec(ctx)
if err != nil {
return nil, fmt.Errorf("container spec: %w", err)
}
procSpec, err := c.setupContainerdProcSpec(spec, *containerSpec)
if err != nil {
return nil, err
}
err = c.rootfsManager.SetupCwd(containerSpec.Root.Path, procSpec.Cwd)
if err != nil {
return nil, fmt.Errorf("setup cwd: %w", err)
}
task, err := c.container.Task(ctx, nil)
if err != nil {
return nil, fmt.Errorf("task retrieval: %w", err)
}
id := procID(spec)
cioOpts := containerdCIO(processIO, spec.TTY != nil)
proc, err := task.Exec(ctx, id, &procSpec, cio.NewCreator(cioOpts...))
if err != nil {
return nil, fmt.Errorf("task exec: %w", err)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
err = proc.Start(ctx)
if err != nil {
if isNoSuchExecutable(err) {
return nil, garden.ExecutableNotFoundError{Message: err.Error()}
}
return nil, fmt.Errorf("proc start: %w", err)
}
// If there is no TTY allocated for the process, we can call CloseIO right
// away. The reason we don't do this when there is a TTY is that runc
// signals such processes with SIGHUP when stdin is closed and we have
// called CloseIO (which doesn't actually close the stdin stream for the
// container - it just marks the stream as "closable").
//
// If we were to call CloseIO immediately on processes with a TTY, if the
// Stdin stream ever receives an error (e.g. an io.EOF due to worker
// rebalancing, or the worker restarting gracefully), runc will kill the
// process with SIGHUP (because we would have marked the stream as
// closable).
//
// Note: resource containers are the only ones without a TTY - task and
// hijack processes have a TTY enabled.
if spec.TTY == nil {
err = proc.CloseIO(ctx, containerd.WithStdinCloser)
if err != nil {
return nil, fmt.Errorf("proc closeio: %w", err)
}
}
return NewProcess(proc, exitStatusC), nil
}
// Attach starts streaming the output back to the client from a specified process.
//
func (c *Container) Attach(pid string, processIO garden.ProcessIO) (process garden.Process, err error) {
ctx := context.Background()
if pid == "" {
return nil, ErrInvalidInput("empty pid")
}
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return nil, fmt.Errorf("task: %w", err)
}
cioOpts := containerdCIO(processIO, false)
proc, err := task.LoadProcess(ctx, pid, cio.NewAttach(cioOpts...))
if err != nil {
return nil, fmt.Errorf("load proc: %w", err)
}
status, err := proc.Status(ctx)
if err != nil {
return nil, fmt.Errorf("proc status: %w", err)
}
if status.Status != containerd.Running {
return nil, fmt.Errorf("proc not running: status = %s", status.Status)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
return NewProcess(proc, exitStatusC), nil
}
// Properties returns the current set of properties
//
func (c *Container) Properties() (garden.Properties, error) {
ctx := context.Background()
labels, err := c.container.Labels(ctx)
if err != nil {
return garden.Properties{}, fmt.Errorf("labels retrieval: %w", err)
}
return labelsToProperties(labels), nil
}
// Property returns the value of the property with the specified name.
//
func (c *Container) Property(name string) (string, error) {
properties, err := c.Properties()
if err != nil {
return "", err
}
v, found := properties[name]
if !found {
return "", ErrNotFound(name)
}
return v, nil
}
// Set a named property on a container to a specified value.
//
func (c *Container) SetProperty(name string, value string) error {
labelSet, err := propertiesToLabels(garden.Properties{name: value})
if err != nil {
return err
}
_, err = c.container.SetLabels(context.Background(), labelSet)
if err != nil {
return fmt.Errorf("set label: %w", err)
}
return nil
}
// RemoveProperty - Not Implemented
func (c *Container) RemoveProperty(name string) (err error) {
err = ErrNotImplemented
return
}
// Info - Not Implemented
func (c *Container) Info() (info garden.ContainerInfo, err error) |
// Metrics - Not Implemented
func (c *Container) Metrics() (metrics garden.Metrics, err error) {
err = ErrNotImplemented
return
}
// StreamIn - Not Implemented
func (c *Container) StreamIn(spec garden.StreamInSpec) (err error) {
err = ErrNotImplemented
return
}
// StreamOut - Not Implemented
func (c *Container) StreamOut(spec garden.StreamOutSpec) (readCloser io.ReadCloser, err error) {
err = ErrNotImplemented
return
}
// SetGraceTime stores the grace time as a containerd label with key "garden.grace-time"
//
func (c *Container) SetGraceTime(graceTime time.Duration) error {
err := c.SetProperty(GraceTimeKey, fmt.Sprintf("%d", graceTime))
if err != nil {
return fmt.Errorf("set grace time: %w", err)
}
return nil
}
// CurrentBandwidthLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentBandwidthLimits() (garden.BandwidthLimits, error) {
return garden.BandwidthLimits{}, nil
}
// CurrentCPULimits returns the CPU shares allocated to the container
func (c *Container) CurrentCPULimits() (garden.CPULimits, error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.CPULimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.CPU == nil ||
spec.Linux.Resources.CPU.Shares == nil {
return garden.CPULimits{}, nil
}
return garden.CPULimits{
Weight: *spec.Linux.Resources.CPU.Shares,
}, nil
}
// CurrentDiskLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentDiskLimits() (garden.DiskLimits, error) {
return garden.DiskLimits{}, nil
}
// CurrentMemoryLimits returns the memory limit in bytes allocated to the container
func (c *Container) CurrentMemoryLimits() (limits garden.MemoryLimits, err error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.MemoryLimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.Memory == nil ||
spec.Linux.Resources.Memory.Limit == nil {
return garden.MemoryLimits{}, nil
}
return garden.MemoryLimits{
LimitInBytes: uint64(*spec.Linux.Resources.Memory.Limit),
}, nil
}
// NetIn - Not Implemented
func (c *Container) NetIn(hostPort, containerPort uint32) (a, b uint32, err error) {
err = ErrNotImplemented
return
}
// NetOut - Not Implemented
func (c *Container) NetOut(netOutRule garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
// BulkNetOut - Not Implemented
func (c *Container) BulkNetOut(netOutRules []garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
func procID(gdnProcSpec garden.ProcessSpec) string {
id := gdnProcSpec.ID
if id == "" {
uuid, err := uuid.NewV4()
if err != nil {
panic(fmt.Errorf("uuid gen: %w", err))
}
id = uuid.String()
}
return id
}
func (c *Container) setupContainerdProcSpec(gdnProcSpec garden.ProcessSpec, containerSpec specs.Spec) (specs.Process, error) {
procSpec := containerSpec.Process
procSpec.Args = append([]string{gdnProcSpec.Path}, gdnProcSpec.Args...)
procSpec.Env = append(procSpec.Env, gdnProcSpec.Env...)
cwd := gdnProcSpec.Dir
if cwd == "" {
cwd = "/"
}
procSpec.Cwd = cwd
if gdnProcSpec.TTY != nil {
procSpec.Terminal = true
if gdnProcSpec.TTY.WindowSize != nil {
procSpec.ConsoleSize = &specs.Box{
Width: uint(gdnProcSpec.TTY.WindowSize.Columns),
Height: uint(gdnProcSpec.TTY.WindowSize.Rows),
}
}
}
if gdnProcSpec.User != "" {
var ok bool
var err error
procSpec.User, ok, err = c.rootfsManager.LookupUser(containerSpec.Root.Path, gdnProcSpec.User)
if err != nil {
return specs.Process{}, fmt.Errorf("lookup user: %w", err)
}
if !ok {
return specs.Process{}, UserNotFoundError{User: gdnProcSpec.User}
}
setUserEnv := fmt.Sprintf("USER=%s", gdnProcSpec.User)
procSpec.Env = append(procSpec.Env, setUserEnv)
}
if pathEnv := envWithDefaultPath(procSpec.User.UID, procSpec.Env); pathEnv != "" {
procSpec.Env = append(procSpec.Env, pathEnv)
}
return *procSpec, nil
}
// Set a default path based on the UID if no existing PATH is found
//
func envWithDefaultPath(uid uint32, currentEnv []string) string {
pathRegexp := regexp.MustCompile("^PATH=.*$")
for _, envVar := range currentEnv {
if pathRegexp.MatchString(envVar) {
return ""
}
}
if uid == 0 {
return SuperuserPath
}
return Path
}
func containerdCIO(gdnProcIO garden.ProcessIO, tty bool) []cio.Opt {
if !tty {
return []cio.Opt{
cio.WithStreams(
gdnProcIO.Stdin,
gdnProcIO.Stdout,
gdnProcIO.Stderr,
),
}
}
cioOpts := []cio.Opt{
cio.WithStreams(
gdnProcIO.Stdin,
gdnProcIO.Stdout,
gdnProcIO.Stderr,
),
cio.WithTerminal,
}
return cioOpts
}
func isNoSuchExecutable(err error) bool {
noSuchFile := regexp.MustCompile(`starting container process caused: exec: .*: stat .*: no such file or directory`)
executableNotFound := regexp.MustCompile(`starting container process caused: exec: .*: executable file not found in \$PATH`)
return noSuchFile.MatchString(err.Error()) || executableNotFound.MatchString(err.Error())
}
| {
err = ErrNotImplemented
return
} | identifier_body |
container.go | package runtime
import (
"context"
"fmt"
"io"
"regexp"
"time"
"code.cloudfoundry.org/garden"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
uuid "github.com/nu7hatch/gouuid"
"github.com/opencontainers/runtime-spec/specs-go"
)
const (
SuperuserPath = "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Path = "PATH=/usr/local/bin:/usr/bin:/bin"
GraceTimeKey = "garden.grace-time"
)
type UserNotFoundError struct {
User string
}
func (u UserNotFoundError) Error() string {
return fmt.Sprintf("user '%s' not found: no matching entries in /etc/passwd", u.User)
}
type Container struct {
container containerd.Container
killer Killer
rootfsManager RootfsManager
}
func NewContainer(
container containerd.Container,
killer Killer,
rootfsManager RootfsManager,
) *Container {
return &Container{
container: container,
killer: killer,
rootfsManager: rootfsManager,
}
}
var _ garden.Container = (*Container)(nil)
func (c *Container) Handle() string {
return c.container.ID()
}
// Stop stops a container.
//
func (c *Container) Stop(kill bool) error {
ctx := context.Background()
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return fmt.Errorf("task lookup: %w", err)
}
behaviour := KillGracefully
if kill {
behaviour = KillUngracefully
}
err = c.killer.Kill(ctx, task, behaviour)
if err != nil {
return fmt.Errorf("kill: %w", err)
}
return nil
}
// Run a process inside the container.
//
func (c *Container) Run(
spec garden.ProcessSpec,
processIO garden.ProcessIO,
) (garden.Process, error) {
ctx := context.Background()
containerSpec, err := c.container.Spec(ctx)
if err != nil {
return nil, fmt.Errorf("container spec: %w", err)
}
procSpec, err := c.setupContainerdProcSpec(spec, *containerSpec)
if err != nil {
return nil, err
}
err = c.rootfsManager.SetupCwd(containerSpec.Root.Path, procSpec.Cwd)
if err != nil {
return nil, fmt.Errorf("setup cwd: %w", err)
}
task, err := c.container.Task(ctx, nil)
if err != nil {
return nil, fmt.Errorf("task retrieval: %w", err)
}
id := procID(spec)
cioOpts := containerdCIO(processIO, spec.TTY != nil)
proc, err := task.Exec(ctx, id, &procSpec, cio.NewCreator(cioOpts...))
if err != nil {
return nil, fmt.Errorf("task exec: %w", err)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
err = proc.Start(ctx)
if err != nil {
if isNoSuchExecutable(err) {
return nil, garden.ExecutableNotFoundError{Message: err.Error()}
}
return nil, fmt.Errorf("proc start: %w", err)
}
// If there is no TTY allocated for the process, we can call CloseIO right
// away. The reason we don't do this when there is a TTY is that runc
// signals such processes with SIGHUP when stdin is closed and we have
// called CloseIO (which doesn't actually close the stdin stream for the
// container - it just marks the stream as "closable").
//
// If we were to call CloseIO immediately on processes with a TTY, if the
// Stdin stream ever receives an error (e.g. an io.EOF due to worker
// rebalancing, or the worker restarting gracefully), runc will kill the
// process with SIGHUP (because we would have marked the stream as
// closable).
//
// Note: resource containers are the only ones without a TTY - task and
// hijack processes have a TTY enabled.
if spec.TTY == nil {
err = proc.CloseIO(ctx, containerd.WithStdinCloser)
if err != nil {
return nil, fmt.Errorf("proc closeio: %w", err)
}
}
return NewProcess(proc, exitStatusC), nil
}
// Attach starts streaming the output back to the client from a specified process.
//
func (c *Container) Attach(pid string, processIO garden.ProcessIO) (process garden.Process, err error) {
ctx := context.Background()
if pid == "" {
return nil, ErrInvalidInput("empty pid")
}
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return nil, fmt.Errorf("task: %w", err)
}
cioOpts := containerdCIO(processIO, false)
proc, err := task.LoadProcess(ctx, pid, cio.NewAttach(cioOpts...))
if err != nil {
return nil, fmt.Errorf("load proc: %w", err)
}
status, err := proc.Status(ctx)
if err != nil {
return nil, fmt.Errorf("proc status: %w", err)
}
if status.Status != containerd.Running {
return nil, fmt.Errorf("proc not running: status = %s", status.Status)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
return NewProcess(proc, exitStatusC), nil
}
// Properties returns the current set of properties
//
func (c *Container) Properties() (garden.Properties, error) {
ctx := context.Background()
labels, err := c.container.Labels(ctx)
if err != nil {
return garden.Properties{}, fmt.Errorf("labels retrieval: %w", err)
}
return labelsToProperties(labels), nil
}
// Property returns the value of the property with the specified name.
//
func (c *Container) | (name string) (string, error) {
properties, err := c.Properties()
if err != nil {
return "", err
}
v, found := properties[name]
if !found {
return "", ErrNotFound(name)
}
return v, nil
}
// Set a named property on a container to a specified value.
//
func (c *Container) SetProperty(name string, value string) error {
labelSet, err := propertiesToLabels(garden.Properties{name: value})
if err != nil {
return err
}
_, err = c.container.SetLabels(context.Background(), labelSet)
if err != nil {
return fmt.Errorf("set label: %w", err)
}
return nil
}
// RemoveProperty - Not Implemented
func (c *Container) RemoveProperty(name string) (err error) {
err = ErrNotImplemented
return
}
// Info - Not Implemented
func (c *Container) Info() (info garden.ContainerInfo, err error) {
err = ErrNotImplemented
return
}
// Metrics - Not Implemented
func (c *Container) Metrics() (metrics garden.Metrics, err error) {
err = ErrNotImplemented
return
}
// StreamIn - Not Implemented
func (c *Container) StreamIn(spec garden.StreamInSpec) (err error) {
err = ErrNotImplemented
return
}
// StreamOut - Not Implemented
func (c *Container) StreamOut(spec garden.StreamOutSpec) (readCloser io.ReadCloser, err error) {
err = ErrNotImplemented
return
}
// SetGraceTime stores the grace time as a containerd label with key "garden.grace-time"
//
func (c *Container) SetGraceTime(graceTime time.Duration) error {
err := c.SetProperty(GraceTimeKey, fmt.Sprintf("%d", graceTime))
if err != nil {
return fmt.Errorf("set grace time: %w", err)
}
return nil
}
// CurrentBandwidthLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentBandwidthLimits() (garden.BandwidthLimits, error) {
return garden.BandwidthLimits{}, nil
}
// CurrentCPULimits returns the CPU shares allocated to the container
func (c *Container) CurrentCPULimits() (garden.CPULimits, error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.CPULimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.CPU == nil ||
spec.Linux.Resources.CPU.Shares == nil {
return garden.CPULimits{}, nil
}
return garden.CPULimits{
Weight: *spec.Linux.Resources.CPU.Shares,
}, nil
}
// CurrentDiskLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentDiskLimits() (garden.DiskLimits, error) {
return garden.DiskLimits{}, nil
}
// CurrentMemoryLimits returns the memory limit in bytes allocated to the container
func (c *Container) CurrentMemoryLimits() (limits garden.MemoryLimits, err error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.MemoryLimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.Memory == nil ||
spec.Linux.Resources.Memory.Limit == nil {
return garden.MemoryLimits{}, nil
}
return garden.MemoryLimits{
LimitInBytes: uint64(*spec.Linux.Resources.Memory.Limit),
}, nil
}
// NetIn - Not Implemented
func (c *Container) NetIn(hostPort, containerPort uint32) (a, b uint32, err error) {
err = ErrNotImplemented
return
}
// NetOut - Not Implemented
func (c *Container) NetOut(netOutRule garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
// BulkNetOut - Not Implemented
func (c *Container) BulkNetOut(netOutRules []garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
func procID(gdnProcSpec garden.ProcessSpec) string {
id := gdnProcSpec.ID
if id == "" {
uuid, err := uuid.NewV4()
if err != nil {
panic(fmt.Errorf("uuid gen: %w", err))
}
id = uuid.String()
}
return id
}
func (c *Container) setupContainerdProcSpec(gdnProcSpec garden.ProcessSpec, containerSpec specs.Spec) (specs.Process, error) {
procSpec := containerSpec.Process
procSpec.Args = append([]string{gdnProcSpec.Path}, gdnProcSpec.Args...)
procSpec.Env = append(procSpec.Env, gdnProcSpec.Env...)
cwd := gdnProcSpec.Dir
if cwd == "" {
cwd = "/"
}
procSpec.Cwd = cwd
if gdnProcSpec.TTY != nil {
procSpec.Terminal = true
if gdnProcSpec.TTY.WindowSize != nil {
procSpec.ConsoleSize = &specs.Box{
Width: uint(gdnProcSpec.TTY.WindowSize.Columns),
Height: uint(gdnProcSpec.TTY.WindowSize.Rows),
}
}
}
if gdnProcSpec.User != "" {
var ok bool
var err error
procSpec.User, ok, err = c.rootfsManager.LookupUser(containerSpec.Root.Path, gdnProcSpec.User)
if err != nil {
return specs.Process{}, fmt.Errorf("lookup user: %w", err)
}
if !ok {
return specs.Process{}, UserNotFoundError{User: gdnProcSpec.User}
}
setUserEnv := fmt.Sprintf("USER=%s", gdnProcSpec.User)
procSpec.Env = append(procSpec.Env, setUserEnv)
}
if pathEnv := envWithDefaultPath(procSpec.User.UID, procSpec.Env); pathEnv != "" {
procSpec.Env = append(procSpec.Env, pathEnv)
}
return *procSpec, nil
}
// Set a default path based on the UID if no existing PATH is found
//
func envWithDefaultPath(uid uint32, currentEnv []string) string {
pathRegexp := regexp.MustCompile("^PATH=.*$")
for _, envVar := range currentEnv {
if pathRegexp.MatchString(envVar) {
return ""
}
}
if uid == 0 {
return SuperuserPath
}
return Path
}
func containerdCIO(gdnProcIO garden.ProcessIO, tty bool) []cio.Opt {
if !tty {
return []cio.Opt{
cio.WithStreams(
gdnProcIO.Stdin,
gdnProcIO.Stdout,
gdnProcIO.Stderr,
),
}
}
cioOpts := []cio.Opt{
cio.WithStreams(
gdnProcIO.Stdin,
gdnProcIO.Stdout,
gdnProcIO.Stderr,
),
cio.WithTerminal,
}
return cioOpts
}
func isNoSuchExecutable(err error) bool {
noSuchFile := regexp.MustCompile(`starting container process caused: exec: .*: stat .*: no such file or directory`)
executableNotFound := regexp.MustCompile(`starting container process caused: exec: .*: executable file not found in \$PATH`)
return noSuchFile.MatchString(err.Error()) || executableNotFound.MatchString(err.Error())
}
| Property | identifier_name |
k8s.go | /*
Copyright 2016 caicloud authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"errors"
"fmt"
"io/ioutil"
"time"
"github.com/zoumo/logdog"
apiv1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// K8SCloud ...
type K8SCloud struct {
name string
host string
bearerToken string
namespace string
insecure bool
inCluster bool
client *kubernetes.Clientset
}
// NewK8SCloud ...
func NewK8SCloud(opts Options) (CloudProvider, error) {
if opts.K8SInCluster {
return NewK8SCloudInCluster(opts)
}
return newK8SCloud(opts)
}
// newK8SCloud returns a cloud object which uses the Options
func newK8SCloud(opts Options) (CloudProvider, error) {
if opts.Name == "" {
return nil, errors.New("K8SCloud: Invalid cloud name")
}
if opts.Host == "" {
return nil, errors.New("K8SCloud: Invalid cloud host")
}
if opts.K8SNamespace == "" {
opts.K8SNamespace = apiv1.NamespaceDefault
}
cloud := &K8SCloud{
name: opts.Name,
host: opts.Host,
bearerToken: opts.K8SBearerToken,
namespace: opts.K8SNamespace,
insecure: opts.Insecure,
}
config := &rest.Config{
Host: opts.Host,
BearerToken: opts.K8SBearerToken,
TLSClientConfig: rest.TLSClientConfig{
Insecure: opts.Insecure,
},
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// NewK8SCloudInCluster returns a cloud object which uses the service account
// kubernetes gives to pods
func NewK8SCloudInCluster(opts Options) (CloudProvider, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + apiv1.ServiceAccountNamespaceKey)
if err != nil {
return nil, err
}
cloud := &K8SCloud{
name: opts.Name,
host: config.Host,
bearerToken: config.BearerToken,
namespace: string(namespace),
insecure: opts.Insecure,
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// Client returns k8s clientset
func (cloud *K8SCloud) Client() *kubernetes.Clientset {
return cloud.client
}
// Name returns k8s cloud name
func (cloud *K8SCloud) Name() string {
return cloud.name
}
// Kind returns cloud type.
func (cloud *K8SCloud) Kind() string {
return KindK8SCloud
}
// Ping returns nil if cloud is accessible
func (cloud *K8SCloud) Ping() error {
_, err := cloud.client.CoreV1().Pods(cloud.namespace).List(meta_v1.ListOptions{})
return err
}
// Resource returns the limit and used quotas of the cloud
func (cloud *K8SCloud) | () (*Resource, error) {
quotas, err := cloud.client.CoreV1().ResourceQuotas(cloud.namespace).List(meta_v1.ListOptions{})
if err != nil {
return nil, err
}
resource := &Resource{
Limit: ZeroQuota.DeepCopy(),
Used: ZeroQuota.DeepCopy(),
}
if len(quotas.Items) == 0 {
// TODO get quota from metrics
return resource, nil
}
quota := quotas.Items[0]
for k, v := range quota.Status.Hard {
resource.Limit[string(k)] = NewQuantityFor(v)
}
for k, v := range quota.Status.Used {
resource.Used[string(k)] = NewQuantityFor(v)
}
return resource, nil
}
// CanProvision returns true if the cloud can provision a worker meeting the quota
func (cloud *K8SCloud) CanProvision(quota Quota) (bool, error) {
resource, err := cloud.Resource()
if err != nil {
return false, err
}
if resource.Limit.IsZero() {
return true, nil
}
if resource.Limit.Enough(resource.Used, quota) {
return true, nil
}
return false, nil
}
// Provision returns a worker if the cloud can provision
func (cloud *K8SCloud) Provision(id string, wopts WorkerOptions) (Worker, error) {
logdog.Infof("Create worker %s with options %v", id, wopts)
var cp *K8SCloud
// If a namespace is specified in the worker options, make a copy of the cloud and set the copy's namespace.
if len(wopts.Namespace) != 0 {
nc := *cloud
cp = &nc
cp.namespace = wopts.Namespace
} else {
cp = cloud
}
can, err := cp.CanProvision(wopts.Quota)
if err != nil {
return nil, err
}
if !can {
// wait
return nil, ErrNoEnoughResource
}
name := "cyclone-worker-" + id
Privileged := true
pod := &apiv1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Namespace: cp.namespace,
Name: name,
Labels: map[string]string{
"cyclone": "worker",
"cyclone/id": id,
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "cyclone-worker",
Image: wopts.WorkerEnvs.WorkerImage,
Env: buildK8SEnv(id, wopts),
WorkingDir: WorkingDir,
Resources: wopts.Quota.ToK8SQuota(),
SecurityContext: &apiv1.SecurityContext{Privileged: &Privileged},
ImagePullPolicy: apiv1.PullAlways,
},
},
RestartPolicy: apiv1.RestartPolicyNever,
},
}
// Mount the cache volume to the worker.
cacheVolume := wopts.CacheVolume
mountPath := wopts.MountPath
if len(cacheVolume) != 0 && len(mountPath) != 0 {
// Check the existence and status of cache volume.
if pvc, err := cloud.client.CoreV1().PersistentVolumeClaims(cp.namespace).Get(cacheVolume, meta_v1.GetOptions{}); err == nil {
if pvc.Status.Phase == apiv1.ClaimBound {
volumeName := "cache-dependency"
pod.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{
apiv1.VolumeMount{
Name: volumeName,
MountPath: mountPath,
},
}
pod.Spec.Volumes = []apiv1.Volume{
apiv1.Volume{
Name: volumeName,
},
}
pod.Spec.Volumes[0].PersistentVolumeClaim = &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: cacheVolume,
}
} else {
// Just log the error and let the pipeline run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as its status is %v", cacheVolume, pvc.Status.Phase)
}
} else {
// Just log the error and let the pipeline run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as fail to get it: %v", cacheVolume, err)
}
}
// pod, err = cloud.Client.CoreV1().Pods(cloud.namespace).Create(pod)
// if err != nil {
// return nil, err
// }
worker := &K8SPodWorker{
K8SCloud: cp,
pod: pod,
}
return worker, nil
}
// LoadWorker rebuilds a worker from worker info
func (cloud *K8SCloud) LoadWorker(info WorkerInfo) (Worker, error) {
if cloud.Kind() != info.CloudKind {
return nil, fmt.Errorf("K8SCloud: can not load worker with another cloud kind %s", info.CloudKind)
}
pod, err := cloud.client.CoreV1().Pods(info.Namespace).Get(info.PodName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
worker := &K8SPodWorker{
K8SCloud: cloud,
createTime: info.CreateTime,
dueTime: info.DueTime,
pod: pod,
}
return worker, nil
}
// GetCloud ...
func (cloud *K8SCloud) GetCloud() Cloud {
return Cloud{
Name: cloud.name,
Type: CloudTypeKubernetes,
Insecure: cloud.insecure,
Kubernetes: &CloudKubernetes{
Host: cloud.host,
BearerToken: cloud.bearerToken,
InCluster: cloud.inCluster,
},
}
}
// ---------------------------------------------------------------------------------
// K8SPodWorker ...
type K8SPodWorker struct {
*K8SCloud
createTime time.Time
dueTime time.Time
pod *apiv1.Pod
}
// Do starts the worker and does the work
func (worker *K8SPodWorker) Do() error {
var pod *apiv1.Pod
var err error
check := func() (bool, error) {
// change pod here
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Get(worker.pod.Name, meta_v1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case apiv1.PodPending:
return false, nil
case apiv1.PodRunning:
return true, nil
default:
return false, fmt.Errorf("K8SCloud: get an error pod status phase[%s]", pod.Status.Phase)
}
}
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Create(worker.pod)
if err != nil {
return err
}
// wait until pod is running
err = wait.Poll(7*time.Second, 2*time.Minute, check)
if err != nil {
logdog.Error("K8SPodWorker: do worker error", logdog.Fields{"err": err})
return err
}
// add time
worker.createTime = time.Now()
worker.dueTime = worker.createTime.Add(time.Duration(WorkerTimeout))
worker.pod = pod
return nil
}
// GetWorkerInfo returns the worker's information
func (worker *K8SPodWorker) GetWorkerInfo() WorkerInfo {
return WorkerInfo{
CloudName: worker.Name(),
CloudKind: worker.Kind(),
CreateTime: worker.createTime,
DueTime: worker.dueTime,
PodName: worker.pod.Name,
Namespace: worker.namespace,
}
}
// IsTimeout reports whether the worker has timed out
// and returns the time left until it is due
func (worker *K8SPodWorker) IsTimeout() (bool, time.Duration) {
now := time.Now()
if now.After(worker.dueTime) {
return true, time.Duration(0)
}
return false, worker.dueTime.Sub(now)
}
// Terminate terminates the worker and destroys it
func (worker *K8SPodWorker) Terminate() error {
client := worker.Client().CoreV1().Pods(worker.namespace)
GracePeriodSeconds := int64(0)
logdog.Debug("worker terminating...", logdog.Fields{"cloud": worker.Name(), "kind": worker.Kind(), "podName": worker.pod.Name})
if Debug {
req := client.GetLogs(worker.pod.Name, &apiv1.PodLogOptions{})
readCloser, err := req.Stream()
if err != nil {
logdog.Error("Can not read log from pod", logdog.Fields{
"cloud": worker.Name(),
"kind": worker.Kind(),
"podName": worker.pod.Name,
"err": err,
})
} else {
defer readCloser.Close()
content, _ := ioutil.ReadAll(readCloser)
logdog.Debug(string(content))
}
}
err := client.Delete(
worker.pod.Name,
&meta_v1.DeleteOptions{
GracePeriodSeconds: &GracePeriodSeconds,
})
return err
}
func buildK8SEnv(id string, opts WorkerOptions) []apiv1.EnvVar {
env := []apiv1.EnvVar{
{
Name: WorkerEventID,
Value: id,
},
{
Name: CycloneServer,
Value: opts.WorkerEnvs.CycloneServer,
},
{
Name: ConsoleWebEndpoint,
Value: opts.WorkerEnvs.ConsoleWebEndpoint,
},
{
Name: RegistryLocation,
Value: opts.WorkerEnvs.RegistryLocation,
},
{
Name: RegistryUsername,
Value: opts.WorkerEnvs.RegistryUsername,
},
{
Name: RegistryPassword,
Value: opts.WorkerEnvs.RegistryPassword,
},
{
Name: GitlabURL,
Value: opts.WorkerEnvs.GitlabURL,
},
{
Name: LogServer,
Value: opts.WorkerEnvs.LogServer,
},
{
Name: WorkerImage,
Value: opts.WorkerEnvs.WorkerImage,
},
{
Name: LimitCPU,
Value: opts.Quota[ResourceLimitsCPU].String(),
},
{
Name: LimitMemory,
Value: opts.Quota[ResourceLimitsMemory].String(),
},
{
Name: RequestCPU,
Value: opts.Quota[ResourceRequestsCPU].String(),
},
{
Name: RequestMemory,
Value: opts.Quota[ResourceRequestsMemory].String(),
},
}
return env
}
| Resource | identifier_name |
k8s.go | /*
Copyright 2016 caicloud authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"errors"
"fmt"
"io/ioutil"
"time"
"github.com/zoumo/logdog"
apiv1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// K8SCloud ...
type K8SCloud struct {
name string
host string
bearerToken string
namespace string
insecure bool
inCluster bool
client *kubernetes.Clientset
}
// NewK8SCloud ...
func NewK8SCloud(opts Options) (CloudProvider, error) {
if opts.K8SInCluster {
return NewK8SCloudInCluster(opts)
}
return newK8SCloud(opts)
}
// newK8SCloud returns a cloud object which uses the Options
func newK8SCloud(opts Options) (CloudProvider, error) {
if opts.Name == "" {
return nil, errors.New("K8SCloud: Invalid cloud name")
}
if opts.Host == "" {
return nil, errors.New("K8SCloud: Invalid cloud host")
}
if opts.K8SNamespace == "" {
opts.K8SNamespace = apiv1.NamespaceDefault
}
cloud := &K8SCloud{
name: opts.Name,
host: opts.Host,
bearerToken: opts.K8SBearerToken,
namespace: opts.K8SNamespace,
insecure: opts.Insecure,
}
config := &rest.Config{
Host: opts.Host,
BearerToken: opts.K8SBearerToken,
TLSClientConfig: rest.TLSClientConfig{
Insecure: opts.Insecure,
},
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// NewK8SCloudInCluster returns a cloud object which uses the service account
// kubernetes gives to pods
func NewK8SCloudInCluster(opts Options) (CloudProvider, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + apiv1.ServiceAccountNamespaceKey)
if err != nil {
return nil, err
}
cloud := &K8SCloud{
name: opts.Name,
host: config.Host,
bearerToken: config.BearerToken,
namespace: string(namespace),
insecure: opts.Insecure,
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// Client returns k8s clientset
func (cloud *K8SCloud) Client() *kubernetes.Clientset {
return cloud.client
}
// Name returns k8s cloud name
func (cloud *K8SCloud) Name() string {
return cloud.name
}
// Kind returns cloud type.
func (cloud *K8SCloud) Kind() string {
return KindK8SCloud
}
// Ping returns nil if cloud is accessible
func (cloud *K8SCloud) Ping() error {
_, err := cloud.client.CoreV1().Pods(cloud.namespace).List(meta_v1.ListOptions{})
return err
}
// Resource returns the limit and used quotas of the cloud
func (cloud *K8SCloud) Resource() (*Resource, error) {
quotas, err := cloud.client.CoreV1().ResourceQuotas(cloud.namespace).List(meta_v1.ListOptions{})
if err != nil {
return nil, err
}
resource := &Resource{
Limit: ZeroQuota.DeepCopy(),
Used: ZeroQuota.DeepCopy(),
}
if len(quotas.Items) == 0 {
// TODO get quota from metrics
return resource, nil
}
quota := quotas.Items[0]
for k, v := range quota.Status.Hard {
resource.Limit[string(k)] = NewQuantityFor(v)
}
for k, v := range quota.Status.Used |
return resource, nil
}
// CanProvision returns true if the cloud can provision a worker meeting the quota
func (cloud *K8SCloud) CanProvision(quota Quota) (bool, error) {
resource, err := cloud.Resource()
if err != nil {
return false, err
}
if resource.Limit.IsZero() {
return true, nil
}
if resource.Limit.Enough(resource.Used, quota) {
return true, nil
}
return false, nil
}
// Provision returns a worker if the cloud can provision
func (cloud *K8SCloud) Provision(id string, wopts WorkerOptions) (Worker, error) {
logdog.Infof("Create worker %s with options %v", id, wopts)
var cp *K8SCloud
// If a namespace is specified in the worker options, make a copy of the cloud and set the copy's namespace.
if len(wopts.Namespace) != 0 {
nc := *cloud
cp = &nc
cp.namespace = wopts.Namespace
} else {
cp = cloud
}
can, err := cp.CanProvision(wopts.Quota)
if err != nil {
return nil, err
}
if !can {
// wait
return nil, ErrNoEnoughResource
}
name := "cyclone-worker-" + id
Privileged := true
pod := &apiv1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Namespace: cp.namespace,
Name: name,
Labels: map[string]string{
"cyclone": "worker",
"cyclone/id": id,
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "cyclone-worker",
Image: wopts.WorkerEnvs.WorkerImage,
Env: buildK8SEnv(id, wopts),
WorkingDir: WorkingDir,
Resources: wopts.Quota.ToK8SQuota(),
SecurityContext: &apiv1.SecurityContext{Privileged: &Privileged},
ImagePullPolicy: apiv1.PullAlways,
},
},
RestartPolicy: apiv1.RestartPolicyNever,
},
}
// Mount the cache volume to the worker.
cacheVolume := wopts.CacheVolume
mountPath := wopts.MountPath
if len(cacheVolume) != 0 && len(mountPath) != 0 {
// Check the existence and status of cache volume.
if pvc, err := cloud.client.CoreV1().PersistentVolumeClaims(cp.namespace).Get(cacheVolume, meta_v1.GetOptions{}); err == nil {
if pvc.Status.Phase == apiv1.ClaimBound {
volumeName := "cache-dependency"
pod.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{
apiv1.VolumeMount{
Name: volumeName,
MountPath: mountPath,
},
}
pod.Spec.Volumes = []apiv1.Volume{
apiv1.Volume{
Name: volumeName,
},
}
pod.Spec.Volumes[0].PersistentVolumeClaim = &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: cacheVolume,
}
} else {
// Just log the error and let the pipeline run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as its status is %v", cacheVolume, pvc.Status.Phase)
}
} else {
// Just log the error and let the pipeline run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as fail to get it: %v", cacheVolume, err)
}
}
// pod, err = cloud.Client.CoreV1().Pods(cloud.namespace).Create(pod)
// if err != nil {
// return nil, err
// }
worker := &K8SPodWorker{
K8SCloud: cp,
pod: pod,
}
return worker, nil
}
// LoadWorker rebuilds a worker from worker info
func (cloud *K8SCloud) LoadWorker(info WorkerInfo) (Worker, error) {
if cloud.Kind() != info.CloudKind {
return nil, fmt.Errorf("K8SCloud: can not load worker with another cloud kind %s", info.CloudKind)
}
pod, err := cloud.client.CoreV1().Pods(info.Namespace).Get(info.PodName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
worker := &K8SPodWorker{
K8SCloud: cloud,
createTime: info.CreateTime,
dueTime: info.DueTime,
pod: pod,
}
return worker, nil
}
// GetCloud ...
func (cloud *K8SCloud) GetCloud() Cloud {
return Cloud{
Name: cloud.name,
Type: CloudTypeKubernetes,
Insecure: cloud.insecure,
Kubernetes: &CloudKubernetes{
Host: cloud.host,
BearerToken: cloud.bearerToken,
InCluster: cloud.inCluster,
},
}
}
// ---------------------------------------------------------------------------------
// K8SPodWorker ...
type K8SPodWorker struct {
*K8SCloud
createTime time.Time
dueTime time.Time
pod *apiv1.Pod
}
// Do starts the worker and does the work
func (worker *K8SPodWorker) Do() error {
var pod *apiv1.Pod
var err error
check := func() (bool, error) {
// change pod here
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Get(worker.pod.Name, meta_v1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case apiv1.PodPending:
return false, nil
case apiv1.PodRunning:
return true, nil
default:
return false, fmt.Errorf("K8SCloud: get an error pod status phase[%s]", pod.Status.Phase)
}
}
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Create(worker.pod)
if err != nil {
return err
}
// wait until pod is running
err = wait.Poll(7*time.Second, 2*time.Minute, check)
if err != nil {
logdog.Error("K8SPodWorker: do worker error", logdog.Fields{"err": err})
return err
}
// add time
worker.createTime = time.Now()
worker.dueTime = worker.createTime.Add(time.Duration(WorkerTimeout))
worker.pod = pod
return nil
}
// GetWorkerInfo returns the worker's information
func (worker *K8SPodWorker) GetWorkerInfo() WorkerInfo {
return WorkerInfo{
CloudName: worker.Name(),
CloudKind: worker.Kind(),
CreateTime: worker.createTime,
DueTime: worker.dueTime,
PodName: worker.pod.Name,
Namespace: worker.namespace,
}
}
// IsTimeout reports whether the worker has timed out
// and returns the time left until it is due
func (worker *K8SPodWorker) IsTimeout() (bool, time.Duration) {
now := time.Now()
if now.After(worker.dueTime) {
return true, time.Duration(0)
}
return false, worker.dueTime.Sub(now)
}
// Terminate terminates the worker and destroys it
func (worker *K8SPodWorker) Terminate() error {
client := worker.Client().CoreV1().Pods(worker.namespace)
GracePeriodSeconds := int64(0)
logdog.Debug("worker terminating...", logdog.Fields{"cloud": worker.Name(), "kind": worker.Kind(), "podName": worker.pod.Name})
if Debug {
req := client.GetLogs(worker.pod.Name, &apiv1.PodLogOptions{})
readCloser, err := req.Stream()
if err != nil {
logdog.Error("Can not read log from pod", logdog.Fields{
"cloud": worker.Name(),
"kind": worker.Kind(),
"podName": worker.pod.Name,
"err": err,
})
} else {
defer readCloser.Close()
content, _ := ioutil.ReadAll(readCloser)
logdog.Debug(string(content))
}
}
err := client.Delete(
worker.pod.Name,
&meta_v1.DeleteOptions{
GracePeriodSeconds: &GracePeriodSeconds,
})
return err
}
func buildK8SEnv(id string, opts WorkerOptions) []apiv1.EnvVar {
env := []apiv1.EnvVar{
{
Name: WorkerEventID,
Value: id,
},
{
Name: CycloneServer,
Value: opts.WorkerEnvs.CycloneServer,
},
{
Name: ConsoleWebEndpoint,
Value: opts.WorkerEnvs.ConsoleWebEndpoint,
},
{
Name: RegistryLocation,
Value: opts.WorkerEnvs.RegistryLocation,
},
{
Name: RegistryUsername,
Value: opts.WorkerEnvs.RegistryUsername,
},
{
Name: RegistryPassword,
Value: opts.WorkerEnvs.RegistryPassword,
},
{
Name: GitlabURL,
Value: opts.WorkerEnvs.GitlabURL,
},
{
Name: LogServer,
Value: opts.WorkerEnvs.LogServer,
},
{
Name: WorkerImage,
Value: opts.WorkerEnvs.WorkerImage,
},
{
Name: LimitCPU,
Value: opts.Quota[ResourceLimitsCPU].String(),
},
{
Name: LimitMemory,
Value: opts.Quota[ResourceLimitsMemory].String(),
},
{
Name: RequestCPU,
Value: opts.Quota[ResourceRequestsCPU].String(),
},
{
Name: RequestMemory,
Value: opts.Quota[ResourceRequestsMemory].String(),
},
}
return env
}
| {
resource.Used[string(k)] = NewQuantityFor(v)
} | conditional_block |
k8s.go | /*
Copyright 2016 caicloud authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"errors"
"fmt"
"io/ioutil"
"time"
"github.com/zoumo/logdog"
apiv1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// K8SCloud ...
type K8SCloud struct {
name string
host string
bearerToken string
namespace string
insecure bool
inCluster bool
client *kubernetes.Clientset
}
// NewK8SCloud ...
func NewK8SCloud(opts Options) (CloudProvider, error) {
if opts.K8SInCluster {
return NewK8SCloudInCluster(opts)
}
return newK8SCloud(opts)
}
// newK8SCloud returns a cloud object which uses the Options
func newK8SCloud(opts Options) (CloudProvider, error) {
if opts.Name == "" {
return nil, errors.New("K8SCloud: Invalid cloud name")
}
if opts.Host == "" {
return nil, errors.New("K8SCloud: Invalid cloud host")
}
if opts.K8SNamespace == "" {
opts.K8SNamespace = apiv1.NamespaceDefault
}
cloud := &K8SCloud{
name: opts.Name,
host: opts.Host,
bearerToken: opts.K8SBearerToken,
namespace: opts.K8SNamespace,
insecure: opts.Insecure,
}
config := &rest.Config{
Host: opts.Host,
BearerToken: opts.K8SBearerToken,
TLSClientConfig: rest.TLSClientConfig{
Insecure: opts.Insecure,
},
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// NewK8SCloudInCluster returns a cloud object which uses the service account
// kubernetes gives to pods
func NewK8SCloudInCluster(opts Options) (CloudProvider, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + apiv1.ServiceAccountNamespaceKey)
if err != nil {
return nil, err
}
cloud := &K8SCloud{
name: opts.Name,
host: config.Host,
bearerToken: config.BearerToken,
namespace: string(namespace),
insecure: opts.Insecure,
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// Client returns k8s clientset
func (cloud *K8SCloud) Client() *kubernetes.Clientset {
return cloud.client
}
// Name returns k8s cloud name
func (cloud *K8SCloud) Name() string {
return cloud.name
}
// Kind returns cloud type.
func (cloud *K8SCloud) Kind() string {
return KindK8SCloud
}
// Ping returns nil if cloud is accessible
func (cloud *K8SCloud) Ping() error {
_, err := cloud.client.CoreV1().Pods(cloud.namespace).List(meta_v1.ListOptions{})
return err
}
// Resource returns the limit and used quotas of the cloud
func (cloud *K8SCloud) Resource() (*Resource, error) {
quotas, err := cloud.client.CoreV1().ResourceQuotas(cloud.namespace).List(meta_v1.ListOptions{})
if err != nil {
return nil, err
}
resource := &Resource{
Limit: ZeroQuota.DeepCopy(),
Used: ZeroQuota.DeepCopy(),
}
if len(quotas.Items) == 0 {
// TODO get quota from metrics
return resource, nil
}
quota := quotas.Items[0]
for k, v := range quota.Status.Hard {
resource.Limit[string(k)] = NewQuantityFor(v)
}
for k, v := range quota.Status.Used {
resource.Used[string(k)] = NewQuantityFor(v)
}
return resource, nil
}
// CanProvision returns true if the cloud can provision a worker meeting the quota
func (cloud *K8SCloud) CanProvision(quota Quota) (bool, error) {
resource, err := cloud.Resource()
if err != nil {
return false, err
}
if resource.Limit.IsZero() {
return true, nil
}
if resource.Limit.Enough(resource.Used, quota) {
return true, nil
}
return false, nil
}
// Provision returns a worker if the cloud can provision
func (cloud *K8SCloud) Provision(id string, wopts WorkerOptions) (Worker, error) {
logdog.Infof("Create worker %s with options %v", id, wopts)
var cp *K8SCloud
// If a namespace is specified in the worker options, make a copy of the cloud and set the copy's namespace.
if len(wopts.Namespace) != 0 {
nc := *cloud
cp = &nc
cp.namespace = wopts.Namespace
} else {
cp = cloud
}
can, err := cp.CanProvision(wopts.Quota)
if err != nil {
return nil, err
}
if !can {
// wait
return nil, ErrNoEnoughResource
}
name := "cyclone-worker-" + id
Privileged := true
pod := &apiv1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Namespace: cp.namespace,
Name: name,
Labels: map[string]string{
"cyclone": "worker",
"cyclone/id": id,
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "cyclone-worker",
Image: wopts.WorkerEnvs.WorkerImage,
Env: buildK8SEnv(id, wopts),
WorkingDir: WorkingDir,
Resources: wopts.Quota.ToK8SQuota(),
SecurityContext: &apiv1.SecurityContext{Privileged: &Privileged},
ImagePullPolicy: apiv1.PullAlways,
},
},
RestartPolicy: apiv1.RestartPolicyNever,
},
}
// Mount the cache volume to the worker.
cacheVolume := wopts.CacheVolume
mountPath := wopts.MountPath
if len(cacheVolume) != 0 && len(mountPath) != 0 {
// Check the existence and status of cache volume.
if pvc, err := cloud.client.CoreV1().PersistentVolumeClaims(cp.namespace).Get(cacheVolume, meta_v1.GetOptions{}); err == nil {
if pvc.Status.Phase == apiv1.ClaimBound {
volumeName := "cache-dependency"
pod.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{
apiv1.VolumeMount{
Name: volumeName,
MountPath: mountPath,
},
}
pod.Spec.Volumes = []apiv1.Volume{
apiv1.Volume{
Name: volumeName,
},
}
pod.Spec.Volumes[0].PersistentVolumeClaim = &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: cacheVolume,
}
} else {
// Just log the error and let the pipeline run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as its status is %v", cacheVolume, pvc.Status.Phase)
}
} else {
// Just log the error and let the pipeline run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as fail to get it: %v", cacheVolume, err)
}
}
// pod, err = cloud.Client.CoreV1().Pods(cloud.namespace).Create(pod)
// if err != nil {
// return nil, err
// }
worker := &K8SPodWorker{
K8SCloud: cp,
pod: pod,
}
return worker, nil
}
// LoadWorker rebuilds a worker from worker info
func (cloud *K8SCloud) LoadWorker(info WorkerInfo) (Worker, error) {
if cloud.Kind() != info.CloudKind {
return nil, fmt.Errorf("K8SCloud: can not load worker with another cloud kind %s", info.CloudKind)
}
pod, err := cloud.client.CoreV1().Pods(info.Namespace).Get(info.PodName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
worker := &K8SPodWorker{
K8SCloud: cloud,
createTime: info.CreateTime,
dueTime: info.DueTime,
pod: pod,
}
return worker, nil
}
// GetCloud ...
func (cloud *K8SCloud) GetCloud() Cloud {
return Cloud{
Name: cloud.name,
Type: CloudTypeKubernetes,
Insecure: cloud.insecure,
Kubernetes: &CloudKubernetes{
Host: cloud.host,
BearerToken: cloud.bearerToken,
InCluster: cloud.inCluster,
},
}
}
// ---------------------------------------------------------------------------------
// K8SPodWorker ...
type K8SPodWorker struct {
*K8SCloud
createTime time.Time
dueTime time.Time
pod *apiv1.Pod
}
// Do starts the worker and does the work
func (worker *K8SPodWorker) Do() error {
var pod *apiv1.Pod
var err error
check := func() (bool, error) {
// change pod here
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Get(worker.pod.Name, meta_v1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case apiv1.PodPending:
return false, nil
case apiv1.PodRunning:
return true, nil
default:
return false, fmt.Errorf("K8SCloud: get an error pod status phase[%s]", pod.Status.Phase)
}
}
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Create(worker.pod)
if err != nil {
return err
}
// wait until pod is running
err = wait.Poll(7*time.Second, 2*time.Minute, check)
if err != nil {
logdog.Error("K8SPodWorker: do worker error", logdog.Fields{"err": err})
return err
}
// add time
worker.createTime = time.Now()
worker.dueTime = worker.createTime.Add(time.Duration(WorkerTimeout))
worker.pod = pod
return nil
}
// GetWorkerInfo returns the worker's information
func (worker *K8SPodWorker) GetWorkerInfo() WorkerInfo {
return WorkerInfo{
CloudName: worker.Name(),
CloudKind: worker.Kind(),
CreateTime: worker.createTime,
DueTime: worker.dueTime,
PodName: worker.pod.Name,
Namespace: worker.namespace,
}
}
// IsTimeout reports whether the worker has timed out
// and returns the time left until it is due
func (worker *K8SPodWorker) IsTimeout() (bool, time.Duration) {
now := time.Now()
if now.After(worker.dueTime) {
return true, time.Duration(0)
}
return false, worker.dueTime.Sub(now)
}
// Terminate terminates the worker and destroys it
func (worker *K8SPodWorker) Terminate() error {
client := worker.Client().CoreV1().Pods(worker.namespace)
GracePeriodSeconds := int64(0)
logdog.Debug("worker terminating...", logdog.Fields{"cloud": worker.Name(), "kind": worker.Kind(), "podName": worker.pod.Name})
if Debug {
req := client.GetLogs(worker.pod.Name, &apiv1.PodLogOptions{})
readCloser, err := req.Stream()
if err != nil {
logdog.Error("Can not read log from pod", logdog.Fields{
"cloud": worker.Name(),
"kind": worker.Kind(),
"podName": worker.pod.Name,
"err": err,
})
} else {
defer readCloser.Close()
content, _ := ioutil.ReadAll(readCloser)
logdog.Debug(string(content))
}
}
err := client.Delete(
worker.pod.Name,
&meta_v1.DeleteOptions{
GracePeriodSeconds: &GracePeriodSeconds,
})
return err
}
func buildK8SEnv(id string, opts WorkerOptions) []apiv1.EnvVar | {
env := []apiv1.EnvVar{
{
Name: WorkerEventID,
Value: id,
},
{
Name: CycloneServer,
Value: opts.WorkerEnvs.CycloneServer,
},
{
Name: ConsoleWebEndpoint,
Value: opts.WorkerEnvs.ConsoleWebEndpoint,
},
{
Name: RegistryLocation,
Value: opts.WorkerEnvs.RegistryLocation,
},
{
Name: RegistryUsername,
Value: opts.WorkerEnvs.RegistryUsername,
},
{
Name: RegistryPassword,
Value: opts.WorkerEnvs.RegistryPassword,
},
{
Name: GitlabURL,
Value: opts.WorkerEnvs.GitlabURL,
},
{
Name: LogServer,
Value: opts.WorkerEnvs.LogServer,
},
{
Name: WorkerImage,
Value: opts.WorkerEnvs.WorkerImage,
},
{
Name: LimitCPU,
Value: opts.Quota[ResourceLimitsCPU].String(),
},
{
Name: LimitMemory,
Value: opts.Quota[ResourceLimitsMemory].String(),
},
{
Name: RequestCPU,
Value: opts.Quota[ResourceRequestsCPU].String(),
},
{
Name: RequestMemory,
Value: opts.Quota[ResourceRequestsMemory].String(),
},
}
return env
} | identifier_body |
|
k8s.go | /*
Copyright 2016 caicloud authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"errors"
"fmt"
"io/ioutil"
"time"
"github.com/zoumo/logdog"
apiv1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// K8SCloud ...
type K8SCloud struct {
name string
host string
bearerToken string
namespace string
insecure bool
inCluster bool
client *kubernetes.Clientset
}
// NewK8SCloud ...
func NewK8SCloud(opts Options) (CloudProvider, error) {
if opts.K8SInCluster {
return NewK8SCloudInCluster(opts)
}
return newK8SCloud(opts)
}
// newK8SCloud returns a cloud object which uses the Options
func newK8SCloud(opts Options) (CloudProvider, error) {
if opts.Name == "" {
return nil, errors.New("K8SCloud: Invalid cloud name")
}
if opts.Host == "" {
return nil, errors.New("K8SCloud: Invalid cloud host")
}
if opts.K8SNamespace == "" {
opts.K8SNamespace = apiv1.NamespaceDefault
}
cloud := &K8SCloud{
name: opts.Name,
host: opts.Host,
bearerToken: opts.K8SBearerToken,
namespace: opts.K8SNamespace,
insecure: opts.Insecure,
}
config := &rest.Config{
Host: opts.Host,
BearerToken: opts.K8SBearerToken,
TLSClientConfig: rest.TLSClientConfig{
Insecure: opts.Insecure,
},
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// NewK8SCloudInCluster returns a cloud object which uses the service account
// kubernetes gives to pods
func NewK8SCloudInCluster(opts Options) (CloudProvider, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + apiv1.ServiceAccountNamespaceKey)
if err != nil {
return nil, err
}
cloud := &K8SCloud{
name: opts.Name,
host: config.Host,
bearerToken: config.BearerToken,
namespace: string(namespace),
insecure: opts.Insecure,
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// Client returns k8s clientset
func (cloud *K8SCloud) Client() *kubernetes.Clientset {
return cloud.client
}
// Name returns k8s cloud name | return cloud.name
}
// Kind returns cloud type.
func (cloud *K8SCloud) Kind() string {
return KindK8SCloud
}
// Ping returns nil if cloud is accessible
func (cloud *K8SCloud) Ping() error {
_, err := cloud.client.CoreV1().Pods(cloud.namespace).List(meta_v1.ListOptions{})
return err
}
// Resource returns the limit and used quotas of the cloud
func (cloud *K8SCloud) Resource() (*Resource, error) {
quotas, err := cloud.client.CoreV1().ResourceQuotas(cloud.namespace).List(meta_v1.ListOptions{})
if err != nil {
return nil, err
}
resource := &Resource{
Limit: ZeroQuota.DeepCopy(),
Used: ZeroQuota.DeepCopy(),
}
if len(quotas.Items) == 0 {
// TODO get quota from metrics
return resource, nil
}
quota := quotas.Items[0]
for k, v := range quota.Status.Hard {
resource.Limit[string(k)] = NewQuantityFor(v)
}
for k, v := range quota.Status.Used {
resource.Used[string(k)] = NewQuantityFor(v)
}
return resource, nil
}
// CanProvision returns true if the cloud can provision a worker meeting the quota
func (cloud *K8SCloud) CanProvision(quota Quota) (bool, error) {
resource, err := cloud.Resource()
if err != nil {
return false, err
}
if resource.Limit.IsZero() {
return true, nil
}
if resource.Limit.Enough(resource.Used, quota) {
return true, nil
}
return false, nil
}
// Provision returns a worker if the cloud can provision
func (cloud *K8SCloud) Provision(id string, wopts WorkerOptions) (Worker, error) {
logdog.Infof("Create worker %s with options %v", id, wopts)
var cp *K8SCloud
// If a namespace is specified in the worker options, make a copy of the cloud and set the copy's namespace.
if len(wopts.Namespace) != 0 {
nc := *cloud
cp = &nc
cp.namespace = wopts.Namespace
} else {
cp = cloud
}
can, err := cp.CanProvision(wopts.Quota)
if err != nil {
return nil, err
}
if !can {
// wait
return nil, ErrNoEnoughResource
}
name := "cyclone-worker-" + id
Privileged := true
pod := &apiv1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Namespace: cp.namespace,
Name: name,
Labels: map[string]string{
"cyclone": "worker",
"cyclone/id": id,
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "cyclone-worker",
Image: wopts.WorkerEnvs.WorkerImage,
Env: buildK8SEnv(id, wopts),
WorkingDir: WorkingDir,
Resources: wopts.Quota.ToK8SQuota(),
SecurityContext: &apiv1.SecurityContext{Privileged: &Privileged},
ImagePullPolicy: apiv1.PullAlways,
},
},
RestartPolicy: apiv1.RestartPolicyNever,
},
}
// Mount the cache volume to the worker.
cacheVolume := wopts.CacheVolume
mountPath := wopts.MountPath
if len(cacheVolume) != 0 && len(mountPath) != 0 {
// Check the existence and status of cache volume.
if pvc, err := cloud.client.CoreV1().PersistentVolumeClaims(cp.namespace).Get(cacheVolume, meta_v1.GetOptions{}); err == nil {
if pvc.Status.Phase == apiv1.ClaimBound {
volumeName := "cache-dependency"
pod.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{
apiv1.VolumeMount{
Name: volumeName,
MountPath: mountPath,
},
}
pod.Spec.Volumes = []apiv1.Volume{
apiv1.Volume{
Name: volumeName,
},
}
pod.Spec.Volumes[0].PersistentVolumeClaim = &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: cacheVolume,
}
} else {
// Just log the error and let the pipeline run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as its status is %v", cacheVolume, pvc.Status.Phase)
}
} else {
// Just log the error and let the pipeline run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as fail to get it: %v", cacheVolume, err)
}
}
// pod, err = cloud.Client.CoreV1().Pods(cloud.namespace).Create(pod)
// if err != nil {
// return nil, err
// }
worker := &K8SPodWorker{
K8SCloud: cp,
pod: pod,
}
return worker, nil
}
// LoadWorker rebuilds a worker from worker info
func (cloud *K8SCloud) LoadWorker(info WorkerInfo) (Worker, error) {
if cloud.Kind() != info.CloudKind {
return nil, fmt.Errorf("K8SCloud: can not load worker with another cloud kind %s", info.CloudKind)
}
pod, err := cloud.client.CoreV1().Pods(info.Namespace).Get(info.PodName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
worker := &K8SPodWorker{
K8SCloud: cloud,
createTime: info.CreateTime,
dueTime: info.DueTime,
pod: pod,
}
return worker, nil
}
// GetCloud ...
func (cloud *K8SCloud) GetCloud() Cloud {
return Cloud{
Name: cloud.name,
Type: CloudTypeKubernetes,
Insecure: cloud.insecure,
Kubernetes: &CloudKubernetes{
Host: cloud.host,
BearerToken: cloud.bearerToken,
InCluster: cloud.inCluster,
},
}
}
// ---------------------------------------------------------------------------------
// K8SPodWorker ...
type K8SPodWorker struct {
*K8SCloud
createTime time.Time
dueTime time.Time
pod *apiv1.Pod
}
// Do starts the worker and does the work
func (worker *K8SPodWorker) Do() error {
var pod *apiv1.Pod
var err error
check := func() (bool, error) {
// change pod here
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Get(worker.pod.Name, meta_v1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case apiv1.PodPending:
return false, nil
case apiv1.PodRunning:
return true, nil
default:
return false, fmt.Errorf("K8SCloud: get an error pod status phase[%s]", pod.Status.Phase)
}
}
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Create(worker.pod)
if err != nil {
return err
}
// wait until pod is running
err = wait.Poll(7*time.Second, 2*time.Minute, check)
if err != nil {
logdog.Error("K8SPodWorker: do worker error", logdog.Fields{"err": err})
return err
}
// add time
worker.createTime = time.Now()
worker.dueTime = worker.createTime.Add(time.Duration(WorkerTimeout))
worker.pod = pod
return nil
}
// GetWorkerInfo returns the worker's information
func (worker *K8SPodWorker) GetWorkerInfo() WorkerInfo {
return WorkerInfo{
CloudName: worker.Name(),
CloudKind: worker.Kind(),
CreateTime: worker.createTime,
DueTime: worker.dueTime,
PodName: worker.pod.Name,
Namespace: worker.namespace,
}
}
// IsTimeout reports whether the worker has timed out
// and returns the time left until it is due
func (worker *K8SPodWorker) IsTimeout() (bool, time.Duration) {
now := time.Now()
if now.After(worker.dueTime) {
return true, time.Duration(0)
}
return false, worker.dueTime.Sub(now)
}
// Terminate terminates the worker and destroys it
func (worker *K8SPodWorker) Terminate() error {
client := worker.Client().CoreV1().Pods(worker.namespace)
GracePeriodSeconds := int64(0)
logdog.Debug("worker terminating...", logdog.Fields{"cloud": worker.Name(), "kind": worker.Kind(), "podName": worker.pod.Name})
if Debug {
req := client.GetLogs(worker.pod.Name, &apiv1.PodLogOptions{})
readCloser, err := req.Stream()
if err != nil {
logdog.Error("Can not read log from pod", logdog.Fields{
"cloud": worker.Name(),
"kind": worker.Kind(),
"podName": worker.pod.Name,
"err": err,
})
} else {
defer readCloser.Close()
content, _ := ioutil.ReadAll(readCloser)
logdog.Debug(string(content))
}
}
err := client.Delete(
worker.pod.Name,
&meta_v1.DeleteOptions{
GracePeriodSeconds: &GracePeriodSeconds,
})
return err
}
func buildK8SEnv(id string, opts WorkerOptions) []apiv1.EnvVar {
env := []apiv1.EnvVar{
{
Name: WorkerEventID,
Value: id,
},
{
Name: CycloneServer,
Value: opts.WorkerEnvs.CycloneServer,
},
{
Name: ConsoleWebEndpoint,
Value: opts.WorkerEnvs.ConsoleWebEndpoint,
},
{
Name: RegistryLocation,
Value: opts.WorkerEnvs.RegistryLocation,
},
{
Name: RegistryUsername,
Value: opts.WorkerEnvs.RegistryUsername,
},
{
Name: RegistryPassword,
Value: opts.WorkerEnvs.RegistryPassword,
},
{
Name: GitlabURL,
Value: opts.WorkerEnvs.GitlabURL,
},
{
Name: LogServer,
Value: opts.WorkerEnvs.LogServer,
},
{
Name: WorkerImage,
Value: opts.WorkerEnvs.WorkerImage,
},
{
Name: LimitCPU,
Value: opts.Quota[ResourceLimitsCPU].String(),
},
{
Name: LimitMemory,
Value: opts.Quota[ResourceLimitsMemory].String(),
},
{
Name: RequestCPU,
Value: opts.Quota[ResourceRequestsCPU].String(),
},
{
Name: RequestMemory,
Value: opts.Quota[ResourceRequestsMemory].String(),
},
}
return env
} | func (cloud *K8SCloud) Name() string { | random_line_split |
solve_kami.go | // Accepts iPhone screenshots of a Kami gameboard and prints a solution.
// See https://itunes.apple.com/us/app/kami/id710724007
package main
import (
"flag"
"fmt"
"image"
"image/color"
"image/draw"
"image/png"
"log"
"math"
"os"
"sort"
"strconv"
colorful "github.com/lucasb-eyer/go-colorful"
)
const (
ROWS = 16
COLS = 10
MAX_COLOR = 10
)
type empty struct{}
// Creates a two-dimensional slice of ints with r rows and c cols,
// ensuring the rows are in contiguous memory.
func newIntMatrix(r, c int) [][]int {
mem := make([]int, r*c)
mat := make([][]int, r)
for i := 0; i < r; i++ {
mat[i], mem = mem[:c], mem[c:]
}
return mat
}
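// processImage slices the screenshot into a ROWS x COLS grid, matches each tile's average
// color to the nearest swatch, writes a debug image, and builds the resulting Board.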
func processImage(src image.Image, numColors int) *Board {
img := convertToRGBA(src)
b := img.Bounds()
tileSize := b.Dx() / COLS
m := image.NewRGBA(image.Rect(0, 0, COLS*tileSize, ROWS*tileSize))
draw.Draw(m, m.Bounds(), &image.Uniform{color.Black}, image.ZP, draw.Src)
grid := newIntMatrix(ROWS, COLS)
swatches := extractSwatches(img, numColors)
for col := 0; col < COLS; col++ {
for row := 0; row < ROWS; row++ {
sr := image.Rect(
b.Min.X+col*tileSize, b.Min.Y+row*tileSize,
b.Min.X+(col+1)*tileSize, b.Min.Y+(row+1)*tileSize,
)
sr = trimRect(sr, 5)
c, x := nearestSwatch(swatches, averageColor(img.SubImage(sr)))
dr := image.Rect(col*tileSize, row*tileSize, (col+1)*tileSize, (row+1)*tileSize)
dr = trimRect(dr, 5)
draw.Draw(m, dr, &image.Uniform{c}, dr.Min, draw.Src)
grid[row][col] = x + 1
}
}
savePNG("processed", m)
findRegions(grid)
return newBoard(grid)
}
// Shrinks the rectangle by n pixels in each dimension.
func trimRect(r image.Rectangle, n int) image.Rectangle {
return image.Rect(r.Min.X+n, r.Min.Y+n, r.Max.X-n, r.Max.Y-n)
}
// Processes the swatches section of the board image. Also creates some
// files in /tmp for diagnostic purposes.
func extractSwatches(src *image.RGBA, numColors int) []colorful.Color {
const (
W = 400
H = 75
)
var swatches []colorful.Color
b := src.Bounds()
sw := W / numColors
for i := 0; i < numColors; i++ {
m := src.SubImage(trimRect(image.Rect(b.Min.X+i*sw, b.Max.Y-H, b.Min.X+(i+1)*sw, b.Max.Y), 10))
swatches = append(swatches, toColorful(averageColor(m)))
savePNG(strconv.Itoa(i), m) // for debugging
}
const dim = 50
m := image.NewRGBA(image.Rect(0, 0, dim*len(swatches), dim))
for i, c := range swatches {
r := image.Rect(i*dim, 0, (i+1)*dim, dim)
draw.Draw(m, r, &image.Uniform{fromColorful(c)}, image.ZP, draw.Src)
}
savePNG("swatches", m) // for debugging
return swatches
}
// Reports which of the swatches is most similar to c.
func nearestSwatch(swatches []colorful.Color, c color.Color) (color.Color, int) {
c0 := toColorful(c)
minDist := math.MaxFloat64
var best colorful.Color
bestIndex := -1
for i, s := range swatches {
if d := c0.DistanceCIE94(s); d < minDist {
minDist, best, bestIndex = d, s, i
}
}
return fromColorful(best), bestIndex
}
const outputPrefix = "/tmp/kami_" // see savePNG
// Encodes the image as PNG with the filename outputPrefix+name+".png".
func savePNG(name string, img image.Image) {
filename := outputPrefix + name + ".png"
fp, err := os.Create(filename)
if err != nil {
log.Fatal(err)
}
err = png.Encode(fp, img)
if err != nil {
log.Fatal(err)
}
}
// Decodes the specified file as a PNG image.
func loadPNG(filename string) image.Image {
fp, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
img, err := png.Decode(fp)
if err != nil {
log.Fatal(err)
}
return img
}
func toColorful(c color.Color) colorful.Color {
c0 := color.RGBAModel.Convert(c).(color.RGBA)
// c0's components are 8-bit values, so normalize by 255 to get channels in [0, 1].
return colorful.Color{
R: float64(c0.R) / 255.0,
G: float64(c0.G) / 255.0,
B: float64(c0.B) / 255.0,
}
}
func fromColorful(c colorful.Color) color.Color {
// RGBA returns 16-bit channels; shift down to 8 bits for color.RGBA.
r, g, b, a := c.RGBA()
return color.RGBA{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8), A: uint8(a >> 8)}
}
// Reports the average color of an image.
func averageColor(src image.Image) color.Color {
b := src.Bounds()
var sum struct{ c, y, m, k float64 }
n := 0
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
c := color.CMYKModel.Convert(src.At(x, y)).(color.CMYK)
sum.c += float64(c.C)
sum.m += float64(c.M)
sum.y += float64(c.Y)
sum.k += float64(c.K)
n++
}
}
d := float64(n)
return color.CMYK{
C: uint8(sum.c / d),
M: uint8(sum.m / d),
Y: uint8(sum.y / d),
K: uint8(sum.k / d),
}
}
// Converts an image to the RGBA type.
func convertToRGBA(src image.Image) *image.RGBA {
b := src.Bounds()
m := image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(m, m.Bounds(), src, b.Min, draw.Src)
return m
}
func parseInt(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
log.Fatal(err)
}
return n
}
type Tile struct{ row, col int }
func (t Tile) add(v Tile) Tile {
return Tile{row: t.row + v.row, col: t.col + v.col}
}
var dirs = []Tile{{-1, 0}, {+1, 0}, {0, -1}, {0, +1}}
func adjacentTiles(t Tile) [4]Tile {
var a [4]Tile
for i, d := range dirs {
a[i] = t.add(d)
}
return a
}
type TileSet struct {
id int
color int
tiles map[Tile]bool
}
func newTileSet(id, color int) *TileSet {
return &TileSet{
id: id,
color: color,
tiles: make(map[Tile]bool),
}
}
func (ts *TileSet) add(t Tile) { ts.tiles[t] = true }
func (ts *TileSet) remove(t Tile) { delete(ts.tiles, t) }
func (ts *TileSet) pop() Tile {
var t Tile
for t = range ts.tiles {
break
}
ts.remove(t)
return t
}
func (ts *TileSet) contains(t Tile) bool { return ts.tiles[t] }
func (ts *TileSet) empty() bool { return len(ts.tiles) == 0 }
func (ts *TileSet) topLeft() Tile {
t0 := Tile{row: ROWS, col: COLS}
for t1 := range ts.tiles {
if t1.row < t0.row || (t1.row == t0.row && t1.col < t0.col) {
t0 = t1
}
}
return t0
}
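// findRegions flood-fills the grid into maximal same-colored regions and records which
// regions border each other.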
func findRegions(grid [][]int) map[int]*Region {
tilesLeft := newTileSet(-1, -1)
for r := 0; r < ROWS; r++ {
for c := 0; c < COLS; c++ {
tilesLeft.add(Tile{r, c})
}
}
var findReachable func(tiles *TileSet, color int, from Tile)
findReachable = func(tiles *TileSet, color int, from Tile) {
for _, tile := range adjacentTiles(from) {
if tilesLeft.contains(tile) && grid[tile.row][tile.col] == color {
tilesLeft.remove(tile)
tiles.add(tile)
findReachable(tiles, color, tile)
}
}
}
var tileSets []*TileSet
idSeq := 0
for !tilesLeft.empty() {
tile := tilesLeft.pop()
color := grid[tile.row][tile.col]
tiles := newTileSet(idSeq, color)
tiles.add(tile)
idSeq++
findReachable(tiles, color, tile)
tileSets = append(tileSets, tiles)
}
adjacent := func(ts1, ts2 *TileSet) bool {
for a := range ts1.tiles {
for _, b := range adjacentTiles(a) {
if ts2.contains(b) {
return true
}
}
}
return false
}
regions := make(map[int]*Region)
for i, ts := range tileSets {
regions[i] = &Region{
id: ts.id,
color: ts.color,
tile: ts.topLeft(),
}
}
for i, ts1 := range tileSets {
r1 := regions[ts1.id]
for _, ts2 := range tileSets[i+1:] {
if adjacent(ts1, ts2) {
r2 := regions[ts2.id]
r1.neighbors.add(r2.id)
r2.neighbors.add(r1.id)
}
}
}
return regions
}
type Region struct {
id, color int
tile Tile
neighbors intSet
}
func (r *Region) Copy() *Region {
r1 := *r
r1.neighbors = r.neighbors.Copy()
return &r1
}
type Board struct {
regions map[int]*Region
}
func newBoard(grid [][]int) *Board { return &Board{regions: findRegions(grid)} }
func (b *Board) solved() bool {
return len(b.regions) == 1
}
func (b *Board) numColors() int {
seen := make([]bool, MAX_COLOR)
for _, r := range b.regions {
seen[r.color] = true
}
n := 0
for _, b := range seen {
if b {
n++
}
}
return n
}
func (b *Board) colorsAdjacentToRegion(region *Region) intSet {
colors := newIntSet(MAX_COLOR)
for _, neighborID := range region.neighbors {
neighbor := b.regions[neighborID]
colors.add(neighbor.color)
}
return colors
}
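// recolor returns a new Board in which the given region takes the new color and absorbs any
// neighbors that already have that color. Unchanged regions are shared with the old board;
// only modified regions are copied.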
func (b *Board) recolor(regionID, color int) *Board {
recolored := b.regions[regionID].Copy()
recolored.color = color
removed := newIntSet(len(recolored.neighbors))
for _, id := range recolored.neighbors {
neighbor := b.regions[id]
if neighbor.color == color {
removed.add(id)
}
}
regions := make(map[int]*Region, len(b.regions)-len(removed))
for _, r := range b.regions {
if !removed.contains(r.id) {
regions[r.id] = r
}
}
regions[recolored.id] = recolored
copyOnWrite := func(r *Region) *Region {
if regions[r.id] != b.regions[r.id] {
return r
}
c := r.Copy()
regions[c.id] = c
return c
}
for _, removedID := range removed {
for _, neighborID := range b.regions[removedID].neighbors {
if neighborID == recolored.id {
continue
}
if nn, ok := regions[neighborID]; ok {
nn = copyOnWrite(nn)
nn.neighbors.remove(removedID)
nn.neighbors.add(recolored.id)
recolored.neighbors.add(nn.id)
}
}
recolored.neighbors.remove(removedID)
}
return &Board{regions: regions}
}
type intSet []int
func newIntSet(cap int) intSet { return make([]int, 0, cap) }
func (s intSet) Copy() intSet {
a := make([]int, len(s))
copy(a, s)
return a
}
func (s intSet) contains(x int) bool {
for _, y := range s {
if x == y {
return true
}
}
return false
}
func (s *intSet) add(x int) {
if !s.contains(x) {
*s = append(*s, x)
}
}
func (s *intSet) remove(x int) {
for i, y := range *s {
if x == y {
(*s)[i] = (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
return
}
}
}
func getRegions(b *Board) []*Region {
a := make([]*Region, 0, len(b.regions))
for _, r := range b.regions {
a = append(a, r)
}
sort.Sort(byNumNeighbors(a))
return a
}
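// byNumNeighbors sorts regions by neighbor count in descending order, so the most connected
// regions are tried first.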
type byNumNeighbors []*Region
func (a byNumNeighbors) Len() int { return len(a) }
func (a byNumNeighbors) Less(i, j int) bool { return len(a[i].neighbors) > len(a[j].neighbors) }
func (a byNumNeighbors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type Move struct {
tile Tile
color int
}
type work struct {
board *Board
regionID int
color int
movesLeft int
}
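// search tries to reduce the board to a single region by repeatedly recoloring the given
// region, returning the moves in reverse order, or nil if it cannot be done within movesLeft
// moves. Branches that still contain more distinct colors than moves remaining (plus one) are pruned.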
func search(b *Board, regionID int, movesLeft int) []Move {
switch {
case b.numColors() > movesLeft+1:
return nil
case b.solved():
return []Move{}
case movesLeft <= 0:
return nil
}
r := b.regions[regionID]
for _, color := range b.colorsAdjacentToRegion(r) {
moves := search(b.recolor(r.id, color), r.id, movesLeft-1)
if moves != nil {
return append(moves, Move{r.tile, color})
}
}
return nil
}
func workerProcess(c1 <-chan work, c2 chan<- []Move) {
for work := range c1 {
newBoard := work.board.recolor(work.regionID, work.color)
moves := search(newBoard, work.regionID, work.movesLeft-1)
if moves != nil {
r := newBoard.regions[work.regionID]
c2 <- append(moves, Move{r.tile, work.color})
return
}
}
c2 <- nil
}
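// solve fans the candidate first moves out to numWorkers goroutines and returns the first
// solution found, reversed into play order.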
func solve(b *Board, maxMoves int, numWorkers int) []Move {
workChan := make(chan work)
solutionChan := make(chan []Move)
// Launch consumers
for i := 0; i < numWorkers; i++ {
go workerProcess(workChan, solutionChan)
}
// Launch producer
go func() {
for _, region := range getRegions(b) {
colors := b.colorsAdjacentToRegion(region)
for _, color := range colors {
fmt.Printf("go region %3d: color %d -> %d\n", region.id, region.color, color)
workChan <- work{
board: b,
regionID: region.id,
color: color,
movesLeft: maxMoves,
}
}
}
close(workChan) // no more work
}() | moves := <-solutionChan
if moves != nil {
for i, j := 0, len(moves)-1; i < j; i, j = i+1, j-1 {
moves[i], moves[j] = moves[j], moves[i]
}
return moves
}
}
return nil
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 3 {
fmt.Printf("usage: gokami <ncolors> <nmoves> <filename>\n")
os.Exit(0)
}
numColors := parseInt(args[0])
numMoves := parseInt(args[1])
filename := args[2]
board := processImage(loadPNG(filename), numColors)
printBoard(board)
fmt.Println("")
solution := solve(board, numMoves, 8)
fmt.Println("")
for i, move := range solution {
fmt.Printf("move %2d: (%d, %d) -> %d\n", i+1, move.tile.row, move.tile.col, move.color)
}
}
func printBoard(b *Board) {
var ids []int
for _, r := range b.regions {
ids = append(ids, r.id)
}
sort.Ints(ids)
for _, id := range ids {
r := b.regions[id]
fmt.Printf("region %3d: (%2d, %2d) -> %d %v\n", id, r.tile.row, r.tile.col, r.color, r.neighbors)
}
} |
// Wait for a solution
for i := 0; i < numWorkers; i++ { | random_line_split |
solve_kami.go | // Accepts iPhone screenshots of a Kami gameboard and prints a solution.
// See https://itunes.apple.com/us/app/kami/id710724007
package main
import (
"flag"
"fmt"
"image"
"image/color"
"image/draw"
"image/png"
"log"
"math"
"os"
"sort"
"strconv"
colorful "github.com/lucasb-eyer/go-colorful"
)
const (
ROWS = 16
COLS = 10
MAX_COLOR = 10
)
type empty struct{}
// Creates a two-dimensional slice of ints with r rows and c cols,
// ensuring the rows are in contiguous memory.
func newIntMatrix(r, c int) [][]int {
mem := make([]int, r*c)
mat := make([][]int, r)
for i := 0; i < r; i++ {
mat[i], mem = mem[:c], mem[c:]
}
return mat
}
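// processImage slices the screenshot into a ROWS x COLS grid, matches each tile's average
// color to the nearest swatch, writes a debug image, and builds the resulting Board.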
func processImage(src image.Image, numColors int) *Board {
img := convertToRGBA(src)
b := img.Bounds()
tileSize := b.Dx() / COLS
m := image.NewRGBA(image.Rect(0, 0, COLS*tileSize, ROWS*tileSize))
draw.Draw(m, m.Bounds(), &image.Uniform{color.Black}, image.ZP, draw.Src)
grid := newIntMatrix(ROWS, COLS)
swatches := extractSwatches(img, numColors)
for col := 0; col < COLS; col++ {
for row := 0; row < ROWS; row++ {
sr := image.Rect(
b.Min.X+col*tileSize, b.Min.Y+row*tileSize,
b.Min.X+(col+1)*tileSize, b.Min.Y+(row+1)*tileSize,
)
sr = trimRect(sr, 5)
c, x := nearestSwatch(swatches, averageColor(img.SubImage(sr)))
dr := image.Rect(col*tileSize, row*tileSize, (col+1)*tileSize, (row+1)*tileSize)
dr = trimRect(dr, 5)
draw.Draw(m, dr, &image.Uniform{c}, dr.Min, draw.Src)
grid[row][col] = x + 1
}
}
savePNG("processed", m)
findRegions(grid)
return newBoard(grid)
}
// Shrinks the rectangle by n pixels in each dimension.
func trimRect(r image.Rectangle, n int) image.Rectangle {
return image.Rect(r.Min.X+n, r.Min.Y+n, r.Max.X-n, r.Max.Y-n)
}
// Processes the swatches section of the board image. Also creates some
// files in /tmp for diagnostic purposes.
func extractSwatches(src *image.RGBA, numColors int) []colorful.Color {
const (
W = 400
H = 75
)
var swatches []colorful.Color
b := src.Bounds()
sw := W / numColors
for i := 0; i < numColors; i++ {
m := src.SubImage(trimRect(image.Rect(b.Min.X+i*sw, b.Max.Y-H, b.Min.X+(i+1)*sw, b.Max.Y), 10))
swatches = append(swatches, toColorful(averageColor(m)))
savePNG(strconv.Itoa(i), m) // for debugging
}
const dim = 50
m := image.NewRGBA(image.Rect(0, 0, dim*len(swatches), dim))
for i, c := range swatches {
r := image.Rect(i*dim, 0, (i+1)*dim, dim)
draw.Draw(m, r, &image.Uniform{fromColorful(c)}, image.ZP, draw.Src)
}
savePNG("swatches", m) // for debugging
return swatches
}
// Reports which of the swatches is most similar to c.
func nearestSwatch(swatches []colorful.Color, c color.Color) (color.Color, int) {
c0 := toColorful(c)
minDist := math.MaxFloat64
var best colorful.Color
bestIndex := -1
for i, s := range swatches {
if d := c0.DistanceCIE94(s); d < minDist {
minDist, best, bestIndex = d, s, i
}
}
return fromColorful(best), bestIndex
}
const outputPrefix = "/tmp/kami_" // see savePNG
// Encodes the image as PNG with the filename outputPrefix+name+".png".
func savePNG(name string, img image.Image) {
filename := outputPrefix + name + ".png"
fp, err := os.Create(filename)
if err != nil {
log.Fatal(err)
}
err = png.Encode(fp, img)
if err != nil {
log.Fatal(err)
}
}
// Decodes the specified file as a PNG image.
func loadPNG(filename string) image.Image {
fp, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
img, err := png.Decode(fp)
if err != nil {
log.Fatal(err)
}
return img
}
func toColorful(c color.Color) colorful.Color {
c0 := color.RGBAModel.Convert(c).(color.RGBA)
// c0's components are 8-bit values, so normalize by 255 to get channels in [0, 1].
return colorful.Color{
R: float64(c0.R) / 255.0,
G: float64(c0.G) / 255.0,
B: float64(c0.B) / 255.0,
}
}
func fromColorful(c colorful.Color) color.Color {
// RGBA returns 16-bit channels; shift down to 8 bits for color.RGBA.
r, g, b, a := c.RGBA()
return color.RGBA{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8), A: uint8(a >> 8)}
}
// Reports the average color of an image.
func averageColor(src image.Image) color.Color {
b := src.Bounds()
var sum struct{ c, y, m, k float64 }
n := 0
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
c := color.CMYKModel.Convert(src.At(x, y)).(color.CMYK)
sum.c += float64(c.C)
sum.m += float64(c.M)
sum.y += float64(c.Y)
sum.k += float64(c.K)
n++
}
}
d := float64(n)
return color.CMYK{
C: uint8(sum.c / d),
M: uint8(sum.m / d),
Y: uint8(sum.y / d),
K: uint8(sum.k / d),
}
}
// Converts an image to the RGBA type.
func convertToRGBA(src image.Image) *image.RGBA {
b := src.Bounds()
m := image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(m, m.Bounds(), src, b.Min, draw.Src)
return m
}
func parseInt(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
log.Fatal(err)
}
return n
}
type Tile struct{ row, col int }
func (t Tile) add(v Tile) Tile {
return Tile{row: t.row + v.row, col: t.col + v.col}
}
var dirs = []Tile{{-1, 0}, {+1, 0}, {0, -1}, {0, +1}}
func adjacentTiles(t Tile) [4]Tile {
var a [4]Tile
for i, d := range dirs {
a[i] = t.add(d)
}
return a
}
type TileSet struct {
id int
color int
tiles map[Tile]bool
}
func newTileSet(id, color int) *TileSet {
return &TileSet{
id: id,
color: color,
tiles: make(map[Tile]bool),
}
}
func (ts *TileSet) add(t Tile) { ts.tiles[t] = true }
func (ts *TileSet) remove(t Tile) { delete(ts.tiles, t) }
func (ts *TileSet) pop() Tile {
var t Tile
for t = range ts.tiles {
break
}
ts.remove(t)
return t
}
func (ts *TileSet) contains(t Tile) bool { return ts.tiles[t] }
func (ts *TileSet) empty() bool { return len(ts.tiles) == 0 }
func (ts *TileSet) topLeft() Tile {
t0 := Tile{row: ROWS, col: COLS}
for t1 := range ts.tiles {
if t1.row < t0.row || (t1.row == t0.row && t1.col < t0.col) {
t0 = t1
}
}
return t0
}
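// findRegions flood-fills the grid into maximal same-colored regions and records which
// regions border each other.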
func findRegions(grid [][]int) map[int]*Region {
tilesLeft := newTileSet(-1, -1)
for r := 0; r < ROWS; r++ {
for c := 0; c < COLS; c++ {
tilesLeft.add(Tile{r, c})
}
}
var findReachable func(tiles *TileSet, color int, from Tile)
findReachable = func(tiles *TileSet, color int, from Tile) {
for _, tile := range adjacentTiles(from) {
if tilesLeft.contains(tile) && grid[tile.row][tile.col] == color {
tilesLeft.remove(tile)
tiles.add(tile)
findReachable(tiles, color, tile)
}
}
}
var tileSets []*TileSet
idSeq := 0
for !tilesLeft.empty() {
tile := tilesLeft.pop()
color := grid[tile.row][tile.col]
tiles := newTileSet(idSeq, color)
tiles.add(tile)
idSeq++
findReachable(tiles, color, tile)
tileSets = append(tileSets, tiles)
}
adjacent := func(ts1, ts2 *TileSet) bool {
for a := range ts1.tiles {
for _, b := range adjacentTiles(a) {
if ts2.contains(b) {
return true
}
}
}
return false
}
regions := make(map[int]*Region)
for i, ts := range tileSets {
regions[i] = &Region{
id: ts.id,
color: ts.color,
tile: ts.topLeft(),
}
}
for i, ts1 := range tileSets {
r1 := regions[ts1.id]
for _, ts2 := range tileSets[i+1:] |
}
return regions
}
type Region struct {
id, color int
tile Tile
neighbors intSet
}
func (r *Region) Copy() *Region {
r1 := *r
r1.neighbors = r.neighbors.Copy()
return &r1
}
type Board struct {
regions map[int]*Region
}
func newBoard(grid [][]int) *Board { return &Board{regions: findRegions(grid)} }
func (b *Board) solved() bool {
return len(b.regions) == 1
}
func (b *Board) numColors() int {
seen := make([]bool, MAX_COLOR)
for _, r := range b.regions {
seen[r.color] = true
}
n := 0
for _, b := range seen {
if b {
n++
}
}
return n
}
func (b *Board) colorsAdjacentToRegion(region *Region) intSet {
colors := newIntSet(MAX_COLOR)
for _, neighborID := range region.neighbors {
neighbor := b.regions[neighborID]
colors.add(neighbor.color)
}
return colors
}
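// recolor returns a new Board in which the given region takes the given
// color and absorbs every neighbor that already had that color. Regions that
// are not affected are shared with the original board; only regions whose
// neighbor sets change are copied (via the copyOnWrite helper inside).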
func (b *Board) recolor(regionID, color int) *Board {
recolored := b.regions[regionID].Copy()
recolored.color = color
removed := newIntSet(len(recolored.neighbors))
for _, id := range recolored.neighbors {
neighbor := b.regions[id]
if neighbor.color == color {
removed.add(id)
}
}
regions := make(map[int]*Region, len(b.regions)-len(removed))
for _, r := range b.regions {
if !removed.contains(r.id) {
regions[r.id] = r
}
}
regions[recolored.id] = recolored
copyOnWrite := func(r *Region) *Region {
if regions[r.id] != b.regions[r.id] {
return r
}
c := r.Copy()
regions[c.id] = c
return c
}
for _, removedID := range removed {
for _, neighborID := range b.regions[removedID].neighbors {
if neighborID == recolored.id {
continue
}
if nn, ok := regions[neighborID]; ok {
nn = copyOnWrite(nn)
nn.neighbors.remove(removedID)
nn.neighbors.add(recolored.id)
recolored.neighbors.add(nn.id)
}
}
recolored.neighbors.remove(removedID)
}
return &Board{regions: regions}
}
type intSet []int
func newIntSet(cap int) intSet { return make([]int, 0, cap) }
func (s intSet) Copy() intSet {
a := make([]int, len(s))
copy(a, s)
return a
}
func (s intSet) contains(x int) bool {
for _, y := range s {
if x == y {
return true
}
}
return false
}
func (s *intSet) add(x int) {
if !s.contains(x) {
*s = append(*s, x)
}
}
func (s *intSet) remove(x int) {
for i, y := range *s {
if x == y {
(*s)[i] = (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
return
}
}
}
func getRegions(b *Board) []*Region {
a := make([]*Region, 0, len(b.regions))
for _, r := range b.regions {
a = append(a, r)
}
sort.Sort(byNumNeighbors(a))
return a
}
type byNumNeighbors []*Region
func (a byNumNeighbors) Len() int { return len(a) }
func (a byNumNeighbors) Less(i, j int) bool { return len(a[i].neighbors) > len(a[j].neighbors) }
func (a byNumNeighbors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type Move struct {
tile Tile
color int
}
type work struct {
board *Board
regionID int
color int
movesLeft int
}
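// search runs a depth-first search from the given region, trying every color
// adjacent to it and recursing with one fewer move. The numColors() check
// prunes branches that cannot merge all remaining colors in the moves left.
// Moves come back in reverse order (final move first); solve reverses them
// into play order.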
func search(b *Board, regionID int, movesLeft int) []Move {
switch {
case b.numColors() > movesLeft+1:
return nil
case b.solved():
return []Move{}
case movesLeft <= 0:
return nil
}
r := b.regions[regionID]
for _, color := range b.colorsAdjacentToRegion(r) {
moves := search(b.recolor(r.id, color), r.id, movesLeft-1)
if moves != nil {
return append(moves, Move{r.tile, color})
}
}
return nil
}
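// workerProcess consumes work items, applies the opening recoloring, and
// runs the sequential search for the remaining moves. It sends the first
// solution it finds, or nil once the work channel is closed and drained.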
func workerProcess(c1 <-chan work, c2 chan<- []Move) {
for work := range c1 {
newBoard := work.board.recolor(work.regionID, work.color)
moves := search(newBoard, work.regionID, work.movesLeft-1)
if moves != nil {
r := newBoard.regions[work.regionID]
c2 <- append(moves, Move{r.tile, work.color})
return
}
}
c2 <- nil
}
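// solve fans the opening move out to a pool of workers: each work item fixes
// one (region, color) first move, and the worker then searches the rest of
// that branch sequentially, so independent branches run in parallel.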
func solve(b *Board, maxMoves int, numWorkers int) []Move {
workChan := make(chan work)
solutionChan := make(chan []Move)
// Launch consumers
for i := 0; i < numWorkers; i++ {
go workerProcess(workChan, solutionChan)
}
// Launch producer
go func() {
for _, region := range getRegions(b) {
colors := b.colorsAdjacentToRegion(region)
for _, color := range colors {
fmt.Printf("go region %3d: color %d -> %d\n", region.id, region.color, color)
workChan <- work{
board: b,
regionID: region.id,
color: color,
movesLeft: maxMoves,
}
}
}
close(workChan) // no more work
}()
// Wait for a solution
for i := 0; i < numWorkers; i++ {
moves := <-solutionChan
if moves != nil {
for i, j := 0, len(moves)-1; i < j; i, j = i+1, j-1 {
moves[i], moves[j] = moves[j], moves[i]
}
return moves
}
}
return nil
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 3 {
fmt.Printf("usage: gokami <ncolors> <nmoves> <filename>\n")
os.Exit(0)
}
numColors := parseInt(args[0])
numMoves := parseInt(args[1])
filename := args[2]
board := processImage(loadPNG(filename), numColors)
printBoard(board)
fmt.Println("")
solution := solve(board, numMoves, 8)
fmt.Println("")
for i, move := range solution {
fmt.Printf("move %2d: (%d, %d) -> %d\n", i+1, move.tile.row, move.tile.col, move.color)
}
}
func printBoard(b *Board) {
var ids []int
for _, r := range b.regions {
ids = append(ids, r.id)
}
sort.Ints(ids)
for _, id := range ids {
r := b.regions[id]
fmt.Printf("region %3d: (%2d, %2d) -> %d %v\n", id, r.tile.row, r.tile.col, r.color, r.neighbors)
}
}
| {
if adjacent(ts1, ts2) {
r2 := regions[ts2.id]
r1.neighbors.add(r2.id)
r2.neighbors.add(r1.id)
}
} | conditional_block |
solve_kami.go | // Accepts iPhone screenshots of a Kami gameboard and prints a solution.
// See https://itunes.apple.com/us/app/kami/id710724007
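//
// A typical invocation might be (the color count, move limit, and file name
// here are examples only; the argument order matches the usage string
// printed by main):
//
//	go run solve_kami.go 4 9 board.png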
package main
import (
"flag"
"fmt"
"image"
"image/color"
"image/draw"
"image/png"
"log"
"math"
"os"
"sort"
"strconv"
colorful "github.com/lucasb-eyer/go-colorful"
)
const (
ROWS = 16
COLS = 10
MAX_COLOR = 10
)
type empty struct{}
// Creates a two-dimensional slice of ints with r rows and c cols,
// ensuring the rows are in contiguous memory.
func newIntMatrix(r, c int) [][]int {
mem := make([]int, r*c)
mat := make([][]int, r)
for i := 0; i < r; i++ {
mat[i], mem = mem[:c], mem[c:]
}
return mat
}
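// processImage maps every board tile to the nearest of the extracted swatch
// colors, writes a diagnostic "processed" image, and returns the resulting
// Board.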
func processImage(src image.Image, numColors int) *Board {
img := convertToRGBA(src)
b := img.Bounds()
tileSize := b.Dx() / COLS
m := image.NewRGBA(image.Rect(0, 0, COLS*tileSize, ROWS*tileSize))
draw.Draw(m, m.Bounds(), &image.Uniform{color.Black}, image.ZP, draw.Src)
grid := newIntMatrix(ROWS, COLS)
swatches := extractSwatches(img, numColors)
for col := 0; col < COLS; col++ {
for row := 0; row < ROWS; row++ {
sr := image.Rect(
b.Min.X+col*tileSize, b.Min.Y+row*tileSize,
b.Min.X+(col+1)*tileSize, b.Min.Y+(row+1)*tileSize,
)
sr = trimRect(sr, 5)
c, x := nearestSwatch(swatches, averageColor(img.SubImage(sr)))
dr := image.Rect(col*tileSize, row*tileSize, (col+1)*tileSize, (row+1)*tileSize)
dr = trimRect(dr, 5)
draw.Draw(m, dr, &image.Uniform{c}, dr.Min, draw.Src)
grid[row][col] = x + 1
}
}
savePNG("processed", m)
findRegions(grid)
return newBoard(grid)
}
// Shrinks the rectangle by n pixels in each dimension.
func trimRect(r image.Rectangle, n int) image.Rectangle {
return image.Rect(r.Min.X+n, r.Min.Y+n, r.Max.X-n, r.Max.Y-n)
}
// Processes the swatches section of the board image. Also creates some
// files in /tmp for diagnostic purposes.
func extractSwatches(src *image.RGBA, numColors int) []colorful.Color {
const (
W = 400
H = 75
)
var swatches []colorful.Color
b := src.Bounds()
sw := W / numColors
for i := 0; i < numColors; i++ {
m := src.SubImage(trimRect(image.Rect(b.Min.X+i*sw, b.Max.Y-H, b.Min.X+(i+1)*sw, b.Max.Y), 10))
swatches = append(swatches, toColorful(averageColor(m)))
savePNG(strconv.Itoa(i), m) // for debugging
}
const dim = 50
m := image.NewRGBA(image.Rect(0, 0, dim*len(swatches), dim))
for i, c := range swatches {
r := image.Rect(i*dim, 0, (i+1)*dim, dim)
draw.Draw(m, r, &image.Uniform{fromColorful(c)}, image.ZP, draw.Src)
}
savePNG("swatches", m) // for debugging
return swatches
}
// Reports which of the swatches is most similar to c.
func nearestSwatch(swatches []colorful.Color, c color.Color) (color.Color, int) {
c0 := toColorful(c)
minDist := math.MaxFloat64
var best colorful.Color
bestIndex := -1
for i, s := range swatches {
if d := c0.DistanceCIE94(s); d < minDist {
minDist, best, bestIndex = d, s, i
}
}
return fromColorful(best), bestIndex
}
const outputPrefix = "/tmp/kami_" // see savePNG
// Encodes the image as PNG with the filename outputPrefix+name+".png".
func savePNG(name string, img image.Image) {
filename := outputPrefix + name + ".png"
fp, err := os.Create(filename)
if err != nil {
log.Fatal(err)
}
err = png.Encode(fp, img)
if err != nil {
log.Fatal(err)
}
}
// Decodes the specified file as a PNG image.
func loadPNG(filename string) image.Image {
fp, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
img, err := png.Decode(fp)
if err != nil {
log.Fatal(err)
}
return img
}
func toColorful(c color.Color) colorful.Color {
c0 := color.RGBAModel.Convert(c).(color.RGBA)
return colorful.Color{
R: float64(c0.R) / float64(0xFF),
G: float64(c0.G) / float64(0xFF),
B: float64(c0.B) / float64(0xFF),
}
}
func fromColorful(c colorful.Color) color.Color {
r, g, b, a := c.RGBA()
return color.RGBA{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8), A: uint8(a >> 8)}
}
// Reports the average color of an image.
func averageColor(src image.Image) color.Color {
b := src.Bounds()
var sum struct{ c, y, m, k float64 }
n := 0
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
c := color.CMYKModel.Convert(src.At(x, y)).(color.CMYK)
sum.c += float64(c.C)
sum.m += float64(c.M)
sum.y += float64(c.Y)
sum.k += float64(c.K)
n++
}
}
d := float64(n)
return color.CMYK{
C: uint8(sum.c / d),
M: uint8(sum.m / d),
Y: uint8(sum.y / d),
K: uint8(sum.k / d),
}
}
// Converts an image to the RGBA type.
func convertToRGBA(src image.Image) *image.RGBA {
b := src.Bounds()
m := image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(m, m.Bounds(), src, b.Min, draw.Src)
return m
}
func parseInt(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
log.Fatal(err)
}
return n
}
type Tile struct{ row, col int }
func (t Tile) add(v Tile) Tile {
return Tile{row: t.row + v.row, col: t.col + v.col}
}
var dirs = []Tile{{-1, 0}, {+1, 0}, {0, -1}, {0, +1}}
func adjacentTiles(t Tile) [4]Tile {
var a [4]Tile
for i, d := range dirs {
a[i] = t.add(d)
}
return a
}
type TileSet struct {
id int
color int
tiles map[Tile]bool
}
func newTileSet(id, color int) *TileSet {
return &TileSet{
id: id,
color: color,
tiles: make(map[Tile]bool),
}
}
func (ts *TileSet) add(t Tile) { ts.tiles[t] = true }
func (ts *TileSet) remove(t Tile) { delete(ts.tiles, t) }
func (ts *TileSet) pop() Tile {
var t Tile
for t = range ts.tiles {
break
}
ts.remove(t)
return t
}
func (ts *TileSet) contains(t Tile) bool { return ts.tiles[t] }
func (ts *TileSet) empty() bool { return len(ts.tiles) == 0 }
func (ts *TileSet) topLeft() Tile {
t0 := Tile{row: ROWS, col: COLS}
for t1 := range ts.tiles {
if t1.row < t0.row || (t1.row == t0.row && t1.col < t0.col) {
t0 = t1
}
}
return t0
}
func findRegions(grid [][]int) map[int]*Region {
tilesLeft := newTileSet(-1, -1)
for r := 0; r < ROWS; r++ {
for c := 0; c < COLS; c++ {
tilesLeft.add(Tile{r, c})
}
}
var findReachable func(tiles *TileSet, color int, from Tile)
findReachable = func(tiles *TileSet, color int, from Tile) {
for _, tile := range adjacentTiles(from) {
if tilesLeft.contains(tile) && grid[tile.row][tile.col] == color {
tilesLeft.remove(tile)
tiles.add(tile)
findReachable(tiles, color, tile)
}
}
}
var tileSets []*TileSet
idSeq := 0
for !tilesLeft.empty() {
tile := tilesLeft.pop()
color := grid[tile.row][tile.col]
tiles := newTileSet(idSeq, color)
tiles.add(tile)
idSeq++
findReachable(tiles, color, tile)
tileSets = append(tileSets, tiles)
}
adjacent := func(ts1, ts2 *TileSet) bool {
for a := range ts1.tiles {
for _, b := range adjacentTiles(a) {
if ts2.contains(b) {
return true
}
}
}
return false
}
regions := make(map[int]*Region)
for i, ts := range tileSets {
regions[i] = &Region{
id: ts.id,
color: ts.color,
tile: ts.topLeft(),
}
}
for i, ts1 := range tileSets {
r1 := regions[ts1.id]
for _, ts2 := range tileSets[i+1:] {
if adjacent(ts1, ts2) {
r2 := regions[ts2.id]
r1.neighbors.add(r2.id)
r2.neighbors.add(r1.id)
}
}
}
return regions
}
type Region struct {
id, color int
tile Tile
neighbors intSet
}
func (r *Region) Copy() *Region {
r1 := *r
r1.neighbors = r.neighbors.Copy()
return &r1
}
type Board struct {
regions map[int]*Region
}
func newBoard(grid [][]int) *Board { return &Board{regions: findRegions(grid)} }
func (b *Board) solved() bool {
return len(b.regions) == 1
}
func (b *Board) numColors() int {
seen := make([]bool, MAX_COLOR)
for _, r := range b.regions {
seen[r.color] = true
}
n := 0
for _, b := range seen {
if b {
n++
}
}
return n
}
func (b *Board) colorsAdjacentToRegion(region *Region) intSet {
colors := newIntSet(MAX_COLOR)
for _, neighborID := range region.neighbors {
neighbor := b.regions[neighborID]
colors.add(neighbor.color)
}
return colors
}
func (b *Board) recolor(regionID, color int) *Board {
recolored := b.regions[regionID].Copy()
recolored.color = color
removed := newIntSet(len(recolored.neighbors))
for _, id := range recolored.neighbors {
neighbor := b.regions[id]
if neighbor.color == color {
removed.add(id)
}
}
regions := make(map[int]*Region, len(b.regions)-len(removed))
for _, r := range b.regions {
if !removed.contains(r.id) {
regions[r.id] = r
}
}
regions[recolored.id] = recolored
copyOnWrite := func(r *Region) *Region {
if regions[r.id] != b.regions[r.id] {
return r
}
c := r.Copy()
regions[c.id] = c
return c
}
for _, removedID := range removed {
for _, neighborID := range b.regions[removedID].neighbors {
if neighborID == recolored.id {
continue
}
if nn, ok := regions[neighborID]; ok {
nn = copyOnWrite(nn)
nn.neighbors.remove(removedID)
nn.neighbors.add(recolored.id)
recolored.neighbors.add(nn.id)
}
}
recolored.neighbors.remove(removedID)
}
return &Board{regions: regions}
}
type intSet []int
func newIntSet(cap int) intSet { return make([]int, 0, cap) }
func (s intSet) Copy() intSet |
func (s intSet) contains(x int) bool {
for _, y := range s {
if x == y {
return true
}
}
return false
}
func (s *intSet) add(x int) {
if !s.contains(x) {
*s = append(*s, x)
}
}
func (s *intSet) remove(x int) {
for i, y := range *s {
if x == y {
(*s)[i] = (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
return
}
}
}
func getRegions(b *Board) []*Region {
a := make([]*Region, 0, len(b.regions))
for _, r := range b.regions {
a = append(a, r)
}
sort.Sort(byNumNeighbors(a))
return a
}
type byNumNeighbors []*Region
func (a byNumNeighbors) Len() int { return len(a) }
func (a byNumNeighbors) Less(i, j int) bool { return len(a[i].neighbors) > len(a[j].neighbors) }
func (a byNumNeighbors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type Move struct {
tile Tile
color int
}
type work struct {
board *Board
regionID int
color int
movesLeft int
}
func search(b *Board, regionID int, movesLeft int) []Move {
switch {
case b.numColors() > movesLeft+1:
return nil
case b.solved():
return []Move{}
case movesLeft <= 0:
return nil
}
r := b.regions[regionID]
for _, color := range b.colorsAdjacentToRegion(r) {
moves := search(b.recolor(r.id, color), r.id, movesLeft-1)
if moves != nil {
return append(moves, Move{r.tile, color})
}
}
return nil
}
func workerProcess(c1 <-chan work, c2 chan<- []Move) {
for work := range c1 {
newBoard := work.board.recolor(work.regionID, work.color)
moves := search(newBoard, work.regionID, work.movesLeft-1)
if moves != nil {
r := newBoard.regions[work.regionID]
c2 <- append(moves, Move{r.tile, work.color})
return
}
}
c2 <- nil
}
func solve(b *Board, maxMoves int, numWorkers int) []Move {
workChan := make(chan work)
solutionChan := make(chan []Move)
// Launch consumers
for i := 0; i < numWorkers; i++ {
go workerProcess(workChan, solutionChan)
}
// Launch producer
go func() {
for _, region := range getRegions(b) {
colors := b.colorsAdjacentToRegion(region)
for _, color := range colors {
fmt.Printf("go region %3d: color %d -> %d\n", region.id, region.color, color)
workChan <- work{
board: b,
regionID: region.id,
color: color,
movesLeft: maxMoves,
}
}
}
close(workChan) // no more work
}()
// Wait for a solution
for i := 0; i < numWorkers; i++ {
moves := <-solutionChan
if moves != nil {
for i, j := 0, len(moves)-1; i < j; i, j = i+1, j-1 {
moves[i], moves[j] = moves[j], moves[i]
}
return moves
}
}
return nil
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 3 {
fmt.Printf("usage: gokami <ncolors> <nmoves> <filename>\n")
os.Exit(0)
}
numColors := parseInt(args[0])
numMoves := parseInt(args[1])
filename := args[2]
board := processImage(loadPNG(filename), numColors)
printBoard(board)
fmt.Println("")
solution := solve(board, numMoves, 8)
fmt.Println("")
for i, move := range solution {
fmt.Printf("move %2d: (%d, %d) -> %d\n", i+1, move.tile.row, move.tile.col, move.color)
}
}
func printBoard(b *Board) {
var ids []int
for _, r := range b.regions {
ids = append(ids, r.id)
}
sort.Ints(ids)
for _, id := range ids {
r := b.regions[id]
fmt.Printf("region %3d: (%2d, %2d) -> %d %v\n", id, r.tile.row, r.tile.col, r.color, r.neighbors)
}
}
| {
a := make([]int, len(s))
copy(a, s)
return a
} | identifier_body |
solve_kami.go | // Accepts iPhone screenshots of a Kami gameboard and prints a solution.
// See https://itunes.apple.com/us/app/kami/id710724007
package main
import (
"flag"
"fmt"
"image"
"image/color"
"image/draw"
"image/png"
"log"
"math"
"os"
"sort"
"strconv"
colorful "github.com/lucasb-eyer/go-colorful"
)
const (
ROWS = 16
COLS = 10
MAX_COLOR = 10
)
type empty struct{}
// Creates a two-dimensional slice of ints with r rows and c cols,
// ensuring the rows are in contiguous memory.
func newIntMatrix(r, c int) [][]int {
mem := make([]int, r*c)
mat := make([][]int, r)
for i := 0; i < r; i++ {
mat[i], mem = mem[:c], mem[c:]
}
return mat
}
func processImage(src image.Image, numColors int) *Board {
img := convertToRGBA(src)
b := img.Bounds()
tileSize := b.Dx() / COLS
m := image.NewRGBA(image.Rect(0, 0, COLS*tileSize, ROWS*tileSize))
draw.Draw(m, m.Bounds(), &image.Uniform{color.Black}, image.ZP, draw.Src)
grid := newIntMatrix(ROWS, COLS)
swatches := extractSwatches(img, numColors)
for col := 0; col < COLS; col++ {
for row := 0; row < ROWS; row++ {
sr := image.Rect(
b.Min.X+col*tileSize, b.Min.Y+row*tileSize,
b.Min.X+(col+1)*tileSize, b.Min.Y+(row+1)*tileSize,
)
sr = trimRect(sr, 5)
c, x := nearestSwatch(swatches, averageColor(img.SubImage(sr)))
dr := image.Rect(col*tileSize, row*tileSize, (col+1)*tileSize, (row+1)*tileSize)
dr = trimRect(dr, 5)
draw.Draw(m, dr, &image.Uniform{c}, dr.Min, draw.Src)
grid[row][col] = x + 1
}
}
savePNG("processed", m)
findRegions(grid)
return newBoard(grid)
}
// Shrinks the rectangle by n pixels in each dimension.
func trimRect(r image.Rectangle, n int) image.Rectangle {
return image.Rect(r.Min.X+n, r.Min.Y+n, r.Max.X-n, r.Max.Y-n)
}
// Processes the swatches section of the board image. Also creates some
// files in /tmp for diagnostic purposes.
func extractSwatches(src *image.RGBA, numColors int) []colorful.Color {
const (
W = 400
H = 75
)
var swatches []colorful.Color
b := src.Bounds()
sw := W / numColors
for i := 0; i < numColors; i++ {
m := src.SubImage(trimRect(image.Rect(b.Min.X+i*sw, b.Max.Y-H, b.Min.X+(i+1)*sw, b.Max.Y), 10))
swatches = append(swatches, toColorful(averageColor(m)))
savePNG(strconv.Itoa(i), m) // for debugging
}
const dim = 50
m := image.NewRGBA(image.Rect(0, 0, dim*len(swatches), dim))
for i, c := range swatches {
r := image.Rect(i*dim, 0, (i+1)*dim, dim)
draw.Draw(m, r, &image.Uniform{fromColorful(c)}, image.ZP, draw.Src)
}
savePNG("swatches", m) // for debugging
return swatches
}
// Reports which of the swatches is most similar to c.
func nearestSwatch(swatches []colorful.Color, c color.Color) (color.Color, int) {
c0 := toColorful(c)
minDist := math.MaxFloat64
var best colorful.Color
bestIndex := -1
for i, s := range swatches {
if d := c0.DistanceCIE94(s); d < minDist {
minDist, best, bestIndex = d, s, i
}
}
return fromColorful(best), bestIndex
}
const outputPrefix = "/tmp/kami_" // see savePNG
// Encodes the image as PNG with the filename outputPrefix+name+".png".
func | (name string, img image.Image) {
filename := outputPrefix + name + ".png"
fp, err := os.Create(filename)
if err != nil {
log.Fatal(err)
}
err = png.Encode(fp, img)
if err != nil {
log.Fatal(err)
}
}
// Decodes the specified file as a PNG image.
func loadPNG(filename string) image.Image {
fp, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
img, err := png.Decode(fp)
if err != nil {
log.Fatal(err)
}
return img
}
func toColorful(c color.Color) colorful.Color {
c0 := color.RGBAModel.Convert(c).(color.RGBA)
return colorful.Color{
R: float64(c0.R) / float64(0xFF),
G: float64(c0.G) / float64(0xFF),
B: float64(c0.B) / float64(0xFF),
}
}
func fromColorful(c colorful.Color) color.Color {
r, g, b, a := c.RGBA()
return color.RGBA{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8), A: uint8(a >> 8)}
}
// Reports the average color of an image.
func averageColor(src image.Image) color.Color {
b := src.Bounds()
var sum struct{ c, y, m, k float64 }
n := 0
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
c := color.CMYKModel.Convert(src.At(x, y)).(color.CMYK)
sum.c += float64(c.C)
sum.m += float64(c.M)
sum.y += float64(c.Y)
sum.k += float64(c.K)
n++
}
}
d := float64(n)
return color.CMYK{
C: uint8(sum.c / d),
M: uint8(sum.m / d),
Y: uint8(sum.y / d),
K: uint8(sum.k / d),
}
}
// Converts an image to the RGBA type.
func convertToRGBA(src image.Image) *image.RGBA {
b := src.Bounds()
m := image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(m, m.Bounds(), src, b.Min, draw.Src)
return m
}
func parseInt(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
log.Fatal(err)
}
return n
}
type Tile struct{ row, col int }
func (t Tile) add(v Tile) Tile {
return Tile{row: t.row + v.row, col: t.col + v.col}
}
var dirs = []Tile{{-1, 0}, {+1, 0}, {0, -1}, {0, +1}}
func adjacentTiles(t Tile) [4]Tile {
var a [4]Tile
for i, d := range dirs {
a[i] = t.add(d)
}
return a
}
type TileSet struct {
id int
color int
tiles map[Tile]bool
}
func newTileSet(id, color int) *TileSet {
return &TileSet{
id: id,
color: color,
tiles: make(map[Tile]bool),
}
}
func (ts *TileSet) add(t Tile) { ts.tiles[t] = true }
func (ts *TileSet) remove(t Tile) { delete(ts.tiles, t) }
func (ts *TileSet) pop() Tile {
var t Tile
for t = range ts.tiles {
break
}
ts.remove(t)
return t
}
func (ts *TileSet) contains(t Tile) bool { return ts.tiles[t] }
func (ts *TileSet) empty() bool { return len(ts.tiles) == 0 }
func (ts *TileSet) topLeft() Tile {
t0 := Tile{row: ROWS, col: COLS}
for t1 := range ts.tiles {
if t1.row < t0.row || (t1.row == t0.row && t1.col < t0.col) {
t0 = t1
}
}
return t0
}
func findRegions(grid [][]int) map[int]*Region {
tilesLeft := newTileSet(-1, -1)
for r := 0; r < ROWS; r++ {
for c := 0; c < COLS; c++ {
tilesLeft.add(Tile{r, c})
}
}
var findReachable func(tiles *TileSet, color int, from Tile)
findReachable = func(tiles *TileSet, color int, from Tile) {
for _, tile := range adjacentTiles(from) {
if tilesLeft.contains(tile) && grid[tile.row][tile.col] == color {
tilesLeft.remove(tile)
tiles.add(tile)
findReachable(tiles, color, tile)
}
}
}
var tileSets []*TileSet
idSeq := 0
for !tilesLeft.empty() {
tile := tilesLeft.pop()
color := grid[tile.row][tile.col]
tiles := newTileSet(idSeq, color)
tiles.add(tile)
idSeq++
findReachable(tiles, color, tile)
tileSets = append(tileSets, tiles)
}
adjacent := func(ts1, ts2 *TileSet) bool {
for a := range ts1.tiles {
for _, b := range adjacentTiles(a) {
if ts2.contains(b) {
return true
}
}
}
return false
}
regions := make(map[int]*Region)
for i, ts := range tileSets {
regions[i] = &Region{
id: ts.id,
color: ts.color,
tile: ts.topLeft(),
}
}
for i, ts1 := range tileSets {
r1 := regions[ts1.id]
for _, ts2 := range tileSets[i+1:] {
if adjacent(ts1, ts2) {
r2 := regions[ts2.id]
r1.neighbors.add(r2.id)
r2.neighbors.add(r1.id)
}
}
}
return regions
}
type Region struct {
id, color int
tile Tile
neighbors intSet
}
func (r *Region) Copy() *Region {
r1 := *r
r1.neighbors = r.neighbors.Copy()
return &r1
}
type Board struct {
regions map[int]*Region
}
func newBoard(grid [][]int) *Board { return &Board{regions: findRegions(grid)} }
func (b *Board) solved() bool {
return len(b.regions) == 1
}
func (b *Board) numColors() int {
seen := make([]bool, MAX_COLOR)
for _, r := range b.regions {
seen[r.color] = true
}
n := 0
for _, b := range seen {
if b {
n++
}
}
return n
}
func (b *Board) colorsAdjacentToRegion(region *Region) intSet {
colors := newIntSet(MAX_COLOR)
for _, neighborID := range region.neighbors {
neighbor := b.regions[neighborID]
colors.add(neighbor.color)
}
return colors
}
func (b *Board) recolor(regionID, color int) *Board {
recolored := b.regions[regionID].Copy()
recolored.color = color
removed := newIntSet(len(recolored.neighbors))
for _, id := range recolored.neighbors {
neighbor := b.regions[id]
if neighbor.color == color {
removed.add(id)
}
}
regions := make(map[int]*Region, len(b.regions)-len(removed))
for _, r := range b.regions {
if !removed.contains(r.id) {
regions[r.id] = r
}
}
regions[recolored.id] = recolored
copyOnWrite := func(r *Region) *Region {
if regions[r.id] != b.regions[r.id] {
return r
}
c := r.Copy()
regions[c.id] = c
return c
}
for _, removedID := range removed {
for _, neighborID := range b.regions[removedID].neighbors {
if neighborID == recolored.id {
continue
}
if nn, ok := regions[neighborID]; ok {
nn = copyOnWrite(nn)
nn.neighbors.remove(removedID)
nn.neighbors.add(recolored.id)
recolored.neighbors.add(nn.id)
}
}
recolored.neighbors.remove(removedID)
}
return &Board{regions: regions}
}
type intSet []int
func newIntSet(cap int) intSet { return make([]int, 0, cap) }
func (s intSet) Copy() intSet {
a := make([]int, len(s))
copy(a, s)
return a
}
func (s intSet) contains(x int) bool {
for _, y := range s {
if x == y {
return true
}
}
return false
}
func (s *intSet) add(x int) {
if !s.contains(x) {
*s = append(*s, x)
}
}
func (s *intSet) remove(x int) {
for i, y := range *s {
if x == y {
(*s)[i] = (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
return
}
}
}
func getRegions(b *Board) []*Region {
a := make([]*Region, 0, len(b.regions))
for _, r := range b.regions {
a = append(a, r)
}
sort.Sort(byNumNeighbors(a))
return a
}
type byNumNeighbors []*Region
func (a byNumNeighbors) Len() int { return len(a) }
func (a byNumNeighbors) Less(i, j int) bool { return len(a[i].neighbors) > len(a[j].neighbors) }
func (a byNumNeighbors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type Move struct {
tile Tile
color int
}
type work struct {
board *Board
regionID int
color int
movesLeft int
}
func search(b *Board, regionID int, movesLeft int) []Move {
switch {
case b.numColors() > movesLeft+1:
return nil
case b.solved():
return []Move{}
case movesLeft <= 0:
return nil
}
r := b.regions[regionID]
for _, color := range b.colorsAdjacentToRegion(r) {
moves := search(b.recolor(r.id, color), r.id, movesLeft-1)
if moves != nil {
return append(moves, Move{r.tile, color})
}
}
return nil
}
func workerProcess(c1 <-chan work, c2 chan<- []Move) {
for work := range c1 {
newBoard := work.board.recolor(work.regionID, work.color)
moves := search(newBoard, work.regionID, work.movesLeft-1)
if moves != nil {
r := newBoard.regions[work.regionID]
c2 <- append(moves, Move{r.tile, work.color})
return
}
}
c2 <- nil
}
func solve(b *Board, maxMoves int, numWorkers int) []Move {
workChan := make(chan work)
solutionChan := make(chan []Move)
// Launch consumers
for i := 0; i < numWorkers; i++ {
go workerProcess(workChan, solutionChan)
}
// Launch producer
go func() {
for _, region := range getRegions(b) {
colors := b.colorsAdjacentToRegion(region)
for _, color := range colors {
fmt.Printf("go region %3d: color %d -> %d\n", region.id, region.color, color)
workChan <- work{
board: b,
regionID: region.id,
color: color,
movesLeft: maxMoves,
}
}
}
close(workChan) // no more work
}()
// Wait for a solution
for i := 0; i < numWorkers; i++ {
moves := <-solutionChan
if moves != nil {
for i, j := 0, len(moves)-1; i < j; i, j = i+1, j-1 {
moves[i], moves[j] = moves[j], moves[i]
}
return moves
}
}
return nil
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 3 {
fmt.Printf("usage: gokami <ncolors> <nmoves> <filename>\n")
os.Exit(0)
}
numColors := parseInt(args[0])
numMoves := parseInt(args[1])
filename := args[2]
board := processImage(loadPNG(filename), numColors)
printBoard(board)
fmt.Println("")
solution := solve(board, numMoves, 8)
fmt.Println("")
for i, move := range solution {
fmt.Printf("move %2d: (%d, %d) -> %d\n", i+1, move.tile.row, move.tile.col, move.color)
}
}
func printBoard(b *Board) {
var ids []int
for _, r := range b.regions {
ids = append(ids, r.id)
}
sort.Ints(ids)
for _, id := range ids {
r := b.regions[id]
fmt.Printf("region %3d: (%2d, %2d) -> %d %v\n", id, r.tile.row, r.tile.col, r.color, r.neighbors)
}
}
| savePNG | identifier_name |
sexprs.go | // Copyright 2013 Robert A. Uhl. All rights reserved.
// Use of this source code is governed by an MIT-style license which may
// be found in the LICENSE file.
// Package sexprs implements Ron Rivest's canonical S-expressions
// (c.f. http://people.csail.mit.edu/rivest/Sexp.txt or
// rivest-draft.txt in this package) in Go. I'm indebted to Inferno's
// sexprs(2), whose API I first accidentally, and then deliberately,
// mimicked. I've copied much of its style, only making it more
// Go-like.
//
// Canonical S-expressions are a compact, easy-to-parse, ordered,
// hashable data representation ideal for cryptographic operations.
// They are simpler and more compact than either JSON or XML.
//
// An S-expression is composed of lists and atoms. An atom is a string
// of bytes, with an optional display hint, also a byte string. A list
// can contain zero or more atoms or lists.
//
// There are two representations of an S-expression: the canonical
// representation is a byte-oriented, packed representation, while the
// advanced representation is string-oriented and more traditional in
// appearance.
//
// The S-expression ("foo" "bar" ["bin"]"baz quux") is canonically:
// (3:foo3:bar[3:bin]8:quux)
//
// Among the valid advanced representations are:
// (foo 3:bar [bin]"baz quux")
// and:
// ("foo" #626172# [3:bin]|YmF6IHF1dXg=|)
//
// There is also a transport encoding (intended for use in 7-bit transport
// modes), delimited with {}:
// {KDM6Zm9vMzpiYXJbMzpiaW5dODpiYXogcXV1eCk=}
//
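// A minimal usage sketch (error handling elided; Parse, Pack and
// Base64String are the functions defined in this package):
//
//	sexpr, rest, err := Parse([]byte(`(foo (bar "baz quux"))`))
//	if err != nil {
//		// handle the parse error
//	}
//	_ = rest                  // unparsed bytes after the first S-expression
//	canonical := sexpr.Pack() // (3:foo(3:bar8:baz quux))
//	transport := sexpr.Base64String()
//	_, _ = canonical, transport
//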
package sexprs
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"strconv"
)
var (
lowerCase = []byte("abcdefghijklmnopqrstuvwxyz")
upperCase = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
decimalDigit = []byte("0123456789")
alpha = append(lowerCase, upperCase...)
hexadecimalDigit = append(decimalDigit, []byte("abcdefABCDEF")...)
octalDigit = []byte("01234567")
simplePunc = []byte("-./_:*+=")
whitespaceChar = []byte(" \t\r\n")
base64Char = append(alpha, append(decimalDigit, []byte("+/=")...)...)
tokenChar = append(alpha, append(decimalDigit, simplePunc...)...)
base64Encoding = base64.StdEncoding
stringChar = append(tokenChar, append(hexadecimalDigit, []byte("\"|#")...)...)
stringEncChar = append(stringChar, []byte("\b\t\v\n\f\r\"'\\ ")...)
)
// Sexp is the interface implemented by both lists and atoms.
type Sexp interface {
// String returns an advanced representation of the object, with
// no line breaks.
String() string
string(*bytes.Buffer)
// Base64String returns a transport-encoded rendering of the
// S-expression
Base64String() string
// Pack returns the canonical representation of the object. It
// will always return the same sequence of bytes for the same
// object.
Pack() []byte
pack(*bytes.Buffer)
// PackedLen returns the size in bytes of the canonical
// representation.
PackedLen() int
// Equal will return true if its receiver and argument are
// identical.
Equal(b Sexp) bool
}
type List []Sexp
type Atom struct {
DisplayHint []byte
Value []byte
}
func (a Atom) Pack() []byte {
buf := bytes.NewBuffer(nil)
a.pack(buf)
return buf.Bytes()
}
func (a Atom) pack(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[" + strconv.Itoa(len(a.DisplayHint)) + ":")
buf.Write(a.DisplayHint)
buf.WriteString("]")
}
buf.WriteString(strconv.Itoa(len(a.Value)) + ":")
buf.Write(a.Value)
}
func (a Atom) PackedLen() (size int) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
size += 3 // [:]
size += len(strconv.Itoa(len(a.DisplayHint))) // decimal length
size += len(a.DisplayHint)
}
size += len(strconv.Itoa(len(a.Value)))
size++ // :
return size + len(a.Value)
}
func (a Atom) String() string {
buf := bytes.NewBuffer(nil)
a.string(buf)
return buf.String()
}
const (
tokenEnc = iota
quotedEnc
base64Enc
)
// write a string in a legible encoding to buf
func writeString(buf *bytes.Buffer, a []byte) {
// test to see what sort of encoding is best to use
encoding := tokenEnc
acc := make([]byte, len(a), len(a))
for i, c := range a {
acc[i] = c
switch {
case bytes.IndexByte(tokenChar, c) > -1:
continue
case (encoding == tokenEnc) && bytes.IndexByte(stringEncChar, c) > -1:
encoding = quotedEnc
strAcc := make([]byte, i, len(a))
copy(strAcc, acc)
for j := i; j < len(a); j++ {
c := a[j]
if bytes.IndexByte(stringEncChar, c) < 0 {
encoding = base64Enc
break
}
switch c {
case '\b':
strAcc = append(strAcc, []byte("\\b")...)
case '\t':
strAcc = append(strAcc, []byte("\\t")...)
case '\v':
strAcc = append(strAcc, []byte("\\v")...)
case '\n':
strAcc = append(strAcc, []byte("\\n")...)
case '\f':
strAcc = append(strAcc, []byte("\\f")...)
case '"':
strAcc = append(strAcc, []byte("\\\"")...)
case '\'':
strAcc = append(strAcc, []byte("'")...)
case '\\':
strAcc = append(strAcc, []byte("\\\\")...)
case '\r':
strAcc = append(strAcc, []byte("\\r")...)
default:
strAcc = append(strAcc, c)
}
}
if encoding == quotedEnc {
buf.WriteString("\"")
buf.Write(strAcc)
buf.WriteString("\"")
return
}
default:
encoding = base64Enc
break
}
}
switch encoding {
case base64Enc:
buf.WriteString("|" + base64Encoding.EncodeToString(acc) + "|")
case tokenEnc:
buf.Write(acc)
default:
panic("Encoding is neither base64 nor token")
}
}
func (a Atom) string(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[")
writeString(buf, a.DisplayHint)
buf.WriteString("]")
}
if len(a.Value) == 0 {
buf.WriteString("")
} else {
writeString(buf, a.Value)
}
return
}
func (a Atom) Base64String() (s string) {
return "{" + base64Encoding.EncodeToString(a.Pack()) + "}"
}
func (a Atom) Equal(b Sexp) bool {
switch b := b.(type) {
case Atom:
return bytes.Equal(a.DisplayHint, b.DisplayHint) && bytes.Equal(a.Value, b.Value)
default:
return false
}
return false
}
func (l List) Pack() []byte {
buf := bytes.NewBuffer(nil)
l.pack(buf)
return buf.Bytes()
}
func (l List) pack(buf *bytes.Buffer) {
buf.WriteString("(")
for _, datum := range l {
datum.pack(buf)
}
buf.WriteString(")")
}
func (l List) Base64String() string {
return "{" + base64Encoding.EncodeToString(l.Pack()) + "}"
}
func (l List) String() string {
buf := bytes.NewBuffer(nil)
l.string(buf)
return buf.String()
}
func (l List) string(buf *bytes.Buffer) {
buf.WriteString("(")
for i, datum := range l |
buf.WriteString(")")
}
func (a List) Equal(b Sexp) bool {
switch b := b.(type) {
case List:
if len(a) != len(b) {
return false
} else {
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
default:
return false
}
return false
}
func (l List) PackedLen() (size int) {
size = 2 // ()
for _, element := range l {
size += element.PackedLen()
}
return size
}
// Parse returns the first S-expression in byte string s, the unparsed
// rest of s and any error encountered
func Parse(s []byte) (sexpr Sexp, rest []byte, err error) {
//return parseSexp(bytes)
r := bufio.NewReader(bytes.NewReader(s))
sexpr, err = Read(r)
if err != nil && err != io.EOF {
return nil, nil, err
}
rest, err = ioutil.ReadAll(r)
// don't confuse calling code with EOFs
if err == io.EOF {
err = nil
}
return sexpr, rest, err
}
func IsList(s Sexp) bool {
s, ok := s.(List)
return ok
}
// Read a single S-expression from buffered IO r, returning any error
// encountered. May return io.EOF if at end of r; may return a valid
// S-expression and io.EOF if the EOF was encountered at the end of
// parsing.
func Read(r *bufio.Reader) (s Sexp, err error) {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch c {
case '{':
enc, err := r.ReadBytes('}')
acc := make([]byte, 0, len(enc)-1)
for _, c := range enc[:len(enc)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
str := make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(str, acc)
if err != nil {
return nil, err
}
s, err = Read(bufio.NewReader(bytes.NewReader(str[:n])))
if err == nil || err == io.EOF {
return s, nil
} else {
return nil, err
}
case '(':
l := List{}
// skip whitespace
for {
c, err := r.ReadByte()
switch {
case c == ')':
return l, err
case bytes.IndexByte(whitespaceChar, c) == -1:
r.UnreadByte()
element, err := Read(r)
if err != nil {
return nil, err
}
l = append(l, element)
}
if err != nil {
return nil, err
}
}
default:
return readString(r, c)
}
if err != nil {
return s, err
}
panic("Can't reach here")
}
func readString(r *bufio.Reader, first byte) (s Sexp, err error) {
var displayHint []byte
if first == '[' {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
displayHint, err = readSimpleString(r, c)
if err != nil {
return nil, err
}
c, err = r.ReadByte()
if err != nil {
return nil, err
}
if c != ']' {
return nil, fmt.Errorf("']' expected to end display hint; %c found", c)
}
first, _ = r.ReadByte() // let error be caught by readSimpleString
}
str, err := readSimpleString(r, first)
return Atom{Value: str, DisplayHint: displayHint}, err
}
func readSimpleString(r *bufio.Reader, first byte) (s []byte, err error) {
switch {
case bytes.IndexByte(decimalDigit, first) > -1:
return readLengthDelimited(r, first)
case first == '#':
s, err := readHex(r)
if err != nil {
return nil, err
}
return s, nil
case first == '|':
s, err := readBase64(r)
if err != nil {
return nil, err
}
return s, nil
case first == '"':
s, err := readQuotedString(r, -1)
if err != nil {
return nil, err
}
return s, nil
case bytes.IndexByte(tokenChar, first) > -1:
s = append(s, first)
for {
c, err := r.ReadByte()
if bytes.IndexByte(tokenChar, c) == -1 {
r.UnreadByte()
return s, err
}
s = append(s, c)
if err != nil {
return nil, err
}
}
}
panic("can't get here")
}
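// readLengthDelimited reads a decimal length (whose first digit has already
// been consumed), then either ':' with raw bytes, '#' with hex digits, or
// '|' with base64; the decoded payload must match the stated length.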
func readLengthDelimited(r *bufio.Reader, first byte) (s []byte, err error) {
acc := make([]byte, 1)
acc[0] = first
for {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch {
case bytes.IndexByte(decimalDigit, c) > -1:
acc = append(acc, c)
case c == ':':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
acc = make([]byte, 0, length)
buf := make([]byte, length)
for n, err := r.Read(buf); int64(len(acc)) < length; n, err = r.Read(buf[:length-int64(len(acc))]) {
acc = append(acc, buf[:n]...)
if err != nil {
return acc, err
}
}
return acc, nil
case c == '#':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readHex(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
case c == '|':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readBase64(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
default:
return nil, fmt.Errorf("Expected integer; found %c", c)
}
}
panic("Can't get here")
}
func readHex(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('#')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, hex.DecodedLen(len(acc)))
n, err := hex.Decode(s, acc)
return s[:n], err
}
func readBase64(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('|')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(s, acc)
return s[:n], err
}
type quoteState int
const (
inQuote quoteState = iota
inEscape
inNewlineEscape
inReturnEscape
inHex1
inHex2
inOctal1
inOctal2
inOctal3
)
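// readQuotedString decodes a double-quoted string with C-like escapes
// (\b \t \v \n \f \r \" \' \\, hex \xHH, octal \OOO, and escaped line
// breaks). If length is positive, the decoded result must be exactly that
// many bytes long.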
func readQuotedString(r *bufio.Reader, length int) (s []byte, err error) {
var acc, escape []byte
if length >= 0 {
acc = make([]byte, 0, length)
} else {
acc = make([]byte, 0)
}
escape = make([]byte, 3)
state := inQuote
for c, err := r.ReadByte(); err == nil; c, err = r.ReadByte() {
switch state {
case inQuote:
switch c {
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, err
case '\\':
state = inEscape
default:
acc = append(acc, c)
}
case inEscape:
switch c {
case byte('b'):
acc = append(acc, '\b')
state = inQuote
case byte('t'):
acc = append(acc, '\t')
state = inQuote
case byte('v'):
acc = append(acc, '\v')
state = inQuote
case byte('n'):
acc = append(acc, '\n')
state = inQuote
case byte('f'):
acc = append(acc, '\f')
state = inQuote
case byte('r'):
acc = append(acc, '\r')
state = inQuote
case byte('"'):
acc = append(acc, '"')
state = inQuote
case byte('\''):
acc = append(acc, '\'')
state = inQuote
case byte('\\'):
acc = append(acc, '\\')
state = inQuote
case byte('\n'):
state = inNewlineEscape
case '\r':
state = inReturnEscape
case byte('x'):
state = inHex1
default:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inOctal2
escape[0] = c
} else {
return nil, fmt.Errorf("Unrecognised escape character %c", rune(c))
}
}
case inNewlineEscape:
switch c {
case '\r':
// pass
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, nil
default:
acc = append(acc, c)
}
state = inQuote
case inReturnEscape:
switch c {
case '\n':
// pass
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, nil
default:
acc = append(acc, c)
}
state = inQuote
case inHex1:
if bytes.IndexByte(hexadecimalDigit, c) > -1 {
state = inHex2
escape[0] = c
} else {
return nil, fmt.Errorf("Expected hexadecimal digit; got %c", c)
}
case inHex2:
if bytes.IndexByte(hexadecimalDigit, c) > -1 {
state = inQuote
escape[1] = c
num, err := strconv.ParseUint(string(escape[:2]), 16, 8)
if err != nil {
return nil, err
}
acc = append(acc, byte(num))
} else {
return nil, fmt.Errorf("Expected hexadecimal digit; got %c", c)
}
case inOctal2:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inOctal3
escape[1] = c
} else {
return nil, fmt.Errorf("Expected octal digit; got %c", c)
}
case inOctal3:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inQuote
escape[2] = c
num, err := strconv.ParseUint(string(escape[:3]), 8, 8)
if err != nil {
return nil, err
}
acc = append(acc, byte(num))
} else {
return nil, fmt.Errorf("Expected octal digit; got %c", c)
}
}
}
return nil, fmt.Errorf("Unterminated string")
}
| {
datum.string(buf)
if i < len(l)-1 {
buf.WriteString(" ")
}
} | conditional_block |
sexprs.go | // Copyright 2013 Robert A. Uhl. All rights reserved.
// Use of this source code is governed by an MIT-style license which may
// be found in the LICENSE file.
// Package sexprs implements Ron Rivest's canonical S-expressions
// (c.f. http://people.csail.mit.edu/rivest/Sexp.txt or
// rivest-draft.txt in this package) in Go. I'm indebted to Inferno's
// sexprs(2), whose API I first accidentally, and then deliberately,
// mimicked. I've copied much of its style, only making it more
// Go-like.
//
// Canonical S-expressions are a compact, easy-to-parse, ordered,
// hashable data representation ideal for cryptographic operations.
// They are simpler and more compact than either JSON or XML.
//
// An S-expression is composed of lists and atoms. An atom is a string
// of bytes, with an optional display hint, also a byte string. A list
// can contain zero or more atoms or lists.
//
// There are two representations of an S-expression: the canonical
// representation is a byte-oriented, packed representation, while the
// advanced representation is string-oriented and more traditional in
// appearance.
//
// The S-expression ("foo" "bar" ["bin"]"baz quux") is canonically:
// (3:foo3:bar[3:bin]8:quux)
//
// Among the valid advanced representations are:
// (foo 3:bar [bin]"baz quux")
// and:
// ("foo" #626172# [3:bin]|YmF6IHF1dXg=|)
//
// There is also a transport encoding (intended for use in 7-bit transport
// modes), delimited with {}:
// {KDM6Zm9vMzpiYXJbMzpiaW5dODpiYXogcXV1eCk=}
//
package sexprs
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"strconv"
)
var (
lowerCase = []byte("abcdefghijklmnopqrstuvwxyz")
upperCase = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
decimalDigit = []byte("0123456789")
alpha = append(lowerCase, upperCase...)
hexadecimalDigit = append(decimalDigit, []byte("abcdefABCDEF")...)
octalDigit = []byte("01234567")
simplePunc = []byte("-./_:*+=")
whitespaceChar = []byte(" \t\r\n")
base64Char = append(alpha, append(decimalDigit, []byte("+/=")...)...)
tokenChar = append(alpha, append(decimalDigit, simplePunc...)...)
base64Encoding = base64.StdEncoding
stringChar = append(tokenChar, append(hexadecimalDigit, []byte("\"|#")...)...)
stringEncChar = append(stringChar, []byte("\b\t\v\n\f\r\"'\\ ")...)
)
// Sexp is the interface implemented by both lists and atoms.
type Sexp interface {
// String returns an advanced representation of the object, with
// no line breaks.
String() string
string(*bytes.Buffer)
// Base64String returns a transport-encoded rendering of the
// S-expression
Base64String() string
// Pack returns the canonical representation of the object. It
// will always return the same sequence of bytes for the same
// object.
Pack() []byte
pack(*bytes.Buffer)
// PackedLen returns the size in bytes of the canonical
// representation.
PackedLen() int
// Equal will return true if its receiver and argument are
// identical.
Equal(b Sexp) bool
}
type List []Sexp
type Atom struct {
DisplayHint []byte
Value []byte
}
func (a Atom) Pack() []byte {
buf := bytes.NewBuffer(nil)
a.pack(buf)
return buf.Bytes()
}
func (a Atom) pack(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[" + strconv.Itoa(len(a.DisplayHint)) + ":")
buf.Write(a.DisplayHint)
buf.WriteString("]")
}
buf.WriteString(strconv.Itoa(len(a.Value)) + ":")
buf.Write(a.Value)
}
func (a Atom) PackedLen() (size int) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
size += 3 // [:]
size += len(strconv.Itoa(len(a.DisplayHint))) // decimal length
size += len(a.DisplayHint)
}
size += len(strconv.Itoa(len(a.Value)))
size++ // :
return size + len(a.Value)
}
func (a Atom) String() string {
buf := bytes.NewBuffer(nil)
a.string(buf)
return buf.String()
}
const (
tokenEnc = iota
quotedEnc
base64Enc
)
// write a string in a legible encoding to buf
func writeString(buf *bytes.Buffer, a []byte) {
// test to see what sort of encoding is best to use
encoding := tokenEnc
acc := make([]byte, len(a), len(a))
for i, c := range a {
acc[i] = c
switch {
case bytes.IndexByte(tokenChar, c) > -1:
continue
case (encoding == tokenEnc) && bytes.IndexByte(stringEncChar, c) > -1:
encoding = quotedEnc
strAcc := make([]byte, i, len(a))
copy(strAcc, acc)
for j := i; j < len(a); j++ {
c := a[j]
if bytes.IndexByte(stringEncChar, c) < 0 {
encoding = base64Enc
break
}
switch c {
case '\b':
strAcc = append(strAcc, []byte("\\b")...)
case '\t':
strAcc = append(strAcc, []byte("\\t")...)
case '\v':
strAcc = append(strAcc, []byte("\\v")...)
case '\n':
strAcc = append(strAcc, []byte("\\n")...)
case '\f':
strAcc = append(strAcc, []byte("\\f")...)
case '"':
strAcc = append(strAcc, []byte("\\\"")...)
case '\'':
strAcc = append(strAcc, []byte("'")...)
case '\\':
strAcc = append(strAcc, []byte("\\\\")...)
case '\r':
strAcc = append(strAcc, []byte("\\r")...)
default:
strAcc = append(strAcc, c)
}
}
if encoding == quotedEnc {
buf.WriteString("\"")
buf.Write(strAcc)
buf.WriteString("\"")
return
}
default:
encoding = base64Enc
break
}
}
switch encoding {
case base64Enc:
buf.WriteString("|" + base64Encoding.EncodeToString(acc) + "|")
case tokenEnc:
buf.Write(acc)
default:
panic("Encoding is neither base64 nor token")
}
}
func (a Atom) string(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[")
writeString(buf, a.DisplayHint)
buf.WriteString("]")
}
if len(a.Value) == 0 {
buf.WriteString("")
} else {
writeString(buf, a.Value)
}
return
}
func (a Atom) Base64String() (s string) {
return "{" + base64Encoding.EncodeToString(a.Pack()) + "}"
}
func (a Atom) Equal(b Sexp) bool {
switch b := b.(type) {
case Atom:
return bytes.Equal(a.DisplayHint, b.DisplayHint) && bytes.Equal(a.Value, b.Value)
default:
return false
}
return false
}
func (l List) Pack() []byte {
buf := bytes.NewBuffer(nil)
l.pack(buf)
return buf.Bytes()
}
func (l List) pack(buf *bytes.Buffer) {
buf.WriteString("(")
for _, datum := range l {
datum.pack(buf)
}
buf.WriteString(")")
}
func (l List) Base64String() string {
return "{" + base64Encoding.EncodeToString(l.Pack()) + "}"
}
func (l List) String() string {
buf := bytes.NewBuffer(nil)
l.string(buf)
return buf.String()
}
func (l List) string(buf *bytes.Buffer) {
buf.WriteString("(")
for i, datum := range l {
datum.string(buf)
if i < len(l)-1 {
buf.WriteString(" ")
}
}
buf.WriteString(")")
}
func (a List) Equal(b Sexp) bool {
switch b := b.(type) {
case List:
if len(a) != len(b) {
return false
} else {
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
default:
return false
}
return false
}
func (l List) PackedLen() (size int) {
size = 2 // ()
for _, element := range l {
size += element.PackedLen()
}
return size
}
// Parse returns the first S-expression in byte string s, the unparsed
// rest of s and any error encountered
func Parse(s []byte) (sexpr Sexp, rest []byte, err error) {
//return parseSexp(bytes)
r := bufio.NewReader(bytes.NewReader(s))
sexpr, err = Read(r)
if err != nil && err != io.EOF {
return nil, nil, err
}
rest, err = ioutil.ReadAll(r)
// don't confuse calling code with EOFs
if err == io.EOF {
err = nil
}
return sexpr, rest, err
}
func IsList(s Sexp) bool {
s, ok := s.(List)
return ok
}
// Read a single S-expression from buffered IO r, returning any error
// encountered. May return io.EOF if at end of r; may return a valid
// S-expression and io.EOF if the EOF was encountered at the end of
// parsing.
func Read(r *bufio.Reader) (s Sexp, err error) {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch c {
case '{':
enc, err := r.ReadBytes('}')
acc := make([]byte, 0, len(enc)-1)
for _, c := range enc[:len(enc)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
str := make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(str, acc)
if err != nil {
return nil, err
}
s, err = Read(bufio.NewReader(bytes.NewReader(str[:n])))
if err == nil || err == io.EOF {
return s, nil
} else {
return nil, err
}
case '(':
l := List{}
// skip whitespace
for {
c, err := r.ReadByte()
switch {
case c == ')':
return l, err
case bytes.IndexByte(whitespaceChar, c) == -1:
r.UnreadByte()
element, err := Read(r)
if err != nil {
return nil, err
}
l = append(l, element)
}
if err != nil {
return nil, err
}
}
default:
return readString(r, c)
}
if err != nil {
return s, err
}
panic("Can't reach here")
}
func readString(r *bufio.Reader, first byte) (s Sexp, err error) {
var displayHint []byte
if first == '[' {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
displayHint, err = readSimpleString(r, c)
if err != nil {
return nil, err
}
c, err = r.ReadByte()
if err != nil {
return nil, err
}
if c != ']' {
return nil, fmt.Errorf("']' expected to end display hint; %c found", c)
}
first, _ = r.ReadByte() // let error be caught by readSimpleString
}
str, err := readSimpleString(r, first)
return Atom{Value: str, DisplayHint: displayHint}, err
}
func readSimpleString(r *bufio.Reader, first byte) (s []byte, err error) {
switch {
case bytes.IndexByte(decimalDigit, first) > -1:
return readLengthDelimited(r, first)
case first == '#':
s, err := readHex(r)
if err != nil {
return nil, err
}
return s, nil
case first == '|':
s, err := readBase64(r)
if err != nil {
return nil, err
}
return s, nil
case first == '"':
s, err := readQuotedString(r, -1)
if err != nil {
return nil, err
}
return s, nil
case bytes.IndexByte(tokenChar, first) > -1:
s = append(s, first)
for {
c, err := r.ReadByte()
if bytes.IndexByte(tokenChar, c) == -1 {
r.UnreadByte()
return s, err
}
s = append(s, c)
if err != nil {
return nil, err
}
}
}
panic("can't get here")
}
func readLengthDelimited(r *bufio.Reader, first byte) (s []byte, err error) {
acc := make([]byte, 1)
acc[0] = first
for {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch {
case bytes.IndexByte(decimalDigit, c) > -1:
acc = append(acc, c)
case c == ':':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
acc = make([]byte, 0, length)
buf := make([]byte, length)
for n, err := r.Read(buf); int64(len(acc)) < length; n, err = r.Read(buf[:length-int64(len(acc))]) {
acc = append(acc, buf[:n]...)
if err != nil {
return acc, err
}
}
return acc, nil
case c == '#':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readHex(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
case c == '|':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readBase64(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
default:
return nil, fmt.Errorf("Expected integer; found %c", c)
}
}
panic("Can't get here")
}
func readHex(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('#')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] { | if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, hex.DecodedLen(len(acc)))
n, err := hex.Decode(s, acc)
return s[:n], err
}
func readBase64(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('|')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(s, acc)
return s[:n], err
}
type quoteState int
const (
inQuote quoteState = iota
inEscape
inNewlineEscape
inReturnEscape
inHex1
inHex2
inOctal1
inOctal2
inOctal3
)
func readQuotedString(r *bufio.Reader, length int) (s []byte, err error) {
var acc, escape []byte
if length >= 0 {
acc = make([]byte, 0, length)
} else {
acc = make([]byte, 0)
}
escape = make([]byte, 3)
state := inQuote
for c, err := r.ReadByte(); err == nil; c, err = r.ReadByte() {
switch state {
case inQuote:
switch c {
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, err
case '\\':
state = inEscape
default:
acc = append(acc, c)
}
case inEscape:
switch c {
case byte('b'):
acc = append(acc, '\b')
state = inQuote
case byte('t'):
acc = append(acc, '\t')
state = inQuote
case byte('v'):
acc = append(acc, '\v')
state = inQuote
case byte('n'):
acc = append(acc, '\n')
state = inQuote
case byte('f'):
acc = append(acc, '\f')
state = inQuote
case byte('r'):
acc = append(acc, '\r')
state = inQuote
case byte('"'):
acc = append(acc, '"')
state = inQuote
case byte('\''):
acc = append(acc, '\'')
state = inQuote
case byte('\\'):
acc = append(acc, '\\')
state = inQuote
case byte('\n'):
state = inNewlineEscape
case '\r':
state = inReturnEscape
case byte('x'):
state = inHex1
default:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inOctal2
escape[0] = c
} else {
return nil, fmt.Errorf("Unrecognised escape character %c", rune(c))
}
}
case inNewlineEscape:
switch c {
case '\r':
// pass
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, nil
default:
acc = append(acc, c)
}
state = inQuote
case inReturnEscape:
switch c {
case '\n':
// pass
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, nil
default:
acc = append(acc, c)
}
state = inQuote
case inHex1:
if bytes.IndexByte(hexadecimalDigit, c) > -1 {
state = inHex2
escape[0] = c
} else {
return nil, fmt.Errorf("Expected hexadecimal digit; got %c", c)
}
case inHex2:
if bytes.IndexByte(hexadecimalDigit, c) > -1 {
state = inQuote
escape[1] = c
num, err := strconv.ParseUint(string(escape[:2]), 16, 8)
if err != nil {
return nil, err
}
acc = append(acc, byte(num))
} else {
return nil, fmt.Errorf("Expected hexadecimal digit; got %c", c)
}
case inOctal2:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inOctal3
escape[1] = c
} else {
return nil, fmt.Errorf("Expected octal digit; got %c", c)
}
case inOctal3:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inQuote
escape[2] = c
num, err := strconv.ParseUint(string(escape[:3]), 8, 8)
if err != nil {
return nil, err
}
acc = append(acc, byte(num))
} else {
return nil, fmt.Errorf("Expected octal digit; got %c", c)
}
}
}
return nil, fmt.Errorf("Unterminated string")
} | random_line_split |
|
sexprs.go | // Copyright 2013 Robert A. Uhl. All rights reserved.
// Use of this source code is governed by an MIT-style license which may
// be found in the LICENSE file.
// Package sexprs implements Ron Rivest's canonical S-expressions
// (c.f. http://people.csail.mit.edu/rivest/Sexp.txt or
// rivest-draft.txt in this package) in Go. I'm indebted to Inferno's
// sexprs(2), whose API I first accidentally, and then deliberately,
// mimicked. I've copied much of its style, only making it more
// Go-like.
//
// Canonical S-expressions are a compact, easy-to-parse, ordered,
// hashable data representation ideal for cryptographic operations.
// They are simpler and more compact than either JSON or XML.
//
// An S-expression is composed of lists and atoms. An atom is a string
// of bytes, with an optional display hint, also a byte string. A list
// can contain zero or more atoms or lists.
//
// There are two representations of an S-expression: the canonical
// representation is a byte-oriented, packed representation, while the
// advanced representation is string-oriented and more traditional in
// appearance.
//
// The S-expression ("foo" "bar" ["bin"]"baz quux") is canonically:
// (3:foo3:bar[3:bin]8:baz quux)
//
// Among the valid advanced representations are:
// (foo 3:bar [bin]"baz quux")
// and:
// ("foo" #626172# [3:bin]|YmF6IHF1dXg=|)
//
// There is also a transport encoding (intended for use in 7-bit transport
// modes), delimited with {}:
// {KDM6Zm9vMzpiYXJbMzpiaW5dODpiYXogcXV1eCk=}
//
package sexprs
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"strconv"
)
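// The function below is an editorial sketch, not part of the original package:
// it exercises Parse, Pack and Base64String on the advanced form used in the
// package comment above, with deliberately minimal error handling.
func exampleRoundTrip() (canonical []byte, transport string, err error) {
	sexpr, _, err := Parse([]byte(`(foo 3:bar [bin]"baz quux")`))
	if err != nil {
		return nil, "", err
	}
	// Pack yields the canonical form (3:foo3:bar[3:bin]8:baz quux);
	// Base64String wraps that same encoding in {...} for 7-bit transport.
	return sexpr.Pack(), sexpr.Base64String(), nil
}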
var (
lowerCase = []byte("abcdefghijklmnopqrstuvwxyz")
upperCase = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
decimalDigit = []byte("0123456789")
alpha = append(lowerCase, upperCase...)
hexadecimalDigit = append(decimalDigit, []byte("abcdefABCDEF")...)
octalDigit = []byte("01234567")
simplePunc = []byte("-./_:*+=")
whitespaceChar = []byte(" \t\r\n")
base64Char = append(alpha, append(decimalDigit, []byte("+/=")...)...)
tokenChar = append(alpha, append(decimalDigit, simplePunc...)...)
base64Encoding = base64.StdEncoding
stringChar = append(tokenChar, append(hexadecimalDigit, []byte("\"|#")...)...)
stringEncChar = append(stringChar, []byte("\b\t\v\n\f\r\"'\\ ")...)
)
// Sexp is the interface implemented by both lists and atoms.
type Sexp interface {
// String returns an advanced representation of the object, with
// no line breaks.
String() string
string(*bytes.Buffer)
// Base64String returns a transport-encoded rendering of the
// S-expression
Base64String() string
// Pack returns the canonical representation of the object. It
// will always return the same sequence of bytes for the same
// object.
Pack() []byte
pack(*bytes.Buffer)
// PackedLen returns the size in bytes of the canonical
// representation.
PackedLen() int
// Equal will return true if its receiver and argument are
// identical.
Equal(b Sexp) bool
}
type List []Sexp
type Atom struct {
DisplayHint []byte
Value []byte
}
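// Editorial sketch, not in the original source: List and Atom are plain values,
// so an S-expression can be assembled directly and then packed or printed.
func exampleBuild() List {
	l := List{
		Atom{Value: []byte("alpha")},
		Atom{DisplayHint: []byte("text"), Value: []byte("beta gamma")},
	}
	// l.Pack() returns (5:alpha[4:text]10:beta gamma) and
	// l.String() returns (alpha [text]"beta gamma").
	return l
}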
func (a Atom) Pack() []byte {
buf := bytes.NewBuffer(nil)
a.pack(buf)
return buf.Bytes()
}
func (a Atom) pack(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[" + strconv.Itoa(len(a.DisplayHint)) + ":")
buf.Write(a.DisplayHint)
buf.WriteString("]")
}
buf.WriteString(strconv.Itoa(len(a.Value)) + ":")
buf.Write(a.Value)
}
func (a Atom) PackedLen() (size int) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
size += 3 // [:]
size += len(strconv.Itoa(len(a.DisplayHint))) // decimal length
size += len(a.DisplayHint)
}
size += len(strconv.Itoa(len(a.Value)))
size++ // :
return size + len(a.Value)
}
func (a Atom) String() string {
buf := bytes.NewBuffer(nil)
a.string(buf)
return buf.String()
}
const (
tokenEnc = iota
quotedEnc
base64Enc
)
// write a string in a legible encoding to buf
func writeString(buf *bytes.Buffer, a []byte) {
// test to see what sort of encoding is best to use
encoding := tokenEnc
acc := make([]byte, len(a), len(a))
for i, c := range a {
acc[i] = c
switch {
case bytes.IndexByte(tokenChar, c) > -1:
continue
case (encoding == tokenEnc) && bytes.IndexByte(stringEncChar, c) > -1:
encoding = quotedEnc
strAcc := make([]byte, i, len(a))
copy(strAcc, acc)
for j := i; j < len(a); j++ {
c := a[j]
if bytes.IndexByte(stringEncChar, c) < 0 {
encoding = base64Enc
break
}
switch c {
case '\b':
strAcc = append(strAcc, []byte("\\b")...)
case '\t':
strAcc = append(strAcc, []byte("\\t")...)
case '\v':
strAcc = append(strAcc, []byte("\\v")...)
case '\n':
strAcc = append(strAcc, []byte("\\n")...)
case '\f':
strAcc = append(strAcc, []byte("\\f")...)
case '"':
strAcc = append(strAcc, []byte("\\\"")...)
case '\'':
strAcc = append(strAcc, []byte("'")...)
case '\\':
strAcc = append(strAcc, []byte("\\\\")...)
case '\r':
strAcc = append(strAcc, []byte("\\r")...)
default:
strAcc = append(strAcc, c)
}
}
if encoding == quotedEnc {
buf.WriteString("\"")
buf.Write(strAcc)
buf.WriteString("\"")
return
}
default:
encoding = base64Enc
break
}
}
switch encoding {
case base64Enc:
buf.WriteString("|" + base64Encoding.EncodeToString(acc) + "|")
case tokenEnc:
buf.Write(acc)
default:
panic("Encoding is neither base64 nor token")
}
}
func (a Atom) string(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[")
writeString(buf, a.DisplayHint)
buf.WriteString("]")
}
if len(a.Value) == 0 {
buf.WriteString("")
} else {
writeString(buf, a.Value)
}
return
}
func (a Atom) Base64String() (s string) {
return "{" + base64Encoding.EncodeToString(a.Pack()) + "}"
}
func (a Atom) Equal(b Sexp) bool {
switch b := b.(type) {
case Atom:
return bytes.Equal(a.DisplayHint, b.DisplayHint) && bytes.Equal(a.Value, b.Value)
default:
return false
}
return false
}
func (l List) Pack() []byte {
buf := bytes.NewBuffer(nil)
l.pack(buf)
return buf.Bytes()
}
func (l List) pack(buf *bytes.Buffer) {
buf.WriteString("(")
for _, datum := range l {
datum.pack(buf)
}
buf.WriteString(")")
}
func (l List) Base64String() string {
return "{" + base64Encoding.EncodeToString(l.Pack()) + "}"
}
func (l List) String() string {
buf := bytes.NewBuffer(nil)
l.string(buf)
return buf.String()
}
func (l List) string(buf *bytes.Buffer) {
buf.WriteString("(")
for i, datum := range l {
datum.string(buf)
if i < len(l)-1 {
buf.WriteString(" ")
}
}
buf.WriteString(")")
}
func (a List) Equal(b Sexp) bool {
switch b := b.(type) {
case List:
if len(a) != len(b) {
return false
} else {
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
default:
return false
}
return false
}
func (l List) PackedLen() (size int) {
size = 2 // ()
for _, element := range l {
size += element.PackedLen()
}
return size
}
// Parse returns the first S-expression in byte string s, the unparsed
// rest of s and any error encountered
func Parse(s []byte) (sexpr Sexp, rest []byte, err error) {
//return parseSexp(bytes)
r := bufio.NewReader(bytes.NewReader(s))
sexpr, err = Read(r)
if err != nil && err != io.EOF {
return nil, nil, err
}
rest, err = ioutil.ReadAll(r)
// don't confuse calling code with EOFs
if err == io.EOF {
err = nil
}
return sexpr, rest, err
}
func IsList(s Sexp) bool {
s, ok := s.(List)
return ok
}
// Read a single S-expression from buffered IO r, returning any error
// encountered. May return io.EOF if at end of r; may return a valid
// S-expression and io.EOF if the EOF was encountered at the end of
// parsing.
func Read(r *bufio.Reader) (s Sexp, err error) {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch c {
case '{':
enc, err := r.ReadBytes('}')
acc := make([]byte, 0, len(enc)-1)
for _, c := range enc[:len(enc)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
str := make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(str, acc)
if err != nil {
return nil, err
}
s, err = Read(bufio.NewReader(bytes.NewReader(str[:n])))
if err == nil || err == io.EOF {
return s, nil
} else {
return nil, err
}
case '(':
l := List{}
// skip whitespace
for {
c, err := r.ReadByte()
switch {
case c == ')':
return l, err
case bytes.IndexByte(whitespaceChar, c) == -1:
r.UnreadByte()
element, err := Read(r)
if err != nil {
return nil, err
}
l = append(l, element)
}
if err != nil {
return nil, err
}
}
default:
return readString(r, c)
}
if err != nil {
return s, err
}
panic("Can't reach here")
}
func readString(r *bufio.Reader, first byte) (s Sexp, err error) {
var displayHint []byte
if first == '[' {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
displayHint, err = readSimpleString(r, c)
if err != nil {
return nil, err
}
c, err = r.ReadByte()
if err != nil {
return nil, err
}
if c != ']' {
return nil, fmt.Errorf("']' expected to end display hint; %c found", c)
}
first, _ = r.ReadByte() // let error be caught by readSimpleString
}
str, err := readSimpleString(r, first)
return Atom{Value: str, DisplayHint: displayHint}, err
}
func readSimpleString(r *bufio.Reader, first byte) (s []byte, err error) {
switch {
case bytes.IndexByte(decimalDigit, first) > -1:
return readLengthDelimited(r, first)
case first == '#':
s, err := readHex(r)
if err != nil {
return nil, err
}
return s, nil
case first == '|':
s, err := readBase64(r)
if err != nil {
return nil, err
}
return s, nil
case first == '"':
s, err := readQuotedString(r, -1)
if err != nil {
return nil, err
}
return s, nil
case bytes.IndexByte(tokenChar, first) > -1:
s = append(s, first)
for {
c, err := r.ReadByte()
if bytes.IndexByte(tokenChar, c) == -1 {
r.UnreadByte()
return s, err
}
s = append(s, c)
if err != nil {
return nil, err
}
}
}
panic("can't get here")
}
func readLengthDelimited(r *bufio.Reader, first byte) (s []byte, err error) {
acc := make([]byte, 1)
acc[0] = first
for {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch {
case bytes.IndexByte(decimalDigit, c) > -1:
acc = append(acc, c)
case c == ':':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
acc = make([]byte, 0, length)
buf := make([]byte, length)
for n, err := r.Read(buf); int64(len(acc)) < length; n, err = r.Read(buf[:length-int64(len(acc))]) {
acc = append(acc, buf[:n]...)
if err != nil {
return acc, err
}
}
return acc, nil
case c == '#':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readHex(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
case c == '|':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readBase64(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
default:
return nil, fmt.Errorf("Expected integer; found %c", c)
}
}
panic("Can't get here")
}
func readHex(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('#')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, hex.DecodedLen(len(acc)))
n, err := hex.Decode(s, acc)
return s[:n], err
}
func readBase64(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('|')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(s, acc)
return s[:n], err
}
type quoteState int
const (
inQuote quoteState = iota
inEscape
inNewlineEscape
inReturnEscape
inHex1
inHex2
inOctal1
inOctal2
inOctal3
)
func | (r *bufio.Reader, length int) (s []byte, err error) {
var acc, escape []byte
if length >= 0 {
acc = make([]byte, 0, length)
} else {
acc = make([]byte, 0)
}
escape = make([]byte, 3)
state := inQuote
for c, err := r.ReadByte(); err == nil; c, err = r.ReadByte() {
switch state {
case inQuote:
switch c {
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, err
case '\\':
state = inEscape
default:
acc = append(acc, c)
}
case inEscape:
switch c {
case byte('b'):
acc = append(acc, '\b')
state = inQuote
case byte('t'):
acc = append(acc, '\t')
state = inQuote
case byte('v'):
acc = append(acc, '\v')
state = inQuote
case byte('n'):
acc = append(acc, '\n')
state = inQuote
case byte('f'):
acc = append(acc, '\f')
state = inQuote
case byte('r'):
acc = append(acc, '\r')
state = inQuote
case byte('"'):
acc = append(acc, '"')
state = inQuote
case byte('\''):
acc = append(acc, '\'')
state = inQuote
case byte('\\'):
acc = append(acc, '\\')
state = inQuote
case byte('\n'):
state = inNewlineEscape
case '\r':
state = inReturnEscape
case byte('x'):
state = inHex1
default:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inOctal2
escape[0] = c
} else {
return nil, fmt.Errorf("Unrecognised escape character %c", rune(c))
}
}
case inNewlineEscape:
switch c {
case '\r':
// pass
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, nil
default:
acc = append(acc, c)
}
state = inQuote
case inReturnEscape:
switch c {
case '\n':
// pass
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, nil
default:
acc = append(acc, c)
}
state = inQuote
case inHex1:
if bytes.IndexByte(hexadecimalDigit, c) > -1 {
state = inHex2
escape[0] = c
} else {
return nil, fmt.Errorf("Expected hexadecimal digit; got %c", c)
}
case inHex2:
if bytes.IndexByte(hexadecimalDigit, c) > -1 {
state = inQuote
escape[1] = c
num, err := strconv.ParseUint(string(escape[:2]), 16, 8)
if err != nil {
return nil, err
}
acc = append(acc, byte(num))
} else {
return nil, fmt.Errorf("Expected hexadecimal digit; got %c", c)
}
case inOctal2:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inOctal3
escape[1] = c
} else {
return nil, fmt.Errorf("Expected octal digit; got %c", c)
}
case inOctal3:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inQuote
escape[2] = c
num, err := strconv.ParseUint(string(escape[:3]), 8, 8)
if err != nil {
return nil, err
}
acc = append(acc, byte(num))
} else {
return nil, fmt.Errorf("Expected octal digit; got %c", c)
}
}
}
return nil, fmt.Errorf("Unterminated string")
}
| readQuotedString | identifier_name |
sexprs.go | // Copyright 2013 Robert A. Uhl. All rights reserved.
// Use of this source code is governed by an MIT-style license which may
// be found in the LICENSE file.
// Package sexprs implements Ron Rivest's canonical S-expressions
// (c.f. http://people.csail.mit.edu/rivest/Sexp.txt or
// rivest-draft.txt in this package) in Go. I'm indebted to Inferno's
// sexprs(2), whose API I first accidentally, and then deliberately,
// mimicked. I've copied much of its style, only making it more
// Go-like.
//
// Canonical S-expressions are a compact, easy-to-parse, ordered,
// hashable data representation ideal for cryptographic operations.
// They are simpler and more compact than either JSON or XML.
//
// An S-expression is composed of lists and atoms. An atom is a string
// of bytes, with an optional display hint, also a byte string. A list
// can contain zero or more atoms or lists.
//
// There are two representations of an S-expression: the canonical
// representation is a byte-oriented, packed representation, while the
// advanced representation is string-oriented and more traditional in
// appearance.
//
// The S-expression ("foo" "bar" ["bin"]"baz quux") is canonically:
// (3:foo3:bar[3:bin]8:baz quux)
//
// Among the valid advanced representations are:
// (foo 3:bar [bin]"baz quux")
// and:
// ("foo" #626172# [3:bin]|YmF6IHF1dXg=|)
//
// There is also a transport encoding (intended for use in 7-bit transport
// modes), delimited with {}:
// {KDM6Zm9vMzpiYXJbMzpiaW5dODpiYXogcXV1eCk=}
//
package sexprs
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"strconv"
)
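// The function below is an editorial sketch, not part of the original package:
// it exercises Parse, Pack and Base64String on the advanced form used in the
// package comment above, with deliberately minimal error handling.
func exampleRoundTrip() (canonical []byte, transport string, err error) {
	sexpr, _, err := Parse([]byte(`(foo 3:bar [bin]"baz quux")`))
	if err != nil {
		return nil, "", err
	}
	// Pack yields the canonical form (3:foo3:bar[3:bin]8:baz quux);
	// Base64String wraps that same encoding in {...} for 7-bit transport.
	return sexpr.Pack(), sexpr.Base64String(), nil
}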
var (
lowerCase = []byte("abcdefghijklmnopqrstuvwxyz")
upperCase = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
decimalDigit = []byte("0123456789")
alpha = append(lowerCase, upperCase...)
hexadecimalDigit = append(decimalDigit, []byte("abcdefABCDEF")...)
octalDigit = []byte("01234567")
simplePunc = []byte("-./_:*+=")
whitespaceChar = []byte(" \t\r\n")
base64Char = append(alpha, append(decimalDigit, []byte("+/=")...)...)
tokenChar = append(alpha, append(decimalDigit, simplePunc...)...)
base64Encoding = base64.StdEncoding
stringChar = append(tokenChar, append(hexadecimalDigit, []byte("\"|#")...)...)
stringEncChar = append(stringChar, []byte("\b\t\v\n\f\r\"'\\ ")...)
)
// Sexp is the interface implemented by both lists and atoms.
type Sexp interface {
// String returns an advanced representation of the object, with
// no line breaks.
String() string
string(*bytes.Buffer)
// Base64String returns a transport-encoded rendering of the
// S-expression
Base64String() string
// Pack returns the canonical representation of the object. It
// will always return the same sequence of bytes for the same
// object.
Pack() []byte
pack(*bytes.Buffer)
// PackedLen returns the size in bytes of the canonical
// representation.
PackedLen() int
// Equal will return true if its receiver and argument are
// identical.
Equal(b Sexp) bool
}
type List []Sexp
type Atom struct {
DisplayHint []byte
Value []byte
}
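// Editorial sketch, not in the original source: List and Atom are plain values,
// so an S-expression can be assembled directly and then packed or printed.
func exampleBuild() List {
	l := List{
		Atom{Value: []byte("alpha")},
		Atom{DisplayHint: []byte("text"), Value: []byte("beta gamma")},
	}
	// l.Pack() returns (5:alpha[4:text]10:beta gamma) and
	// l.String() returns (alpha [text]"beta gamma").
	return l
}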
func (a Atom) Pack() []byte {
buf := bytes.NewBuffer(nil)
a.pack(buf)
return buf.Bytes()
}
func (a Atom) pack(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[" + strconv.Itoa(len(a.DisplayHint)) + ":")
buf.Write(a.DisplayHint)
buf.WriteString("]")
}
buf.WriteString(strconv.Itoa(len(a.Value)) + ":")
buf.Write(a.Value)
}
func (a Atom) PackedLen() (size int) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
size += 3 // [:]
size += len(strconv.Itoa(len(a.DisplayHint))) // decimal length
size += len(a.DisplayHint)
}
size += len(strconv.Itoa(len(a.Value)))
size++ // :
return size + len(a.Value)
}
func (a Atom) String() string |
const (
tokenEnc = iota
quotedEnc
base64Enc
)
// write a string in a legible encoding to buf
func writeString(buf *bytes.Buffer, a []byte) {
// test to see what sort of encoding is best to use
encoding := tokenEnc
acc := make([]byte, len(a), len(a))
for i, c := range a {
acc[i] = c
switch {
case bytes.IndexByte(tokenChar, c) > -1:
continue
case (encoding == tokenEnc) && bytes.IndexByte(stringEncChar, c) > -1:
encoding = quotedEnc
strAcc := make([]byte, i, len(a))
copy(strAcc, acc)
for j := i; j < len(a); j++ {
c := a[j]
if bytes.IndexByte(stringEncChar, c) < 0 {
encoding = base64Enc
break
}
switch c {
case '\b':
strAcc = append(strAcc, []byte("\\b")...)
case '\t':
strAcc = append(strAcc, []byte("\\t")...)
case '\v':
strAcc = append(strAcc, []byte("\\v")...)
case '\n':
strAcc = append(strAcc, []byte("\\n")...)
case '\f':
strAcc = append(strAcc, []byte("\\f")...)
case '"':
strAcc = append(strAcc, []byte("\\\"")...)
case '\'':
strAcc = append(strAcc, []byte("'")...)
case '\\':
strAcc = append(strAcc, []byte("\\\\")...)
case '\r':
strAcc = append(strAcc, []byte("\\r")...)
default:
strAcc = append(strAcc, c)
}
}
if encoding == quotedEnc {
buf.WriteString("\"")
buf.Write(strAcc)
buf.WriteString("\"")
return
}
default:
encoding = base64Enc
break
}
}
switch encoding {
case base64Enc:
buf.WriteString("|" + base64Encoding.EncodeToString(acc) + "|")
case tokenEnc:
buf.Write(acc)
default:
panic("Encoding is neither base64 nor token")
}
}
func (a Atom) string(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[")
writeString(buf, a.DisplayHint)
buf.WriteString("]")
}
if len(a.Value) == 0 {
buf.WriteString("")
} else {
writeString(buf, a.Value)
}
return
}
func (a Atom) Base64String() (s string) {
return "{" + base64Encoding.EncodeToString(a.Pack()) + "}"
}
func (a Atom) Equal(b Sexp) bool {
switch b := b.(type) {
case Atom:
return bytes.Equal(a.DisplayHint, b.DisplayHint) && bytes.Equal(a.Value, b.Value)
default:
return false
}
return false
}
func (l List) Pack() []byte {
buf := bytes.NewBuffer(nil)
l.pack(buf)
return buf.Bytes()
}
func (l List) pack(buf *bytes.Buffer) {
buf.WriteString("(")
for _, datum := range l {
datum.pack(buf)
}
buf.WriteString(")")
}
func (l List) Base64String() string {
return "{" + base64Encoding.EncodeToString(l.Pack()) + "}"
}
func (l List) String() string {
buf := bytes.NewBuffer(nil)
l.string(buf)
return buf.String()
}
func (l List) string(buf *bytes.Buffer) {
buf.WriteString("(")
for i, datum := range l {
datum.string(buf)
if i < len(l)-1 {
buf.WriteString(" ")
}
}
buf.WriteString(")")
}
func (a List) Equal(b Sexp) bool {
switch b := b.(type) {
case List:
if len(a) != len(b) {
return false
} else {
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
default:
return false
}
return false
}
func (l List) PackedLen() (size int) {
size = 2 // ()
for _, element := range l {
size += element.PackedLen()
}
return size
}
// Parse returns the first S-expression in byte string s, the unparsed
// rest of s and any error encountered
func Parse(s []byte) (sexpr Sexp, rest []byte, err error) {
//return parseSexp(bytes)
r := bufio.NewReader(bytes.NewReader(s))
sexpr, err = Read(r)
if err != nil && err != io.EOF {
return nil, nil, err
}
rest, err = ioutil.ReadAll(r)
// don't confuse calling code with EOFs
if err == io.EOF {
err = nil
}
return sexpr, rest, err
}
func IsList(s Sexp) bool {
s, ok := s.(List)
return ok
}
// Read a single S-expression from buffered IO r, returning any error
// encountered. May return io.EOF if at end of r; may return a valid
// S-expression and io.EOF if the EOF was encountered at the end of
// parsing.
func Read(r *bufio.Reader) (s Sexp, err error) {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch c {
case '{':
enc, err := r.ReadBytes('}')
acc := make([]byte, 0, len(enc)-1)
for _, c := range enc[:len(enc)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
str := make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(str, acc)
if err != nil {
return nil, err
}
s, err = Read(bufio.NewReader(bytes.NewReader(str[:n])))
if err == nil || err == io.EOF {
return s, nil
} else {
return nil, err
}
case '(':
l := List{}
// skip whitespace
for {
c, err := r.ReadByte()
switch {
case c == ')':
return l, err
case bytes.IndexByte(whitespaceChar, c) == -1:
r.UnreadByte()
element, err := Read(r)
if err != nil {
return nil, err
}
l = append(l, element)
}
if err != nil {
return nil, err
}
}
default:
return readString(r, c)
}
if err != nil {
return s, err
}
panic("Can't reach here")
}
func readString(r *bufio.Reader, first byte) (s Sexp, err error) {
var displayHint []byte
if first == '[' {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
displayHint, err = readSimpleString(r, c)
if err != nil {
return nil, err
}
c, err = r.ReadByte()
if err != nil {
return nil, err
}
if c != ']' {
return nil, fmt.Errorf("']' expected to end display hint; %c found", c)
}
first, _ = r.ReadByte() // let error be caught by readSimpleString
}
str, err := readSimpleString(r, first)
return Atom{Value: str, DisplayHint: displayHint}, err
}
func readSimpleString(r *bufio.Reader, first byte) (s []byte, err error) {
switch {
case bytes.IndexByte(decimalDigit, first) > -1:
return readLengthDelimited(r, first)
case first == '#':
s, err := readHex(r)
if err != nil {
return nil, err
}
return s, nil
case first == '|':
s, err := readBase64(r)
if err != nil {
return nil, err
}
return s, nil
case first == '"':
s, err := readQuotedString(r, -1)
if err != nil {
return nil, err
}
return s, nil
case bytes.IndexByte(tokenChar, first) > -1:
s = append(s, first)
for {
c, err := r.ReadByte()
if bytes.IndexByte(tokenChar, c) == -1 {
r.UnreadByte()
return s, err
}
s = append(s, c)
if err != nil {
return nil, err
}
}
}
panic("can't get here")
}
func readLengthDelimited(r *bufio.Reader, first byte) (s []byte, err error) {
acc := make([]byte, 1)
acc[0] = first
for {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch {
case bytes.IndexByte(decimalDigit, c) > -1:
acc = append(acc, c)
case c == ':':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
acc = make([]byte, 0, length)
buf := make([]byte, length)
for n, err := r.Read(buf); int64(len(acc)) < length; n, err = r.Read(buf[:length-int64(len(acc))]) {
acc = append(acc, buf[:n]...)
if err != nil {
return acc, err
}
}
return acc, nil
case c == '#':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readHex(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
case c == '|':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readBase64(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
default:
return nil, fmt.Errorf("Expected integer; found %c", c)
}
}
panic("Can't get here")
}
func readHex(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('#')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, hex.DecodedLen(len(acc)))
n, err := hex.Decode(s, acc)
return s[:n], err
}
func readBase64(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('|')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(s, acc)
return s[:n], err
}
type quoteState int
const (
inQuote quoteState = iota
inEscape
inNewlineEscape
inReturnEscape
inHex1
inHex2
inOctal1
inOctal2
inOctal3
)
func readQuotedString(r *bufio.Reader, length int) (s []byte, err error) {
var acc, escape []byte
if length >= 0 {
acc = make([]byte, 0, length)
} else {
acc = make([]byte, 0)
}
escape = make([]byte, 3)
state := inQuote
for c, err := r.ReadByte(); err == nil; c, err = r.ReadByte() {
switch state {
case inQuote:
switch c {
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, err
case '\\':
state = inEscape
default:
acc = append(acc, c)
}
case inEscape:
switch c {
case byte('b'):
acc = append(acc, '\b')
state = inQuote
case byte('t'):
acc = append(acc, '\t')
state = inQuote
case byte('v'):
acc = append(acc, '\v')
state = inQuote
case byte('n'):
acc = append(acc, '\n')
state = inQuote
case byte('f'):
acc = append(acc, '\f')
state = inQuote
case byte('r'):
acc = append(acc, '\r')
state = inQuote
case byte('"'):
acc = append(acc, '"')
state = inQuote
case byte('\''):
acc = append(acc, '\'')
state = inQuote
case byte('\\'):
acc = append(acc, '\\')
state = inQuote
case byte('\n'):
state = inNewlineEscape
case '\r':
state = inReturnEscape
case byte('x'):
state = inHex1
default:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inOctal2
escape[0] = c
} else {
return nil, fmt.Errorf("Unrecognised escape character %c", rune(c))
}
}
case inNewlineEscape:
switch c {
case '\r':
// pass
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, nil
default:
acc = append(acc, c)
}
state = inQuote
case inReturnEscape:
switch c {
case '\n':
// pass
case '"':
if length > 0 && len(acc) != length {
return nil, fmt.Errorf("Length mismatch")
}
return acc, nil
default:
acc = append(acc, c)
}
state = inQuote
case inHex1:
if bytes.IndexByte(hexadecimalDigit, c) > -1 {
state = inHex2
escape[0] = c
} else {
return nil, fmt.Errorf("Expected hexadecimal digit; got %c", c)
}
case inHex2:
if bytes.IndexByte(hexadecimalDigit, c) > -1 {
state = inQuote
escape[1] = c
num, err := strconv.ParseUint(string(escape[:2]), 16, 8)
if err != nil {
return nil, err
}
acc = append(acc, byte(num))
} else {
return nil, fmt.Errorf("Expected hexadecimal digit; got %c", c)
}
case inOctal2:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inOctal3
escape[1] = c
} else {
return nil, fmt.Errorf("Expected octal digit; got %c", c)
}
case inOctal3:
if bytes.IndexByte(octalDigit, c) > -1 {
state = inQuote
escape[2] = c
num, err := strconv.ParseUint(string(escape[:3]), 8, 8)
if err != nil {
return nil, err
}
acc = append(acc, byte(num))
} else {
return nil, fmt.Errorf("Expected octal digit; got %c", c)
}
}
}
return nil, fmt.Errorf("Unterminated string")
}
| {
buf := bytes.NewBuffer(nil)
a.string(buf)
return buf.String()
} | identifier_body |
dpfile.go | // Public domain, Randall Farmer, 2013
package dpfile
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"github.com/twotwotwo/dltp/alloc"
"github.com/twotwotwo/dltp/diff"
"github.com/twotwotwo/dltp/mwxmlchunk"
sref "github.com/twotwotwo/dltp/sourceref"
"github.com/twotwotwo/dltp/stream"
"github.com/twotwotwo/dltp/zip"
"hash/fnv"
"io"
"os"
"path"
"path/filepath"
"regexp" // validating input filenames
"runtime"
)
/*
DELTAPACKER FILE FORMAT
DeltaPacker files have a text preamble then binary data. Though the dltp program also
(de)compresses dltp files using bzip2, gzip, etc., that's out of scope here.
The text preamble has the following lines (each ending \n):
- The format name (right now the literal string "DeltaPacker")
- the source URL (now a placeholder)
- the format URL (now a placeholder; if it's updated, we'll print out this URL
as part of the error message)
- a blank line
- a list of files, starting with the output file (only "safe" chars allowed; see
safeFilenameStr regexp below)
- a blank line
They're followed by binary diffs each headed with a source reference, which consists
of three varints (written/read by SourceRef.Write and ReadSource):
source file number (signed; -1 means no source)
start offset (unsigned)
source length (unsigned)
then the 32-bit FNV-1a checksum of the source material (checksums are the only
fixed-size ints in the diff format), then the binary diff, which ends with a 0
instruction (see diff.Patch), then the output checksum.
A source info header with ID, offset, and length all 0 marks the end of the
diffs.
You can see dltp.go for invocation with all the bells and whistles, but use of these
classes goes roughly like:
dpw := dpfile.NewWriter(out, workingDir, sources, [options])
for dpw.WriteSegment() {} // turn XML into diffs until you run out
dpw.Close() // write end marker etc., flush output
dpr := dpfile.NewReader(in, workingDir, streaming) // streaming=true means reading from stdin
for dpr.ReadSegment() {} // turn diffs into XML as long as you can
dpr.Close() // wrap up
The writer implementation does some paperwork to run multiple DiffTasks at once.
There are some vestiges of support for diffing successive revs of a page
against each other (e.g., in an incremental file). This would be nice to revive
but isn't that close now.
Some potential format changes, some breaking, some not:
- append an "index" after the end marker mapping either SegmentKeys or byte ranges
to where their diffs start in the file (non-breaking)
- replace the placeholder source URL with the URL of a "manifest" file listing
everything available from the source
- replace the source names with URLs, and add (likely tab-separated) other data like
size/timestamp/crypto and non-crypto checksums
- any crypto checksum might be a tweaked mode we can run in parallel, e.g., break
input into 64kb pages then hash the hashes
- breaking, but could edit format now to make it a TSV table so it'd be sort of
nonbreaking
*/
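// Editorial sketch of the driver loops described above; it is not part of the
// original file (dltp.go does the real invocation) and the option values and
// stream arguments here are illustrative only.
func exampleDriverLoops(diffOut io.WriteCloser, diffIn io.Reader, workingDir *os.File, sourceNames []string) {
	dpw := NewWriter(diffOut, workingDir, sourceNames, false, false, 0, false)
	for dpw.WriteSegment() {
	}
	dpw.Close()
	dpr := NewReader(diffIn, workingDir, false) // false: write a file rather than stream to stdout
	for dpr.ReadSegment() {
	}
	dpr.Close()
}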
// 386: individual values (segment lengths) need to be <2GB because of the ints
// here
func writeVarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutVarint(encBuf[:], int64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (varint)")
}
}
func writeUvarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutUvarint(encBuf[:], uint64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (uvarint)")
}
}
type checksum uint32
func dpchecksum(text []byte) checksum {
h := fnv.New32a()
h.Write(text)
return checksum(h.Sum32())
}
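// Editorial sketch of the per-segment header layout described in the format
// comment above; the real writing happens in sref.SourceRef.Write and
// DiffTask.Diff, and this helper is illustrative only.
func writeSegmentHeaderSketch(w io.Writer, sourceNumber, start, length int, sourceSum checksum) {
	writeVarint(w, sourceNumber) // source file number (signed; -1 means no source)
	writeUvarint(w, start)       // start offset (unsigned)
	writeUvarint(w, length)      // source length (unsigned)
	binary.Write(w, binary.BigEndian, sourceSum) // fixed-size FNV-1a checksum of the source
	// The binary diff instructions follow, terminated by a 0 instruction,
	// and then the checksum of the diff's output.
}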
type DiffTask struct {
s diff.MatchState
source sref.SourceRef
resultBuf []byte
done chan int
}
type DPBlock struct {
Key mwxmlchunk.SegmentKey
Offs int64
}
type DPBlocks []DPBlock
type DPWriter struct {
out *bufio.Writer
zOut io.WriteCloser
sources []*mwxmlchunk.SegmentReader
lastSeg []byte
tasks []DiffTask
blocks DPBlocks
taskCh chan *DiffTask
slots int
winner int
}
type DPReader struct {
in *bufio.Reader
out *bufio.Writer
sources []io.ReaderAt
lastSeg []byte
ChangeDump bool
}
var MaxSourceLength = uint64(1e8)
func NewWriter(zOut io.WriteCloser, workingDir *os.File, sourceNames []string, lastRevOnly bool, limitToNS bool, ns int, cutMeta bool) (dpw DPWriter) {
for i, name := range sourceNames {
r, err := zip.Open(name, workingDir)
if err != nil {
panic("cannot open source: " + err.Error())
}
f := stream.NewReaderAt(r)
dpw.sources = append(
dpw.sources,
mwxmlchunk.NewSegmentReader(f, int64(i), lastRevOnly, limitToNS, ns, cutMeta),
)
// only use snipping options when reading first source
lastRevOnly = false
limitToNS = false
cutMeta = false
}
dpw.zOut = zOut
dpw.out = bufio.NewWriter(zOut)
_, err := dpw.out.WriteString("DeltaPacker\nno format URL yet\nno source URL\n\n")
if err != nil {
panic(err)
}
for _, name := range sourceNames {
// baseName is right for both URLs + Windows file paths
baseName := path.Base(filepath.Base(name))
niceOutName := zip.UnzippedName(baseName)
fmt.Fprintln(dpw.out, niceOutName)
}
err = dpw.out.WriteByte('\n')
if err != nil {
panic(err)
}
dpw.out.Flush()
dpw.slots = 100 // really a queue len, not thread count
dpw.taskCh = make(chan *DiffTask, dpw.slots)
for workerNum := 0; workerNum < runtime.NumCPU(); workerNum++ {
go doDiffTasks(dpw.taskCh)
}
dpw.tasks = make([]DiffTask, dpw.slots)
for i := range dpw.tasks {
t := &dpw.tasks[i]
t.s.Out = &bytes.Buffer{}
t.done = make(chan int, 1)
t.done <- 1
}
return
}
// a DiffTask wraps a MatchState with channel bookkeeping
func (t *DiffTask) Diff() { // really SegmentTask but arh
bOrig := t.s.B // is truncated by Diff
t.source.Write(t.s.Out)
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(t.s.A))
t.s.Diff()
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(bOrig))
select {
case t.done <- 1:
return
default:
panic("same difftask being used twice!")
}
}
func doDiffTasks(tc chan *DiffTask) {
for t := range tc {
t.Diff()
}
}
func (dpw *DPWriter) WriteSegment() bool {
// find the matching texts
b := dpw.sources[0]
a := dpw.sources[1:]
source := sref.SourceNotFound
aText := []byte(nil)
bText, key, _, revFetchErr := b.ReadNext()
if revFetchErr != nil && revFetchErr != io.EOF {
panic(revFetchErr)
}
for _, src := range a {
err := error(nil)
aText, _, source, err = src.ReadTo(key)
if err != nil && err != io.EOF {
panic(err)
}
if len(aText) > 0 {
break
}
}
// write something out
if source.Length > MaxSourceLength {
source = sref.SourceNotFound
aText = nil
}
t := &dpw.tasks[dpw.winner%dpw.slots]
<-t.done
_, err := t.s.Out.WriteTo(dpw.out)
if err != nil {
panic("failed to write output: " + err.Error())
}
t.source = source
t.s.A = append(t.s.A[:0], aText...)
t.s.B = append(t.s.B[:0], bText...)
t.s.Out.Reset()
dpw.taskCh <- t
dpw.winner++
if revFetchErr == io.EOF {
return false
}
return true
}
func (dpw *DPWriter) Close() {
for i := range dpw.tasks { // heh, we have to use i
t := &dpw.tasks[(dpw.winner+i)%dpw.slots]
<-t.done
t.s.Out.WriteTo(dpw.out)
}
close(dpw.taskCh)
sref.EOFMarker.Write(dpw.out)
dpw.out.Flush()
if dpw.zOut != nil {
dpw.zOut.Close()
}
for _, sr := range dpw.sources {
sr.Close()
}
//fmt.Println("Packed successfully")
}
func readLineOrPanic(in *bufio.Reader) string |
var safeFilenamePat *regexp.Regexp
const safeFilenameStr = "^[-a-zA-Z0-9_.]*$"
func panicOnUnsafeName(filename string) string {
if safeFilenamePat == nil {
safeFilenamePat = regexp.MustCompile(safeFilenameStr)
}
if !safeFilenamePat.MatchString(filename) {
panic(fmt.Sprint("unsafe filename: ", filename))
}
return filename
}
func NewReader(in io.Reader, workingDir *os.File, streaming bool) (dpr DPReader) {
dpr.in = bufio.NewReader(in)
formatName := readLineOrPanic(dpr.in)
expectedFormatName := "DeltaPacker"
badFormat := false
if formatName != expectedFormatName {
badFormat = true
}
formatUrl := readLineOrPanic(dpr.in)
if formatUrl != "no format URL yet" {
if formatUrl[:4] == "http" {
panic("Format has been updated. Go to " + formatUrl + " for an updated version of this utility.")
}
badFormat = true
}
if badFormat {
panic("Didn't see the expected format name in the header. Either the input isn't actually a dltp file or the format has changed you need to download a newer version of this tool.")
}
sourceUrl := readLineOrPanic(dpr.in) // discard source URL
if sourceUrl == "" {
panic("Expected a non-blank source URL line")
}
expectedBlank := readLineOrPanic(dpr.in)
if expectedBlank != "" {
panic("Expected a blank line after source URL")
}
// open the first source, a.k.a. the output, for writing:
dirName := workingDir.Name()
outputName := panicOnUnsafeName(readLineOrPanic(dpr.in))
outputPath := path.Join(dirName, outputName)
var outFile *os.File
var err error
if streaming {
outFile = os.Stdout
} else {
outFile, err = os.Create(outputPath)
if err != nil {
panic("cannot create output")
}
}
dpr.out = bufio.NewWriter(outFile)
// open all sources for reading, including the output
for sourceName := outputName; sourceName != ""; sourceName = panicOnUnsafeName(readLineOrPanic(dpr.in)) {
if streaming && sourceName == outputName {
dpr.sources = append(dpr.sources, nil) // don't read from me!
continue
}
sourcePath := path.Join(dirName, sourceName)
zipReader, err := zip.Open(sourcePath, workingDir)
if err != nil {
panic("could not open source " + sourceName + ": " + err.Error())
}
dpr.sources = append(dpr.sources, zipReader)
}
if len(dpr.sources) < 2 {
panic("Need at least one source besides the output")
}
// we've read the blank line so we're ready for business
return
}
var readBuf []byte // not parallel-safe, but reading isn't threaded
func (dpr *DPReader) ReadSegment() bool { // writes to self.out
source := sref.ReadSource(dpr.in)
if source == sref.EOFMarker {
if dpr.ChangeDump {
_, err := dpr.out.Write(dpr.lastSeg)
if err != nil {
panic("couldn't write expanded file")
}
}
return false
}
if source.Length > MaxSourceLength {
//fmt.Println("Max source len set to", MaxSourceLength)
panic("input file (segment) using too large a source")
}
readBuf = alloc.Bytes(readBuf, int(source.Length))
orig := readBuf
// TODO: validate source number, start, length validity here
if source == sref.PreviousSegment {
panic("segment chaining not implemented")
} else if source != sref.SourceNotFound {
if int(source.SourceNumber) >= len(dpr.sources) {
panic("too-high source number provided")
}
srcFile := dpr.sources[source.SourceNumber]
_, err := srcFile.ReadAt(orig, int64(source.Start))
if err != nil {
//fmt.Println("error reading from source", source)
panic(err)
}
}
var sourceCksum checksum
err := binary.Read(dpr.in, binary.BigEndian, &sourceCksum)
if err != nil {
panic("couldn't read expected checksum")
}
text := diff.Patch(orig, dpr.in)
cksum := dpchecksum(text)
var fileCksum checksum
err = binary.Read(dpr.in, binary.BigEndian, &fileCksum)
if err != nil {
panic("couldn't read expected checksum")
}
if cksum != fileCksum {
origCksum := dpchecksum(orig)
panicMsg := ""
if origCksum == sourceCksum {
if sourceCksum == 0 { // no source checksum
panicMsg = "checksum mismatch. it's possible you don't have the original file this diff was created against, or it could be a bug in dltp."
} else {
panicMsg = "sorry; it looks like source file you have isn't original file this diff was created against."
}
} else {
panicMsg = "checksum mismatch. this looks likely to be a bug in dltp."
}
os.Remove("dltp-error-report.txt")
crashReport, err := os.Create("dltp-error-report.txt")
if err == nil {
// wish filenames etc were available here
fmt.Fprintln(crashReport, panicMsg)
fmt.Fprintln(crashReport, "SourceRef:", source)
crashReport.WriteString("Original text:\n\n")
crashReport.Write(orig)
crashReport.WriteString("\n\nPatched output:\n\n")
crashReport.Write(text)
crashReport.Close()
panicMsg += " wrote additional information to dltp-error-report.txt"
} else {
panicMsg += " couldn't write additional information (" + err.Error() + ")"
}
panic(panicMsg)
}
// write if not ChangeDump or if changed or if this is preamble
if !dpr.ChangeDump || !bytes.Equal(text, orig) || dpr.lastSeg == nil {
_, err := dpr.out.Write(text)
if err != nil {
panic("couldn't write expanded file")
}
}
dpr.lastSeg = text
return true
}
func (dpr *DPReader) Close() {
for _, r := range dpr.sources {
if c, ok := r.(io.Closer); ok {
c.Close()
}
}
dpr.out.Flush()
}
| {
line, err := in.ReadString('\n')
if err != nil {
if err == io.EOF {
panic("Premature EOF reading line")
} else {
panic(err)
}
}
if len(line) > 0 {
return line[:len(line)-1] // chop off \n
}
return line
} | identifier_body |
dpfile.go | // Public domain, Randall Farmer, 2013
package dpfile
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"github.com/twotwotwo/dltp/alloc"
"github.com/twotwotwo/dltp/diff"
"github.com/twotwotwo/dltp/mwxmlchunk"
sref "github.com/twotwotwo/dltp/sourceref"
"github.com/twotwotwo/dltp/stream"
"github.com/twotwotwo/dltp/zip"
"hash/fnv"
"io"
"os"
"path"
"path/filepath"
"regexp" // validating input filenames
"runtime"
)
/*
DELTAPACKER FILE FORMAT
DeltaPacker files have a text preamble then binary data. Though the dltp program also
(de)compresses dltp files using bzip2, gzip, etc., that's out of scope here.
The text preamble has the following lines (each ending \n):
- The format name (right now the literal string "DeltaPacker")
- the format URL (now a placeholder; if it's updated, we'll print out this URL
as part of the error message)
- the source URL (now a placeholder)
- a blank line
- a list of files, starting with the output file (only "safe" chars allowed; see
safeFilenameStr regexp below)
- a blank line
They're followed by binary diffs each headed with a source reference, which consists
of three varints (written/read by SourceRef.Write and ReadSource):
source file number (signed; -1 means no source)
start offset (unsigned)
source length (unsigned)
then the 32-bit FNV-1a checksum of the source material (checksums are the only
fixed-size ints in the diff format), then the binary diff, which ends with a 0
instruction (see diff.Patch), then the output checksum.
A source info header with ID, offset, and length all 0 marks the end of the
diffs.
You can see dltp.go for invocation with all the bells and whistles, but use of these
classes goes roughly like:
dpw := dpfile.NewWriter(out, workingDir, sources, [options])
for dpw.WriteSegment() {} // turn XML into diffs until you run out
dpw.Close() // write end marker etc., flush output
dpr := dpfile.NewReader(in, workingDir, streaming) // streaming=true means reading from stdin
for dpr.ReadSegment() {} // turn diffs into XML as long as you can
dpr.Close() // wrap up
The writer implementation does some paperwork to run multiple DiffTasks at once.
There are some vestiges of support for diffing successive revs of a page
against each other (e.g., in an incremental file). This would be nice to revive
but isn't that close now.
Some potential format changes, some breaking, some not:
- append an "index" after the end marker mapping either SegmentKeys or byte ranges
to where their diffs start in the file (non-breaking)
- replace the placeholder source URL with the URL of a "manifest" file listing
everything available from the source
- replace the source names with URLs, and add (likely tab-separated) other data like
size/timestamp/crypto and non-crypto checksums
- any crypto checksum might be a tweaked mode we can run in parallel, e.g., break
input into 64kb pages then hash the hashes
- breaking, but could edit format now to make it a TSV table so it'd be sort of
nonbreaking
*/
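// Editorial sketch of the driver loops described above; it is not part of the
// original file (dltp.go does the real invocation) and the option values and
// stream arguments here are illustrative only.
func exampleDriverLoops(diffOut io.WriteCloser, diffIn io.Reader, workingDir *os.File, sourceNames []string) {
	dpw := NewWriter(diffOut, workingDir, sourceNames, false, false, 0, false)
	for dpw.WriteSegment() {
	}
	dpw.Close()
	dpr := NewReader(diffIn, workingDir, false) // false: write a file rather than stream to stdout
	for dpr.ReadSegment() {
	}
	dpr.Close()
}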
// 386: individual values (segment lengths) need to be <2GB because of the ints
// here
func writeVarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutVarint(encBuf[:], int64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (varint)")
}
}
func writeUvarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutUvarint(encBuf[:], uint64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (uvarint)")
}
}
type checksum uint32
func dpchecksum(text []byte) checksum {
h := fnv.New32a()
h.Write(text)
return checksum(h.Sum32())
}
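// Editorial sketch of the per-segment header layout described in the format
// comment above; the real writing happens in sref.SourceRef.Write and
// DiffTask.Diff, and this helper is illustrative only.
func writeSegmentHeaderSketch(w io.Writer, sourceNumber, start, length int, sourceSum checksum) {
	writeVarint(w, sourceNumber) // source file number (signed; -1 means no source)
	writeUvarint(w, start)       // start offset (unsigned)
	writeUvarint(w, length)      // source length (unsigned)
	binary.Write(w, binary.BigEndian, sourceSum) // fixed-size FNV-1a checksum of the source
	// The binary diff instructions follow, terminated by a 0 instruction,
	// and then the checksum of the diff's output.
}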
type DiffTask struct {
s diff.MatchState
source sref.SourceRef
resultBuf []byte
done chan int
}
type DPBlock struct {
Key mwxmlchunk.SegmentKey
Offs int64
}
type DPBlocks []DPBlock
type DPWriter struct {
out *bufio.Writer
zOut io.WriteCloser
sources []*mwxmlchunk.SegmentReader
lastSeg []byte
tasks []DiffTask
blocks DPBlocks
taskCh chan *DiffTask
slots int
winner int
}
type DPReader struct {
in *bufio.Reader
out *bufio.Writer
sources []io.ReaderAt
lastSeg []byte
ChangeDump bool
}
var MaxSourceLength = uint64(1e8)
func NewWriter(zOut io.WriteCloser, workingDir *os.File, sourceNames []string, lastRevOnly bool, limitToNS bool, ns int, cutMeta bool) (dpw DPWriter) {
for i, name := range sourceNames {
r, err := zip.Open(name, workingDir)
if err != nil {
panic("cannot open source: " + err.Error())
}
f := stream.NewReaderAt(r)
dpw.sources = append(
dpw.sources,
mwxmlchunk.NewSegmentReader(f, int64(i), lastRevOnly, limitToNS, ns, cutMeta),
)
// only use snipping options when reading first source
lastRevOnly = false
limitToNS = false
cutMeta = false
}
dpw.zOut = zOut
dpw.out = bufio.NewWriter(zOut)
_, err := dpw.out.WriteString("DeltaPacker\nno format URL yet\nno source URL\n\n")
if err != nil {
panic(err)
}
for _, name := range sourceNames {
// baseName is right for both URLs + Windows file paths
baseName := path.Base(filepath.Base(name))
niceOutName := zip.UnzippedName(baseName)
fmt.Fprintln(dpw.out, niceOutName)
}
err = dpw.out.WriteByte('\n')
if err != nil {
panic(err)
}
dpw.out.Flush()
dpw.slots = 100 // really a queue len, not thread count
dpw.taskCh = make(chan *DiffTask, dpw.slots)
for workerNum := 0; workerNum < runtime.NumCPU(); workerNum++ {
go doDiffTasks(dpw.taskCh)
}
dpw.tasks = make([]DiffTask, dpw.slots)
for i := range dpw.tasks {
t := &dpw.tasks[i]
t.s.Out = &bytes.Buffer{}
t.done = make(chan int, 1)
t.done <- 1
}
return
}
// a DiffTask wraps a MatchState with channel bookkeeping
func (t *DiffTask) Diff() { // really SegmentTask but arh
bOrig := t.s.B // is truncated by Diff
t.source.Write(t.s.Out)
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(t.s.A))
t.s.Diff()
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(bOrig))
select {
case t.done <- 1:
return
default:
panic("same difftask being used twice!")
}
}
func doDiffTasks(tc chan *DiffTask) {
for t := range tc {
t.Diff()
}
}
func (dpw *DPWriter) WriteSegment() bool {
// find the matching texts
b := dpw.sources[0]
a := dpw.sources[1:]
source := sref.SourceNotFound
aText := []byte(nil)
bText, key, _, revFetchErr := b.ReadNext()
if revFetchErr != nil && revFetchErr != io.EOF {
panic(revFetchErr)
}
for _, src := range a {
err := error(nil)
aText, _, source, err = src.ReadTo(key)
if err != nil && err != io.EOF {
panic(err)
}
if len(aText) > 0 {
break
}
}
// write something out
if source.Length > MaxSourceLength {
source = sref.SourceNotFound
aText = nil
}
t := &dpw.tasks[dpw.winner%dpw.slots]
<-t.done
_, err := t.s.Out.WriteTo(dpw.out)
if err != nil {
panic("failed to write output: " + err.Error())
}
t.source = source
t.s.A = append(t.s.A[:0], aText...)
t.s.B = append(t.s.B[:0], bText...)
t.s.Out.Reset()
dpw.taskCh <- t
dpw.winner++
if revFetchErr == io.EOF {
return false
}
return true
}
func (dpw *DPWriter) Close() {
for i := range dpw.tasks { // heh, we have to use i
t := &dpw.tasks[(dpw.winner+i)%dpw.slots]
<-t.done
t.s.Out.WriteTo(dpw.out)
}
close(dpw.taskCh)
sref.EOFMarker.Write(dpw.out)
dpw.out.Flush()
if dpw.zOut != nil {
dpw.zOut.Close()
}
for _, sr := range dpw.sources {
sr.Close()
}
//fmt.Println("Packed successfully")
}
func readLineOrPanic(in *bufio.Reader) string {
line, err := in.ReadString('\n')
if err != nil {
if err == io.EOF {
panic("Premature EOF reading line")
} else {
panic(err)
}
}
if len(line) > 0 {
return line[:len(line)-1] // chop off \n
}
return line
}
var safeFilenamePat *regexp.Regexp
const safeFilenameStr = "^[-a-zA-Z0-9_.]*$"
func panicOnUnsafeName(filename string) string {
if safeFilenamePat == nil {
safeFilenamePat = regexp.MustCompile(safeFilenameStr)
}
if !safeFilenamePat.MatchString(filename) {
panic(fmt.Sprint("unsafe filename: ", filename))
}
return filename
}
func NewReader(in io.Reader, workingDir *os.File, streaming bool) (dpr DPReader) {
dpr.in = bufio.NewReader(in)
formatName := readLineOrPanic(dpr.in)
expectedFormatName := "DeltaPacker"
badFormat := false
if formatName != expectedFormatName {
badFormat = true
}
formatUrl := readLineOrPanic(dpr.in)
if formatUrl != "no format URL yet" {
if formatUrl[:4] == "http" {
panic("Format has been updated. Go to " + formatUrl + " for an updated version of this utility.")
} |
if badFormat {
panic("Didn't see the expected format name in the header. Either the input isn't actually a dltp file or the format has changed you need to download a newer version of this tool.")
}
sourceUrl := readLineOrPanic(dpr.in) // discard source URL
if sourceUrl == "" {
panic("Expected a non-blank source URL line")
}
expectedBlank := readLineOrPanic(dpr.in)
if expectedBlank != "" {
panic("Expected a blank line after source URL")
}
// open the first source, a.k.a. the output, for writing:
dirName := workingDir.Name()
outputName := panicOnUnsafeName(readLineOrPanic(dpr.in))
outputPath := path.Join(dirName, outputName)
var outFile *os.File
var err error
if streaming {
outFile = os.Stdout
} else {
outFile, err = os.Create(outputPath)
if err != nil {
panic("cannot create output")
}
}
dpr.out = bufio.NewWriter(outFile)
// open all sources for reading, including the output
for sourceName := outputName; sourceName != ""; sourceName = panicOnUnsafeName(readLineOrPanic(dpr.in)) {
if streaming && sourceName == outputName {
dpr.sources = append(dpr.sources, nil) // don't read from me!
continue
}
sourcePath := path.Join(dirName, sourceName)
zipReader, err := zip.Open(sourcePath, workingDir)
if err != nil {
panic("could not open source " + sourceName + ": " + err.Error())
}
dpr.sources = append(dpr.sources, zipReader)
}
if len(dpr.sources) < 2 {
panic("Need at least one source besides the output")
}
// we've read the blank line so we're ready for business
return
}
var readBuf []byte // not parallel-safe, but reading isn't threaded
func (dpr *DPReader) ReadSegment() bool { // writes to self.out
source := sref.ReadSource(dpr.in)
if source == sref.EOFMarker {
if dpr.ChangeDump {
_, err := dpr.out.Write(dpr.lastSeg)
if err != nil {
panic("couldn't write expanded file")
}
}
return false
}
if source.Length > MaxSourceLength {
//fmt.Println("Max source len set to", MaxSourceLength)
panic("input file (segment) using too large a source")
}
readBuf = alloc.Bytes(readBuf, int(source.Length))
orig := readBuf
// TODO: validate source number, start, length validity here
if source == sref.PreviousSegment {
panic("segment chaining not implemented")
} else if source != sref.SourceNotFound {
if int(source.SourceNumber) >= len(dpr.sources) {
panic("too-high source number provided")
}
srcFile := dpr.sources[source.SourceNumber]
_, err := srcFile.ReadAt(orig, int64(source.Start))
if err != nil {
//fmt.Println("error reading from source", source)
panic(err)
}
}
var sourceCksum checksum
err := binary.Read(dpr.in, binary.BigEndian, &sourceCksum)
if err != nil {
panic("couldn't read expected checksum")
}
text := diff.Patch(orig, dpr.in)
cksum := dpchecksum(text)
var fileCksum checksum
err = binary.Read(dpr.in, binary.BigEndian, &fileCksum)
if err != nil {
panic("couldn't read expected checksum")
}
if cksum != fileCksum {
origCksum := dpchecksum(orig)
panicMsg := ""
if origCksum == sourceCksum {
if sourceCksum == 0 { // no source checksum
panicMsg = "checksum mismatch. it's possible you don't have the original file this diff was created against, or it could be a bug in dltp."
} else {
panicMsg = "sorry; it looks like source file you have isn't original file this diff was created against."
}
} else {
panicMsg = "checksum mismatch. this looks likely to be a bug in dltp."
}
os.Remove("dltp-error-report.txt")
crashReport, err := os.Create("dltp-error-report.txt")
if err == nil {
// wish filenames etc were available here
fmt.Fprintln(crashReport, panicMsg)
fmt.Fprintln(crashReport, "SourceRef:", source)
crashReport.WriteString("Original text:\n\n")
crashReport.Write(orig)
crashReport.WriteString("\n\nPatched output:\n\n")
crashReport.Write(text)
crashReport.Close()
panicMsg += " wrote additional information to dltp-error-report.txt"
} else {
panicMsg += " couldn't write additional information (" + err.Error() + ")"
}
panic(panicMsg)
}
// write if not ChangeDump or if changed or if this is preamble
if !dpr.ChangeDump || !bytes.Equal(text, orig) || dpr.lastSeg == nil {
_, err := dpr.out.Write(text)
if err != nil {
panic("couldn't write expanded file")
}
}
dpr.lastSeg = text
return true
}
func (dpr *DPReader) Close() {
for _, r := range dpr.sources {
if c, ok := r.(io.Closer); ok {
c.Close()
}
}
dpr.out.Flush()
} | badFormat = true
} | random_line_split |
dpfile.go | // Public domain, Randall Farmer, 2013
package dpfile
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"github.com/twotwotwo/dltp/alloc"
"github.com/twotwotwo/dltp/diff"
"github.com/twotwotwo/dltp/mwxmlchunk"
sref "github.com/twotwotwo/dltp/sourceref"
"github.com/twotwotwo/dltp/stream"
"github.com/twotwotwo/dltp/zip"
"hash/fnv"
"io"
"os"
"path"
"path/filepath"
"regexp" // validating input filenames
"runtime"
)
/*
DELTAPACKER FILE FORMAT
DeltaPacker files have a text preamble then binary data. Though the dltp program also
(de)compresses dltp files using bzip2, gzip, etc., that's out of scope here.
The text preamble has the following lines (each ending \n):
- The format name (right now the literal string "DeltaPacker")
- the format URL (now a placeholder; if it's updated, we'll print out this URL
as part of the error message)
- the source URL (now a placeholder)
- a blank line
- a list of files, starting with the output file (only "safe" chars allowed; see
safeFilenameStr regexp below)
- a blank line
They're followed by binary diffs each headed with a source reference, which consists
of three varints (written/read by SourceRef.Write and ReadSource):
source file number (signed; -1 means no source)
start offset (unsigned)
source length (unsigned)
then the 32-bit FNV-1a checksum of the source material (checksums are the only
fixed-size ints in the diff format), then the binary diff, which ends with a 0
instruction (see diff.Patch), then the output checksum.
A source info header with ID, offset, and length all 0 marks the end of the
diffs.
You can see dltp.go for invocation with all the bells and whistles, but use of these
classes goes roughly like:
dpw := dpfile.NewWriter(out, workingDir, sources, [options])
for dpw.WriteSegment() {} // turn XML into diffs until you run out
dpw.Close() // write end marker etc., flush output
dpr := dpfile.NewReader(in, workingDir, streaming) // streaming=true means reading from stdin
for dpr.ReadSegment() {} // turn diffs into XML as long as you can
dpr.Close() // wrap up
The writer implementation does some paperwork to run multiple DiffTasks at once.
There are some vestiges of support for diffing successive revs of a page
against each other (e.g., in an incremental file). This would be nice to revive
but isn't that close now.
Some potential format changes, some breaking, some not:
- append an "index" after the end marker mapping either SegmentKeys or byte ranges
to where their diffs start in the file (non-breaking)
- replace the placeholder source URL with the URL of a "manifest" file listing
everything available from the source
- replace the source names with URLs, and add (likely tab-separated) other data like
size/timestamp/crypto and non-crypto checksums
- any crypto checksum might be a tweaked mode we can run in parallel, e.g., break
input into 64kb pages then hash the hashes
- breaking, but could edit format now to make it a TSV table so it'd be sort of
nonbreaking
*/
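// Illustrative sketch, not part of the original tool: roughly how one diff record's
// header could be written by hand, mirroring what SourceRef.Write plus the fixed-size
// checksum write produce (see DiffTask.Diff below). The function name and parameters
// are assumptions made for this example only.
func writeExampleRecordHeader(w io.Writer, srcNum int, start, length uint64, srcSum checksum) {
	writeVarint(w, srcNum)       // source file number (signed; -1 means no source)
	writeUvarint(w, int(start))  // start offset (unsigned)
	writeUvarint(w, int(length)) // source length (unsigned)
	// the 32-bit FNV-1a checksum of the source material is the only fixed-size int
	binary.Write(w, binary.BigEndian, srcSum)
}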
// 386: individual values (segment lengths) need to be <2GB because of the ints
// here
func writeVarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutVarint(encBuf[:], int64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (varint)")
}
}
func writeUvarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutUvarint(encBuf[:], uint64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (uvarint)")
}
}
type checksum uint32
func dpchecksum(text []byte) checksum {
h := fnv.New32a()
h.Write(text)
return checksum(h.Sum32())
}
type DiffTask struct {
s diff.MatchState
source sref.SourceRef
resultBuf []byte
done chan int
}
type DPBlock struct {
Key mwxmlchunk.SegmentKey
Offs int64
}
type DPBlocks []DPBlock
type DPWriter struct {
out *bufio.Writer
zOut io.WriteCloser
sources []*mwxmlchunk.SegmentReader
lastSeg []byte
tasks []DiffTask
blocks DPBlocks
taskCh chan *DiffTask
slots int
winner int
}
type DPReader struct {
in *bufio.Reader
out *bufio.Writer
sources []io.ReaderAt
lastSeg []byte
ChangeDump bool
}
var MaxSourceLength = uint64(1e8)
func NewWriter(zOut io.WriteCloser, workingDir *os.File, sourceNames []string, lastRevOnly bool, limitToNS bool, ns int, cutMeta bool) (dpw DPWriter) {
for i, name := range sourceNames {
r, err := zip.Open(name, workingDir)
if err != nil {
panic("cannot open source: " + err.Error())
}
f := stream.NewReaderAt(r)
dpw.sources = append(
dpw.sources,
mwxmlchunk.NewSegmentReader(f, int64(i), lastRevOnly, limitToNS, ns, cutMeta),
)
// only use snipping options when reading first source
lastRevOnly = false
limitToNS = false
cutMeta = false
}
dpw.zOut = zOut
dpw.out = bufio.NewWriter(zOut)
_, err := dpw.out.WriteString("DeltaPacker\nno format URL yet\nno source URL\n\n")
if err != nil {
panic(err)
}
for _, name := range sourceNames {
// baseName is right for both URLs + Windows file paths
baseName := path.Base(filepath.Base(name))
niceOutName := zip.UnzippedName(baseName)
fmt.Fprintln(dpw.out, niceOutName)
}
err = dpw.out.WriteByte('\n')
if err != nil {
panic(err)
}
dpw.out.Flush()
dpw.slots = 100 // really a queue len, not thread count
dpw.taskCh = make(chan *DiffTask, dpw.slots)
for workerNum := 0; workerNum < runtime.NumCPU(); workerNum++ {
go doDiffTasks(dpw.taskCh)
}
dpw.tasks = make([]DiffTask, dpw.slots)
for i := range dpw.tasks {
t := &dpw.tasks[i]
t.s.Out = &bytes.Buffer{}
t.done = make(chan int, 1)
t.done <- 1
}
return
}
// a DiffTask wraps a MatchState with channel bookkeeping
func (t *DiffTask) Diff() { // really SegmentTask but arh
bOrig := t.s.B // is truncated by Diff
t.source.Write(t.s.Out)
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(t.s.A))
t.s.Diff()
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(bOrig))
select {
case t.done <- 1:
return
default:
panic("same difftask being used twice!")
}
}
func doDiffTasks(tc chan *DiffTask) {
for t := range tc {
t.Diff()
}
}
func (dpw *DPWriter) WriteSegment() bool {
// find the matching texts
b := dpw.sources[0]
a := dpw.sources[1:]
source := sref.SourceNotFound
aText := []byte(nil)
bText, key, _, revFetchErr := b.ReadNext()
if revFetchErr != nil && revFetchErr != io.EOF {
panic(revFetchErr)
}
for _, src := range a {
err := error(nil)
aText, _, source, err = src.ReadTo(key)
if err != nil && err != io.EOF {
panic(err)
}
if len(aText) > 0 {
break
}
}
// write something out
if source.Length > MaxSourceLength {
source = sref.SourceNotFound
aText = nil
}
t := &dpw.tasks[dpw.winner%dpw.slots]
<-t.done
_, err := t.s.Out.WriteTo(dpw.out)
if err != nil {
panic("failed to write output: " + err.Error())
}
t.source = source
t.s.A = append(t.s.A[:0], aText...)
t.s.B = append(t.s.B[:0], bText...)
t.s.Out.Reset()
dpw.taskCh <- t
dpw.winner++
if revFetchErr == io.EOF {
return false
}
return true
}
func (dpw *DPWriter) Close() {
for i := range dpw.tasks { // heh, we have to use i
t := &dpw.tasks[(dpw.winner+i)%dpw.slots]
<-t.done
t.s.Out.WriteTo(dpw.out)
}
close(dpw.taskCh)
sref.EOFMarker.Write(dpw.out)
dpw.out.Flush()
if dpw.zOut != nil {
dpw.zOut.Close()
}
for _, sr := range dpw.sources {
sr.Close()
}
//fmt.Println("Packed successfully")
}
func readLineOrPanic(in *bufio.Reader) string {
line, err := in.ReadString('\n')
if err != nil {
if err == io.EOF {
panic("Premature EOF reading line")
} else {
panic(err)
}
}
if len(line) > 0 {
return line[:len(line)-1] // chop off \n
}
return line
}
var safeFilenamePat *regexp.Regexp
const safeFilenameStr = "^[-a-zA-Z0-9_.]*$"
func panicOnUnsafeName(filename string) string {
if safeFilenamePat == nil {
safeFilenamePat = regexp.MustCompile(safeFilenameStr)
}
if !safeFilenamePat.MatchString(filename) {
panic(fmt.Sprint("unsafe filename: ", filename))
}
return filename
}
func NewReader(in io.Reader, workingDir *os.File, streaming bool) (dpr DPReader) {
dpr.in = bufio.NewReader(in)
formatName := readLineOrPanic(dpr.in)
expectedFormatName := "DeltaPacker"
badFormat := false
if formatName != expectedFormatName {
badFormat = true
}
formatUrl := readLineOrPanic(dpr.in)
if formatUrl != "no format URL yet" {
if formatUrl[:4] == "http" {
panic("Format has been updated. Go to " + formatUrl + " for an updated version of this utility.")
}
badFormat = true
}
if badFormat {
panic("Didn't see the expected format name in the header. Either the input isn't actually a dltp file or the format has changed you need to download a newer version of this tool.")
}
sourceUrl := readLineOrPanic(dpr.in) // discard source URL
if sourceUrl == "" {
panic("Expected a non-blank source URL line")
}
expectedBlank := readLineOrPanic(dpr.in)
if expectedBlank != "" {
panic("Expected a blank line after source URL")
}
// open the first source, a.k.a. the output, for writing:
dirName := workingDir.Name()
outputName := panicOnUnsafeName(readLineOrPanic(dpr.in))
outputPath := path.Join(dirName, outputName)
var outFile *os.File
var err error
if streaming {
outFile = os.Stdout
} else {
outFile, err = os.Create(outputPath)
if err != nil {
panic("cannot create output")
}
}
dpr.out = bufio.NewWriter(outFile)
// open all sources for reading, including the output
for sourceName := outputName; sourceName != ""; sourceName = panicOnUnsafeName(readLineOrPanic(dpr.in)) {
if streaming && sourceName == outputName {
dpr.sources = append(dpr.sources, nil) // don't read from me!
continue
}
sourcePath := path.Join(dirName, sourceName)
zipReader, err := zip.Open(sourcePath, workingDir)
if err != nil {
panic("could not open source " + sourceName + ": " + err.Error())
}
dpr.sources = append(dpr.sources, zipReader)
}
if len(dpr.sources) < 2 {
panic("Need at least one source besides the output")
}
// we've read the blank line so we're ready for business
return
}
var readBuf []byte // not parallel-safe, but reading isn't threaded
func (dpr *DPReader) ReadSegment() bool { // writes to self.out
source := sref.ReadSource(dpr.in)
if source == sref.EOFMarker {
if dpr.ChangeDump {
_, err := dpr.out.Write(dpr.lastSeg)
if err != nil {
panic("couldn't write expanded file")
}
}
return false
}
if source.Length > MaxSourceLength {
//fmt.Println("Max source len set to", MaxSourceLength)
panic("input file (segment) using too large a source")
}
readBuf = alloc.Bytes(readBuf, int(source.Length))
orig := readBuf
// TODO: validate source number, start, length validity here
if source == sref.PreviousSegment {
panic("segment chaining not implemented")
} else if source != sref.SourceNotFound {
if int(source.SourceNumber) >= len(dpr.sources) {
panic("too-high source number provided")
}
srcFile := dpr.sources[source.SourceNumber]
_, err := srcFile.ReadAt(orig, int64(source.Start))
if err != nil {
//fmt.Println("error reading from source", source)
panic(err)
}
}
var sourceCksum checksum
err := binary.Read(dpr.in, binary.BigEndian, &sourceCksum)
if err != nil {
panic("couldn't read expected checksum")
}
text := diff.Patch(orig, dpr.in)
cksum := dpchecksum(text)
var fileCksum checksum
err = binary.Read(dpr.in, binary.BigEndian, &fileCksum)
if err != nil {
panic("couldn't read expected checksum")
}
if cksum != fileCksum {
origCksum := dpchecksum(orig)
panicMsg := ""
if origCksum == sourceCksum | else {
panicMsg = "checksum mismatch. this looks likely to be a bug in dltp."
}
os.Remove("dltp-error-report.txt")
crashReport, err := os.Create("dltp-error-report.txt")
if err == nil {
// wish filenames etc were available here
fmt.Fprintln(crashReport, panicMsg)
fmt.Fprintln(crashReport, "SourceRef:", source)
crashReport.WriteString("Original text:\n\n")
crashReport.Write(orig)
crashReport.WriteString("\n\nPatched output:\n\n")
crashReport.Write(text)
crashReport.Close()
panicMsg += " wrote additional information to dltp-error-report.txt"
} else {
panicMsg += " couldn't write additional information (" + err.Error() + ")"
}
panic(panicMsg)
}
// write if not ChangeDump or if changed or if this is preamble
if !dpr.ChangeDump || !bytes.Equal(text, orig) || dpr.lastSeg == nil {
_, err := dpr.out.Write(text)
if err != nil {
panic("couldn't write expanded file")
}
}
dpr.lastSeg = text
return true
}
func (dpr *DPReader) Close() {
for _, r := range dpr.sources {
if c, ok := r.(io.Closer); ok {
c.Close()
}
}
dpr.out.Flush()
}
| {
if sourceCksum == 0 { // no source checksum
panicMsg = "checksum mismatch. it's possible you don't have the original file this diff was created against, or it could be a bug in dltp."
} else {
panicMsg = "sorry; it looks like source file you have isn't original file this diff was created against."
}
} | conditional_block |
dpfile.go | // Public domain, Randall Farmer, 2013
package dpfile
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"github.com/twotwotwo/dltp/alloc"
"github.com/twotwotwo/dltp/diff"
"github.com/twotwotwo/dltp/mwxmlchunk"
sref "github.com/twotwotwo/dltp/sourceref"
"github.com/twotwotwo/dltp/stream"
"github.com/twotwotwo/dltp/zip"
"hash/fnv"
"io"
"os"
"path"
"path/filepath"
"regexp" // validating input filenames
"runtime"
)
/*
DELTAPACKER FILE FORMAT
DeltaPacker files have a text preamble then binary data. Though the dltp program also
(de)compresses dltp files using bzip2, gzip, etc., that's out of scope here.
The text preamble has the following lines (each ending \n):
- The format name (right now the literal string "DeltaPacker")
- the format URL (now a placeholder; if it's updated, we'll print out this URL
as part of the error message)
- the source URL (now a placeholder)
- a blank line
- a list of files, starting with the output file (only "safe" chars allowed; see
safeFilenameStr regexp below)
- a blank line
They're followed by binary diffs each headed with a source reference, which consists
of three varints (written/read by SourceRef.Write and ReadSource):
source file number (signed; -1 means no source)
start offset (unsigned)
source length (unsigned)
then the 32-bit FNV-1a checksum of the source material (checksums are the only
fixed-size ints in the diff format), then the binary diff, which ends with a 0
instruction (see diff.Patch), then the output checksum.
A source info header with ID, offset, and length all 0 marks the end of the
diffs.
You can see dltp.go for invocation with all the bells and whistles, but use of these
classes goes roughly like:
dpw := dpfile.NewWriter(out, workingDir, sources, [options])
for dpw.WriteSegment() {} // turn XML into diffs until you run out
dpw.Close() // write end marker etc., flush output
dpr := dpfile.NewReader(in, workingDir, streaming) // streaming=true means reading from stdin
for dpr.ReadSegment() {} // turn diffs into XML as long as you can
dpr.Close() // wrap up
The writer implementation does some paperwork to run multiple DiffTasks at once.
There are some vestiges of support for diffing successive revs of a page
against each other (e.g., in an incremental file). This would be nice to revive
but isn't that close now.
Some potential format changes, some breaking, some not:
- append an "index" after the end marker mapping either SegmentKeys or byte ranges
to where their diffs start in the file (non-breaking)
- replace the placeholder source URL with the URL of a "manifest" file listing
everything available from the source
- replace the source names with URLs, and add (likely tab-separated) other data like
size/timestamp/crypto and non-crypto checksums
- any crypto checksum might be a tweaked mode we can run in parallel, e.g., break
input into 64kb pages then hash the hashes
- breaking, but could edit format now to make it a TSV table so it'd be sort of
nonbreaking
*/
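// For illustration only: a preamble as written by NewWriter below might look like the
// following; the two filenames are invented for this example (output file listed first).
const examplePreamble = "DeltaPacker\n" +
	"no format URL yet\n" +
	"no source URL\n" +
	"\n" +
	"pages-articles-20130304.xml\n" +
	"pages-articles-20130204.xml\n" +
	"\n" // the closing blank line ends the file list; binary diff records follow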
// 386: individual values (segment lengths) need to be <2GB because of the ints
// here
func writeVarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutVarint(encBuf[:], int64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (varint)")
}
}
func writeUvarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutUvarint(encBuf[:], uint64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (uvarint)")
}
}
type checksum uint32
func dpchecksum(text []byte) checksum {
h := fnv.New32a()
h.Write(text)
return checksum(h.Sum32())
}
type DiffTask struct {
s diff.MatchState
source sref.SourceRef
resultBuf []byte
done chan int
}
type DPBlock struct {
Key mwxmlchunk.SegmentKey
Offs int64
}
type DPBlocks []DPBlock
type DPWriter struct {
out *bufio.Writer
zOut io.WriteCloser
sources []*mwxmlchunk.SegmentReader
lastSeg []byte
tasks []DiffTask
blocks DPBlocks
taskCh chan *DiffTask
slots int
winner int
}
type DPReader struct {
in *bufio.Reader
out *bufio.Writer
sources []io.ReaderAt
lastSeg []byte
ChangeDump bool
}
var MaxSourceLength = uint64(1e8)
func NewWriter(zOut io.WriteCloser, workingDir *os.File, sourceNames []string, lastRevOnly bool, limitToNS bool, ns int, cutMeta bool) (dpw DPWriter) {
for i, name := range sourceNames {
r, err := zip.Open(name, workingDir)
if err != nil {
panic("cannot open source: " + err.Error())
}
f := stream.NewReaderAt(r)
dpw.sources = append(
dpw.sources,
mwxmlchunk.NewSegmentReader(f, int64(i), lastRevOnly, limitToNS, ns, cutMeta),
)
// only use snipping options when reading first source
lastRevOnly = false
limitToNS = false
cutMeta = false
}
dpw.zOut = zOut
dpw.out = bufio.NewWriter(zOut)
_, err := dpw.out.WriteString("DeltaPacker\nno format URL yet\nno source URL\n\n")
if err != nil {
panic(err)
}
for _, name := range sourceNames {
// baseName is right for both URLs + Windows file paths
baseName := path.Base(filepath.Base(name))
niceOutName := zip.UnzippedName(baseName)
fmt.Fprintln(dpw.out, niceOutName)
}
err = dpw.out.WriteByte('\n')
if err != nil {
panic(err)
}
dpw.out.Flush()
dpw.slots = 100 // really a queue len, not thread count
dpw.taskCh = make(chan *DiffTask, dpw.slots)
for workerNum := 0; workerNum < runtime.NumCPU(); workerNum++ {
go doDiffTasks(dpw.taskCh)
}
dpw.tasks = make([]DiffTask, dpw.slots)
for i := range dpw.tasks {
t := &dpw.tasks[i]
t.s.Out = &bytes.Buffer{}
t.done = make(chan int, 1)
t.done <- 1
}
return
}
// a DiffTask wraps a MatchState with channel bookkeeping
func (t *DiffTask) Diff() { // really SegmentTask but arh
bOrig := t.s.B // is truncated by Diff
t.source.Write(t.s.Out)
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(t.s.A))
t.s.Diff()
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(bOrig))
select {
case t.done <- 1:
return
default:
panic("same difftask being used twice!")
}
}
func doDiffTasks(tc chan *DiffTask) {
for t := range tc {
t.Diff()
}
}
func (dpw *DPWriter) WriteSegment() bool {
// find the matching texts
b := dpw.sources[0]
a := dpw.sources[1:]
source := sref.SourceNotFound
aText := []byte(nil)
bText, key, _, revFetchErr := b.ReadNext()
if revFetchErr != nil && revFetchErr != io.EOF {
panic(revFetchErr)
}
for _, src := range a {
err := error(nil)
aText, _, source, err = src.ReadTo(key)
if err != nil && err != io.EOF {
panic(err)
}
if len(aText) > 0 {
break
}
}
// write something out
if source.Length > MaxSourceLength {
source = sref.SourceNotFound
aText = nil
}
t := &dpw.tasks[dpw.winner%dpw.slots]
<-t.done
_, err := t.s.Out.WriteTo(dpw.out)
if err != nil {
panic("failed to write output: " + err.Error())
}
t.source = source
t.s.A = append(t.s.A[:0], aText...)
t.s.B = append(t.s.B[:0], bText...)
t.s.Out.Reset()
dpw.taskCh <- t
dpw.winner++
if revFetchErr == io.EOF {
return false
}
return true
}
func (dpw *DPWriter) Close() {
for i := range dpw.tasks { // heh, we have to use i
t := &dpw.tasks[(dpw.winner+i)%dpw.slots]
<-t.done
t.s.Out.WriteTo(dpw.out)
}
close(dpw.taskCh)
sref.EOFMarker.Write(dpw.out)
dpw.out.Flush()
if dpw.zOut != nil {
dpw.zOut.Close()
}
for _, sr := range dpw.sources {
sr.Close()
}
//fmt.Println("Packed successfully")
}
func readLineOrPanic(in *bufio.Reader) string {
line, err := in.ReadString('\n')
if err != nil {
if err == io.EOF {
panic("Premature EOF reading line")
} else {
panic(err)
}
}
if len(line) > 0 {
return line[:len(line)-1] // chop off \n
}
return line
}
var safeFilenamePat *regexp.Regexp
const safeFilenameStr = "^[-a-zA-Z0-9_.]*$"
func panicOnUnsafeName(filename string) string {
if safeFilenamePat == nil {
safeFilenamePat = regexp.MustCompile(safeFilenameStr)
}
if !safeFilenamePat.MatchString(filename) {
panic(fmt.Sprint("unsafe filename: ", filename))
}
return filename
}
func NewReader(in io.Reader, workingDir *os.File, streaming bool) (dpr DPReader) {
dpr.in = bufio.NewReader(in)
formatName := readLineOrPanic(dpr.in)
expectedFormatName := "DeltaPacker"
badFormat := false
if formatName != expectedFormatName {
badFormat = true
}
formatUrl := readLineOrPanic(dpr.in)
if formatUrl != "no format URL yet" {
if formatUrl[:4] == "http" {
panic("Format has been updated. Go to " + formatUrl + " for an updated version of this utility.")
}
badFormat = true
}
if badFormat {
panic("Didn't see the expected format name in the header. Either the input isn't actually a dltp file or the format has changed you need to download a newer version of this tool.")
}
sourceUrl := readLineOrPanic(dpr.in) // discard source URL
if sourceUrl == "" {
panic("Expected a non-blank source URL line")
}
expectedBlank := readLineOrPanic(dpr.in)
if expectedBlank != "" {
panic("Expected a blank line after source URL")
}
// open the first source, a.k.a. the output, for writing:
dirName := workingDir.Name()
outputName := panicOnUnsafeName(readLineOrPanic(dpr.in))
outputPath := path.Join(dirName, outputName)
var outFile *os.File
var err error
if streaming {
outFile = os.Stdout
} else {
outFile, err = os.Create(outputPath)
if err != nil {
panic("cannot create output")
}
}
dpr.out = bufio.NewWriter(outFile)
// open all sources for reading, including the output
for sourceName := outputName; sourceName != ""; sourceName = panicOnUnsafeName(readLineOrPanic(dpr.in)) {
if streaming && sourceName == outputName {
dpr.sources = append(dpr.sources, nil) // don't read from me!
continue
}
sourcePath := path.Join(dirName, sourceName)
zipReader, err := zip.Open(sourcePath, workingDir)
if err != nil {
panic("could not open source " + sourceName + ": " + err.Error())
}
dpr.sources = append(dpr.sources, zipReader)
}
if len(dpr.sources) < 2 {
panic("Need at least one source besides the output")
}
// we've read the blank line so we're ready for business
return
}
var readBuf []byte // not parallel-safe, but reading isn't threaded
func (dpr *DPReader) ReadSegment() bool { // writes to self.out
source := sref.ReadSource(dpr.in)
if source == sref.EOFMarker {
if dpr.ChangeDump {
_, err := dpr.out.Write(dpr.lastSeg)
if err != nil {
panic("couldn't write expanded file")
}
}
return false
}
if source.Length > MaxSourceLength {
//fmt.Println("Max source len set to", MaxSourceLength)
panic("input file (segment) using too large a source")
}
readBuf = alloc.Bytes(readBuf, int(source.Length))
orig := readBuf
// TODO: validate source number, start, length validity here
if source == sref.PreviousSegment {
panic("segment chaining not implemented")
} else if source != sref.SourceNotFound {
if int(source.SourceNumber) >= len(dpr.sources) {
panic("too-high source number provided")
}
srcFile := dpr.sources[source.SourceNumber]
_, err := srcFile.ReadAt(orig, int64(source.Start))
if err != nil {
//fmt.Println("error reading from source", source)
panic(err)
}
}
var sourceCksum checksum
err := binary.Read(dpr.in, binary.BigEndian, &sourceCksum)
if err != nil {
panic("couldn't read expected checksum")
}
text := diff.Patch(orig, dpr.in)
cksum := dpchecksum(text)
var fileCksum checksum
err = binary.Read(dpr.in, binary.BigEndian, &fileCksum)
if err != nil {
panic("couldn't read expected checksum")
}
if cksum != fileCksum {
origCksum := dpchecksum(orig)
panicMsg := ""
if origCksum == sourceCksum {
if sourceCksum == 0 { // no source checksum
panicMsg = "checksum mismatch. it's possible you don't have the original file this diff was created against, or it could be a bug in dltp."
} else {
panicMsg = "sorry; it looks like source file you have isn't original file this diff was created against."
}
} else {
panicMsg = "checksum mismatch. this looks likely to be a bug in dltp."
}
os.Remove("dltp-error-report.txt")
crashReport, err := os.Create("dltp-error-report.txt")
if err == nil {
// wish filenames etc were available here
fmt.Fprintln(crashReport, panicMsg)
fmt.Fprintln(crashReport, "SourceRef:", source)
crashReport.WriteString("Original text:\n\n")
crashReport.Write(orig)
crashReport.WriteString("\n\nPatched output:\n\n")
crashReport.Write(text)
crashReport.Close()
panicMsg += " wrote additional information to dltp-error-report.txt"
} else {
panicMsg += " couldn't write additional information (" + err.Error() + ")"
}
panic(panicMsg)
}
// write if not ChangeDump or if changed or if this is preamble
if !dpr.ChangeDump || !bytes.Equal(text, orig) || dpr.lastSeg == nil {
_, err := dpr.out.Write(text)
if err != nil {
panic("couldn't write expanded file")
}
}
dpr.lastSeg = text
return true
}
func (dpr *DPReader) | () {
for _, r := range dpr.sources {
if c, ok := r.(io.Closer); ok {
c.Close()
}
}
dpr.out.Flush()
}
| Close | identifier_name |
main.rs | #![allow(dead_code)]
extern crate libc;
extern crate time;
extern crate rand;
extern crate toml;
extern crate serde_json;
#[macro_use] extern crate serde_derive;
extern crate colored;
#[macro_use] extern crate prettytable;
extern crate ctrlc;
extern crate clap;
use clap::{Arg, App};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
mod config;
mod run;
mod mutation;
mod analysis;
mod queue;
mod test;
mod stats;
use run::buffered::{ find_one_fuzz_server, BufferedFuzzServerConfig };
use run::{ FuzzServer, Run };
const APP_NAME: &'static str = env!("CARGO_PKG_NAME");
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
const AUTHOR: &'static str = env!("CARGO_PKG_AUTHORS");
const FPGA_DIR: &'static str = "/tmp/fpga";
const WORD_SIZE : usize = 8;
#[derive(Debug)]
struct Args {
toml_config: String,
print_queue: bool,
print_total_cov: bool,
skip_deterministic: bool,
skip_non_deterministic: bool,
random: bool,
input_directory: Option<String>,
output_directory: String,
test_mode: bool,
jqf: analysis::JQFLevel,
fuzz_server_id: String,
seed_cycles: usize,
}
fn main() {
let matches = App::new(APP_NAME).version(VERSION).author(AUTHOR)
.about("AFL-style fuzzer specialized for fuzzing RTL circuits.")
.version_short("v")
.arg(Arg::with_name("TOML")
.help("TOML file describing the circuit being fuzzed")
.required(true).index(1))
.arg(Arg::with_name("print_queue")
.long("print-queue").short("q")
.help("Prints queue content at the end of a fuzzing run."))
.arg(Arg::with_name("print_total_cov")
.long("print-total-cov").short("c")
.help("Prints the union coverage at the end of a fuzzing run."))
.arg(Arg::with_name("skip_deterministic")
.long("skip-deterministic").short("d")
.help("Skip all deterministic mutation strategies."))
.arg(Arg::with_name("skip_non_deterministic")
.long("skip-non-deterministic").short("n")
.help("Skip all non-deterministic mutation strategies."))
.arg(Arg::with_name("random")
.long("random").short("r")
.help("Generate independent random inputs instead of using the fuzzing algorithm."))
.arg(Arg::with_name("jqf_level")
.long("jqf-level").short("j")
.help("Select which level of JQF to apply.")
.takes_value(true).possible_values(&["0", "1", "2"])
.default_value("2"))
.arg(Arg::with_name("seed_cycles")
.long("seed-cycles")
.help("The starting seed consits of all zeros for N cycles.")
.takes_value(true)
.default_value("5"))
.arg(Arg::with_name("input_directory")
.long("input-directory").short("i").value_name("DIR")
.takes_value(true)
.help("The output directory of a previous run from which to resume."))
.arg(Arg::with_name("output_directory")
.long("output-directory").short("o").value_name("DIR")
.takes_value(true)
.help("Used to log this session. Must be empty!")
.required(true))
.arg(Arg::with_name("test_mode")
.long("test-mode").short("t")
.help("Test the fuzz server with known input/coverage pairs."))
.arg(Arg::with_name("fuzz_server_id")
.long("server-id").short("s")
.help("The id of the fuzz server isntance to connect to.")
.takes_value(true).default_value("0"))
.get_matches();
let args = Args {
toml_config: matches.value_of("TOML").unwrap().to_string(),
print_queue: matches.is_present("print_queue"),
print_total_cov: matches.is_present("print_total_cov"),
skip_deterministic: matches.is_present("skip_deterministic"),
skip_non_deterministic: matches.is_present("skip_non_deterministic"),
random: matches.is_present("random"),
input_directory: matches.value_of("input_directory").map(|s| s.to_string()),
output_directory: matches.value_of("output_directory").unwrap().to_string(),
test_mode: matches.is_present("test_mode"),
jqf: analysis::JQFLevel::from_arg(matches.value_of("jqf_level").unwrap()),
fuzz_server_id: matches.value_of("fuzz_server_id").unwrap().to_string(),
seed_cycles: matches.value_of("seed_cycles").unwrap().parse::<usize>().unwrap(),
};
// "Ctrl + C" handling
let canceled = Arc::new(AtomicBool::new(false));
let c = canceled.clone();
ctrlc::set_handler(move || { c.store(true, Ordering::SeqCst); })
.expect("Error setting Ctrl-C handler");
// load test config
let config = config::Config::from_file(WORD_SIZE, &args.toml_config);
let test_size = config.get_test_size();
config.print_header();
// test runner
let srv_config = BufferedFuzzServerConfig {
test_size : test_size,
max_cycles : 200,
test_buffer_size : 64 * 1024 * 16,
coverage_buffer_size : 64 * 1024 * 16,
buffer_count: 3,
max_runs: 1024 * 16,
};
println!("Test Buffer: {} KiB", srv_config.test_buffer_size / 1024);
println!("Coverage Buffer: {} KiB", srv_config.coverage_buffer_size / 1024);
println!("Max Inputs: {}", srv_config.test_buffer_size / 16 / 3);
let server_dir = format!("{}/{}", FPGA_DIR, args.fuzz_server_id);
let mut server = find_one_fuzz_server(&server_dir, srv_config).expect("failed to find a fuzz server");
if args.test_mode {
test_mode(&mut server, &config);
} else {
fuzzer(args, canceled, config, test_size, &mut server);
}
}
fn test_mode(server: &mut FuzzServer, config: &config::Config) {
println!("⚠️ Test mode selected! ⚠️");
test::test_fuzz_server(server, config);
}
fn fuzzer(args: Args, canceled: Arc<AtomicBool>, config: config::Config,
test_size: run::TestSize, server: &mut FuzzServer) {
// starting seed
let start_cycles = args.seed_cycles;
let starting_seed = vec![0u8; test_size.input * start_cycles];
// analysis
let ranges = config.gen_ranges();
//println!("ranges:]\n{:?}", ranges);
let mut analysis = analysis::Analysis::new(test_size, ranges, args.jqf);
let seed_coverage = fuzz_one(server, &starting_seed);
let seed_analysis_res = analysis.run(start_cycles as u16, &seed_coverage);
if seed_analysis_res.is_invalid {
println!("❌ Invalid seed!");
}
if !seed_analysis_res.is_interesting {
println!("⚠️ Uninteresting seed! Might be 🐟!");
}
// TODO: support multiple seeds
// mutation
let mut_config = mutation::MutationScheduleConfig {
skip_deterministic: args.skip_deterministic,
skip_non_deterministic: args.skip_non_deterministic,
independent_random: args.random,
};
if mut_config.independent_random {
println!("⚠️ Mutation disabled. Generating independent random inputs! ⚠️");
}
let mutations = mutation::MutationSchedule::initialize(mut_config, test_size, config.get_inputs());
// statistics
let start_ts = get_time();
let mut statistics = stats::Stats::new(mutations.get_names(), start_ts.clone(), analysis.get_bitmap());
// queue
let mut q = queue::Queue::create(
&args.output_directory,
&starting_seed,
seed_analysis_res.new_cov,
!seed_analysis_res.is_invalid,
start_ts,
config.to_json(),
statistics.take_snapshot(),
&seed_coverage);
let max_entries = 1_000_000;
let max_children = 100_000; // TODO: better mechanism to determine length of the havoc stage
println!("fuzzing a maximum of {} queue entries", max_entries);
for _ in 0..max_entries {
let active_test = q.get_next_test();
let mut history = active_test.mutation_history;
let mut new_runs : u64 = 0;
q.print_entry_summary(active_test.id, &mutations);
while let Some(mut mutator) = mutations.get_mutator(&mut history, &active_test.inputs) {
// println!("running {} mutation", mutations.get_name(mutator.id()));
let mut done = false;
let mut start = 0;
while !done {
match server.run(&mut mutator, start) {
Run::Done(runs, cycles) => {
statistics.update_test_count(mutator.id().id, runs as u64, cycles);
new_runs += runs as u64;
done = true;
}
Run::Yield(ii) => { start = ii; }
}
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid | s >= max_children { break; }
}
q.return_test(active_test.id, history);
q.save_latest(statistics.take_snapshot());
if canceled.load(Ordering::SeqCst) {
println!("User interrupted fuzzing. Going to shut down....");
break;
}
}
server.sync();
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
q.save_latest(statistics.take_snapshot());
// done with the main fuzzing part
statistics.done();
// final bitmap
let bitmap = analysis.get_bitmap();
// print inputs from queue
if args.print_queue {
println!("\n");
println!("Formated Inputs and Coverage!");
for entry in q.entries() {
q.print_entry_summary(entry.id, &mutations);
config.print_inputs(&entry.inputs);
println!("Achieved Coverage:");
let coverage = fuzz_one(server, &entry.inputs);
config.print_test_coverage(&coverage);
println!("\n");
}
}
if args.print_total_cov {
println!("Total Coverage:");
config.print_bitmap(&bitmap);
}
// print statistics
print!("{}", statistics.get_final_snapshot().unwrap());
println!("Bitmap: {:?}", bitmap);
}
fn fuzz_one(server: &mut FuzzServer, input: &[u8]) -> Vec<u8> {
let mut mutator = mutation::identity(input);
if let Run::Done(count, _) = server.run(&mut mutator, 0) {
assert_eq!(count, 1);
} else { assert!(false); }
server.sync();
let feedback = server.pop_coverage().expect("should get exactly one coverage back!");
feedback.data.to_vec()
}
fn get_time() -> std::time::Duration {
let raw = time::get_time();
std::time::Duration::new(raw.sec as u64, raw.nsec as u32)
} | { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
}
if new_run | conditional_block |
main.rs | #![allow(dead_code)]
extern crate libc;
extern crate time;
extern crate rand;
extern crate toml;
extern crate serde_json;
#[macro_use] extern crate serde_derive;
extern crate colored;
#[macro_use] extern crate prettytable;
extern crate ctrlc;
extern crate clap;
use clap::{Arg, App};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
mod config;
mod run;
mod mutation;
mod analysis;
mod queue;
mod test;
mod stats;
use run::buffered::{ find_one_fuzz_server, BufferedFuzzServerConfig };
use run::{ FuzzServer, Run };
const APP_NAME: &'static str = env!("CARGO_PKG_NAME");
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
const AUTHOR: &'static str = env!("CARGO_PKG_AUTHORS");
const FPGA_DIR: &'static str = "/tmp/fpga";
const WORD_SIZE : usize = 8;
#[derive(Debug)]
struct Args {
toml_config: String,
print_queue: bool,
print_total_cov: bool,
skip_deterministic: bool,
skip_non_deterministic: bool,
random: bool,
input_directory: Option<String>,
output_directory: String,
test_mode: bool,
jqf: analysis::JQFLevel,
fuzz_server_id: String,
seed_cycles: usize,
}
fn main() {
let matches = App::new(APP_NAME).version(VERSION).author(AUTHOR)
.about("AFL-style fuzzer specialized for fuzzing RTL circuits.")
.version_short("v")
.arg(Arg::with_name("TOML")
.help("TOML file describing the circuit being fuzzed")
.required(true).index(1))
.arg(Arg::with_name("print_queue")
.long("print-queue").short("q")
.help("Prints queue content at the end of a fuzzing run."))
.arg(Arg::with_name("print_total_cov")
.long("print-total-cov").short("c")
.help("Prints the union coverage at the end of a fuzzing run."))
.arg(Arg::with_name("skip_deterministic")
.long("skip-deterministic").short("d")
.help("Skip all deterministic mutation strategies."))
.arg(Arg::with_name("skip_non_deterministic")
.long("skip-non-deterministic").short("n")
.help("Skip all non-deterministic mutation strategies."))
.arg(Arg::with_name("random")
.long("random").short("r")
.help("Generate independent random inputs instead of using the fuzzing algorithm."))
.arg(Arg::with_name("jqf_level")
.long("jqf-level").short("j")
.help("Select which level of JQF to apply.")
.takes_value(true).possible_values(&["0", "1", "2"])
.default_value("2"))
.arg(Arg::with_name("seed_cycles")
.long("seed-cycles")
.help("The starting seed consits of all zeros for N cycles.")
.takes_value(true)
.default_value("5"))
.arg(Arg::with_name("input_directory")
.long("input-directory").short("i").value_name("DIR")
.takes_value(true)
.help("The output directory of a previous run from which to resume."))
.arg(Arg::with_name("output_directory")
.long("output-directory").short("o").value_name("DIR")
.takes_value(true)
.help("Used to log this session. Must be empty!")
.required(true))
.arg(Arg::with_name("test_mode")
.long("test-mode").short("t")
.help("Test the fuzz server with known input/coverage pairs."))
.arg(Arg::with_name("fuzz_server_id")
.long("server-id").short("s")
.help("The id of the fuzz server isntance to connect to.")
.takes_value(true).default_value("0"))
.get_matches();
let args = Args {
toml_config: matches.value_of("TOML").unwrap().to_string(),
print_queue: matches.is_present("print_queue"),
print_total_cov: matches.is_present("print_total_cov"),
skip_deterministic: matches.is_present("skip_deterministic"),
skip_non_deterministic: matches.is_present("skip_non_deterministic"),
random: matches.is_present("random"),
input_directory: matches.value_of("input_directory").map(|s| s.to_string()),
output_directory: matches.value_of("output_directory").unwrap().to_string(),
test_mode: matches.is_present("test_mode"),
jqf: analysis::JQFLevel::from_arg(matches.value_of("jqf_level").unwrap()),
fuzz_server_id: matches.value_of("fuzz_server_id").unwrap().to_string(),
seed_cycles: matches.value_of("seed_cycles").unwrap().parse::<usize>().unwrap(),
};
// "Ctrl + C" handling
let canceled = Arc::new(AtomicBool::new(false));
let c = canceled.clone();
ctrlc::set_handler(move || { c.store(true, Ordering::SeqCst); })
.expect("Error setting Ctrl-C handler");
// load test config
let config = config::Config::from_file(WORD_SIZE, &args.toml_config);
let test_size = config.get_test_size();
config.print_header();
// test runner
let srv_config = BufferedFuzzServerConfig {
test_size : test_size,
max_cycles : 200,
test_buffer_size : 64 * 1024 * 16,
coverage_buffer_size : 64 * 1024 * 16,
buffer_count: 3,
max_runs: 1024 * 16,
};
println!("Test Buffer: {} KiB", srv_config.test_buffer_size / 1024);
println!("Coverage Buffer: {} KiB", srv_config.coverage_buffer_size / 1024);
println!("Max Inputs: {}", srv_config.test_buffer_size / 16 / 3);
let server_dir = format!("{}/{}", FPGA_DIR, args.fuzz_server_id);
let mut server = find_one_fuzz_server(&server_dir, srv_config).expect("failed to find a fuzz server");
if args.test_mode {
test_mode(&mut server, &config);
} else {
fuzzer(args, canceled, config, test_size, &mut server);
}
}
fn test_mode(server: &mut FuzzServer, config: &config::Config) {
println!("⚠️ Test mode selected! ⚠️");
test::test_fuzz_server(server, config);
}
fn fuzzer(args: Args, canceled: Arc<AtomicBool>, config: config::Config,
test_size: run::TestSize, server: &mut FuzzServer) {
// starting seed
let start_cycles = args.seed_cycles;
let starting_seed = vec![0u8; test_size.input * start_cycles];
// analysis
let ranges = config.gen_ranges();
//println!("ranges:]\n{:?}", ranges);
let mut analysis = analysis::Analysis::new(test_size, ranges, args.jqf);
let seed_coverage = fuzz_one(server, &starting_seed);
let seed_analysis_res = analysis.run(start_cycles as u16, &seed_coverage);
if seed_analysis_res.is_invalid {
println!("❌ Invalid seed!");
}
if !seed_analysis_res.is_interesting {
println!("⚠️ Uninteresting seed! Might be 🐟!");
}
// TODO: support multiple seeds
// mutation
let mut_config = mutation::MutationScheduleConfig {
skip_deterministic: args.skip_deterministic,
skip_non_deterministic: args.skip_non_deterministic,
independent_random: args.random,
};
if mut_config.independent_random {
println!("⚠️ Mutation disabled. Generating independent random inputs! ⚠️");
}
let mutations = mutation::MutationSchedule::initialize(mut_config, test_size, config.get_inputs());
// statistics
let start_ts = get_time();
let mut statistics = stats::Stats::new(mutations.get_names(), start_ts.clone(), analysis.get_bitmap());
// queue
let mut q = queue::Queue::create(
&args.output_directory,
&starting_seed,
seed_analysis_res.new_cov,
!seed_analysis_res.is_invalid,
start_ts,
config.to_json(),
statistics.take_snapshot(),
&seed_coverage);
let max_entries = 1_000_000;
let max_children = 100_000; // TODO: better mechanism to determine length of the havoc stage
println!("fuzzing a maximum of {} queue entries", max_entries);
for _ in 0..max_entries {
let active_test = q.get_next_test();
let mut history = active_test.mutation_history;
let mut new_runs : u64 = 0;
q.print_entry_summary(active_test.id, &mutations);
while let Some(mut mutator) = mutations.get_mutator(&mut history, &active_test.inputs) {
// println!("running {} mutation", mutations.get_name(mutator.id()));
let mut done = false;
let mut start = 0;
while !done {
match server.run(&mut mutator, start) {
Run::Done(runs, cycles) => {
statistics.update_test_count(mutator.id().id, runs as u64, cycles);
new_runs += runs as u64;
done = true;
}
Run::Yield(ii) => { start = ii; }
}
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
}
if new_runs >= max_children { break; }
}
q.return_test(active_test.id, history);
q.save_latest(statistics.take_snapshot());
if canceled.load(Ordering::SeqCst) {
println!("User interrupted fuzzing. Going to shut down....");
break;
}
}
server.sync();
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
q.save_latest(statistics.take_snapshot());
// done with the main fuzzing part
statistics.done();
// final bitmap
let bitmap = analysis.get_bitmap();
// print inputs from queue
if args.print_queue {
println!("\n");
println!("Formated Inputs and Coverage!");
for entry in q.entries() {
q.print_entry_summary(entry.id, &mutations);
config.print_inputs(&entry.inputs);
println!("Achieved Coverage:");
let coverage = fuzz_one(server, &entry.inputs);
config.print_test_coverage(&coverage);
println!("\n");
}
}
if args.print_total_cov {
println!("Total Coverage:");
config.print_bitmap(&bitmap);
}
// print statistics
print!("{}", statistics.get_final_snapshot().unwrap());
println!("Bitmap: {:?}", bitmap);
}
fn fuzz_one(server: &mut FuzzServer, input: &[u8]) -> Vec<u8> {
let mut mutator = mutation::identity(input);
if let Run::Done(count, _) = server.run(&mut mutator, 0) {
assert_eq!(count, 1);
} else { assert!(false); }
server.sync();
let feedback = server.pop_coverage().expect("should get exactly one coverage back!");
feedback.data.to_vec()
}
fn get_time() -> std::time::Duration {
let raw = time::get_ti | me();
std::time::Duration::new(raw.sec as u64, raw.nsec as u32)
} | identifier_body |
|
main.rs | #![allow(dead_code)]
extern crate libc;
extern crate time;
extern crate rand;
extern crate toml;
extern crate serde_json;
#[macro_use] extern crate serde_derive;
extern crate colored;
#[macro_use] extern crate prettytable;
extern crate ctrlc;
extern crate clap;
use clap::{Arg, App};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
mod config;
mod run;
mod mutation;
mod analysis;
mod queue;
mod test;
mod stats;
use run::buffered::{ find_one_fuzz_server, BufferedFuzzServerConfig };
use run::{ FuzzServer, Run };
const APP_NAME: &'static str = env!("CARGO_PKG_NAME");
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
const AUTHOR: &'static str = env!("CARGO_PKG_AUTHORS");
const FPGA_DIR: &'static str = "/tmp/fpga";
const WORD_SIZE : usize = 8;
#[derive(Debug)]
struct | {
toml_config: String,
print_queue: bool,
print_total_cov: bool,
skip_deterministic: bool,
skip_non_deterministic: bool,
random: bool,
input_directory: Option<String>,
output_directory: String,
test_mode: bool,
jqf: analysis::JQFLevel,
fuzz_server_id: String,
seed_cycles: usize,
}
fn main() {
let matches = App::new(APP_NAME).version(VERSION).author(AUTHOR)
.about("AFL-style fuzzer specialized for fuzzing RTL circuits.")
.version_short("v")
.arg(Arg::with_name("TOML")
.help("TOML file describing the circuit being fuzzed")
.required(true).index(1))
.arg(Arg::with_name("print_queue")
.long("print-queue").short("q")
.help("Prints queue content at the end of a fuzzing run."))
.arg(Arg::with_name("print_total_cov")
.long("print-total-cov").short("c")
.help("Prints the union coverage at the end of a fuzzing run."))
.arg(Arg::with_name("skip_deterministic")
.long("skip-deterministic").short("d")
.help("Skip all deterministic mutation strategies."))
.arg(Arg::with_name("skip_non_deterministic")
.long("skip-non-deterministic").short("n")
.help("Skip all non-deterministic mutation strategies."))
.arg(Arg::with_name("random")
.long("random").short("r")
.help("Generate independent random inputs instead of using the fuzzing algorithm."))
.arg(Arg::with_name("jqf_level")
.long("jqf-level").short("j")
.help("Select which level of JQF to apply.")
.takes_value(true).possible_values(&["0", "1", "2"])
.default_value("2"))
.arg(Arg::with_name("seed_cycles")
.long("seed-cycles")
.help("The starting seed consits of all zeros for N cycles.")
.takes_value(true)
.default_value("5"))
.arg(Arg::with_name("input_directory")
.long("input-directory").short("i").value_name("DIR")
.takes_value(true)
.help("The output directory of a previous run from which to resume."))
.arg(Arg::with_name("output_directory")
.long("output-directory").short("o").value_name("DIR")
.takes_value(true)
.help("Used to log this session. Must be empty!")
.required(true))
.arg(Arg::with_name("test_mode")
.long("test-mode").short("t")
.help("Test the fuzz server with known input/coverage pairs."))
.arg(Arg::with_name("fuzz_server_id")
.long("server-id").short("s")
.help("The id of the fuzz server isntance to connect to.")
.takes_value(true).default_value("0"))
.get_matches();
let args = Args {
toml_config: matches.value_of("TOML").unwrap().to_string(),
print_queue: matches.is_present("print_queue"),
print_total_cov: matches.is_present("print_total_cov"),
skip_deterministic: matches.is_present("skip_deterministic"),
skip_non_deterministic: matches.is_present("skip_non_deterministic"),
random: matches.is_present("random"),
input_directory: matches.value_of("input_directory").map(|s| s.to_string()),
output_directory: matches.value_of("output_directory").unwrap().to_string(),
test_mode: matches.is_present("test_mode"),
jqf: analysis::JQFLevel::from_arg(matches.value_of("jqf_level").unwrap()),
fuzz_server_id: matches.value_of("fuzz_server_id").unwrap().to_string(),
seed_cycles: matches.value_of("seed_cycles").unwrap().parse::<usize>().unwrap(),
};
// "Ctrl + C" handling
let canceled = Arc::new(AtomicBool::new(false));
let c = canceled.clone();
ctrlc::set_handler(move || { c.store(true, Ordering::SeqCst); })
.expect("Error setting Ctrl-C handler");
// load test config
let config = config::Config::from_file(WORD_SIZE, &args.toml_config);
let test_size = config.get_test_size();
config.print_header();
// test runner
let srv_config = BufferedFuzzServerConfig {
test_size : test_size,
max_cycles : 200,
test_buffer_size : 64 * 1024 * 16,
coverage_buffer_size : 64 * 1024 * 16,
buffer_count: 3,
max_runs: 1024 * 16,
};
println!("Test Buffer: {} KiB", srv_config.test_buffer_size / 1024);
println!("Coverage Buffer: {} KiB", srv_config.coverage_buffer_size / 1024);
println!("Max Inputs: {}", srv_config.test_buffer_size / 16 / 3);
let server_dir = format!("{}/{}", FPGA_DIR, args.fuzz_server_id);
let mut server = find_one_fuzz_server(&server_dir, srv_config).expect("failed to find a fuzz server");
if args.test_mode {
test_mode(&mut server, &config);
} else {
fuzzer(args, canceled, config, test_size, &mut server);
}
}
fn test_mode(server: &mut FuzzServer, config: &config::Config) {
println!("⚠️ Test mode selected! ⚠️");
test::test_fuzz_server(server, config);
}
fn fuzzer(args: Args, canceled: Arc<AtomicBool>, config: config::Config,
test_size: run::TestSize, server: &mut FuzzServer) {
// starting seed
let start_cycles = args.seed_cycles;
let starting_seed = vec![0u8; test_size.input * start_cycles];
// analysis
let ranges = config.gen_ranges();
//println!("ranges:]\n{:?}", ranges);
let mut analysis = analysis::Analysis::new(test_size, ranges, args.jqf);
let seed_coverage = fuzz_one(server, &starting_seed);
let seed_analysis_res = analysis.run(start_cycles as u16, &seed_coverage);
if seed_analysis_res.is_invalid {
println!("❌ Invalid seed!");
}
if !seed_analysis_res.is_interesting {
println!("⚠️ Uninteresting seed! Might be 🐟!");
}
// TODO: support multiple seeds
// mutation
let mut_config = mutation::MutationScheduleConfig {
skip_deterministic: args.skip_deterministic,
skip_non_deterministic: args.skip_non_deterministic,
independent_random: args.random,
};
if mut_config.independent_random {
println!("⚠️ Mutation disabled. Generating independent random inputs! ⚠️");
}
let mutations = mutation::MutationSchedule::initialize(mut_config, test_size, config.get_inputs());
// statistics
let start_ts = get_time();
let mut statistics = stats::Stats::new(mutations.get_names(), start_ts.clone(), analysis.get_bitmap());
// queue
let mut q = queue::Queue::create(
&args.output_directory,
&starting_seed,
seed_analysis_res.new_cov,
!seed_analysis_res.is_invalid,
start_ts,
config.to_json(),
statistics.take_snapshot(),
&seed_coverage);
let max_entries = 1_000_000;
let max_children = 100_000; // TODO: better mechanism to determine length of the havoc stage
println!("fuzzing a maximum of {} queue entries", max_entries);
for _ in 0..max_entries {
let active_test = q.get_next_test();
let mut history = active_test.mutation_history;
let mut new_runs : u64 = 0;
q.print_entry_summary(active_test.id, &mutations);
while let Some(mut mutator) = mutations.get_mutator(&mut history, &active_test.inputs) {
// println!("running {} mutation", mutations.get_name(mutator.id()));
let mut done = false;
let mut start = 0;
while !done {
match server.run(&mut mutator, start) {
Run::Done(runs, cycles) => {
statistics.update_test_count(mutator.id().id, runs as u64, cycles);
new_runs += runs as u64;
done = true;
}
Run::Yield(ii) => { start = ii; }
}
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
}
if new_runs >= max_children { break; }
}
q.return_test(active_test.id, history);
q.save_latest(statistics.take_snapshot());
if canceled.load(Ordering::SeqCst) {
println!("User interrupted fuzzing. Going to shut down....");
break;
}
}
server.sync();
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
q.save_latest(statistics.take_snapshot());
// done with the main fuzzing part
statistics.done();
// final bitmap
let bitmap = analysis.get_bitmap();
// print inputs from queue
if args.print_queue {
println!("\n");
println!("Formated Inputs and Coverage!");
for entry in q.entries() {
q.print_entry_summary(entry.id, &mutations);
config.print_inputs(&entry.inputs);
println!("Achieved Coverage:");
let coverage = fuzz_one(server, &entry.inputs);
config.print_test_coverage(&coverage);
println!("\n");
}
}
if args.print_total_cov {
println!("Total Coverage:");
config.print_bitmap(&bitmap);
}
// print statistics
print!("{}", statistics.get_final_snapshot().unwrap());
println!("Bitmap: {:?}", bitmap);
}
fn fuzz_one(server: &mut FuzzServer, input: &[u8]) -> Vec<u8> {
let mut mutator = mutation::identity(input);
if let Run::Done(count, _) = server.run(&mut mutator, 0) {
assert_eq!(count, 1);
} else { assert!(false); }
server.sync();
let feedback = server.pop_coverage().expect("should get exactly one coverage back!");
feedback.data.to_vec()
}
fn get_time() -> std::time::Duration {
let raw = time::get_time();
std::time::Duration::new(raw.sec as u64, raw.nsec as u32)
} | Args | identifier_name |
main.rs | #![allow(dead_code)]
extern crate libc;
extern crate time;
extern crate rand;
extern crate toml;
extern crate serde_json;
#[macro_use] extern crate serde_derive;
extern crate colored;
#[macro_use] extern crate prettytable;
extern crate ctrlc;
extern crate clap;
use clap::{Arg, App};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
mod config;
mod run;
mod mutation;
mod analysis;
mod queue;
mod test;
mod stats;
use run::buffered::{ find_one_fuzz_server, BufferedFuzzServerConfig };
use run::{ FuzzServer, Run };
const APP_NAME: &'static str = env!("CARGO_PKG_NAME");
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
const AUTHOR: &'static str = env!("CARGO_PKG_AUTHORS");
const FPGA_DIR: &'static str = "/tmp/fpga";
const WORD_SIZE : usize = 8;
#[derive(Debug)]
struct Args {
toml_config: String,
print_queue: bool,
print_total_cov: bool,
skip_deterministic: bool,
skip_non_deterministic: bool,
random: bool,
input_directory: Option<String>,
output_directory: String,
test_mode: bool,
jqf: analysis::JQFLevel,
fuzz_server_id: String,
seed_cycles: usize,
}
fn main() {
let matches = App::new(APP_NAME).version(VERSION).author(AUTHOR)
.about("AFL-style fuzzer specialized for fuzzing RTL circuits.")
.version_short("v")
.arg(Arg::with_name("TOML")
.help("TOML file describing the circuit being fuzzed")
.required(true).index(1))
.arg(Arg::with_name("print_queue")
.long("print-queue").short("q")
.help("Prints queue content at the end of a fuzzing run."))
.arg(Arg::with_name("print_total_cov")
.long("print-total-cov").short("c")
.help("Prints the union coverage at the end of a fuzzing run."))
.arg(Arg::with_name("skip_deterministic")
.long("skip-deterministic").short("d")
.help("Skip all deterministic mutation strategies."))
.arg(Arg::with_name("skip_non_deterministic")
.long("skip-non-deterministic").short("n")
.help("Skip all non-deterministic mutation strategies."))
.arg(Arg::with_name("random")
.long("random").short("r")
.help("Generate independent random inputs instead of using the fuzzing algorithm."))
.arg(Arg::with_name("jqf_level")
.long("jqf-level").short("j")
.help("Select which level of JQF to apply.")
.takes_value(true).possible_values(&["0", "1", "2"])
.default_value("2"))
.arg(Arg::with_name("seed_cycles")
.long("seed-cycles")
.help("The starting seed consits of all zeros for N cycles.")
.takes_value(true)
.default_value("5"))
.arg(Arg::with_name("input_directory")
.long("input-directory").short("i").value_name("DIR")
.takes_value(true)
.help("The output directory of a previous run from which to resume."))
.arg(Arg::with_name("output_directory")
.long("output-directory").short("o").value_name("DIR")
.takes_value(true)
.help("Used to log this session. Must be empty!")
.required(true))
.arg(Arg::with_name("test_mode")
.long("test-mode").short("t")
.help("Test the fuzz server with known input/coverage pairs."))
.arg(Arg::with_name("fuzz_server_id")
.long("server-id").short("s")
.help("The id of the fuzz server isntance to connect to.")
.takes_value(true).default_value("0"))
.get_matches();
let args = Args {
toml_config: matches.value_of("TOML").unwrap().to_string(),
print_queue: matches.is_present("print_queue"),
print_total_cov: matches.is_present("print_total_cov"),
skip_deterministic: matches.is_present("skip_deterministic"),
skip_non_deterministic: matches.is_present("skip_non_deterministic"),
random: matches.is_present("random"),
input_directory: matches.value_of("input_directory").map(|s| s.to_string()),
output_directory: matches.value_of("output_directory").unwrap().to_string(),
test_mode: matches.is_present("test_mode"),
jqf: analysis::JQFLevel::from_arg(matches.value_of("jqf_level").unwrap()),
fuzz_server_id: matches.value_of("fuzz_server_id").unwrap().to_string(),
seed_cycles: matches.value_of("seed_cycles").unwrap().parse::<usize>().unwrap(),
};
// "Ctrl + C" handling
let canceled = Arc::new(AtomicBool::new(false));
let c = canceled.clone();
ctrlc::set_handler(move || { c.store(true, Ordering::SeqCst); })
.expect("Error setting Ctrl-C handler");
// load test config
let config = config::Config::from_file(WORD_SIZE, &args.toml_config);
let test_size = config.get_test_size();
config.print_header();
// test runner
let srv_config = BufferedFuzzServerConfig {
test_size : test_size,
max_cycles : 200,
test_buffer_size : 64 * 1024 * 16,
coverage_buffer_size : 64 * 1024 * 16,
buffer_count: 3,
max_runs: 1024 * 16,
};
println!("Test Buffer: {} KiB", srv_config.test_buffer_size / 1024);
println!("Coverage Buffer: {} KiB", srv_config.coverage_buffer_size / 1024);
println!("Max Inputs: {}", srv_config.test_buffer_size / 16 / 3);
| } else {
fuzzer(args, canceled, config, test_size, &mut server);
}
}
fn test_mode(server: &mut FuzzServer, config: &config::Config) {
println!("⚠️ Test mode selected! ⚠️");
test::test_fuzz_server(server, config);
}
fn fuzzer(args: Args, canceled: Arc<AtomicBool>, config: config::Config,
test_size: run::TestSize, server: &mut FuzzServer) {
// starting seed
let start_cycles = args.seed_cycles;
let starting_seed = vec![0u8; test_size.input * start_cycles];
// analysis
let ranges = config.gen_ranges();
//println!("ranges:]\n{:?}", ranges);
let mut analysis = analysis::Analysis::new(test_size, ranges, args.jqf);
let seed_coverage = fuzz_one(server, &starting_seed);
let seed_analysis_res = analysis.run(start_cycles as u16, &seed_coverage);
if seed_analysis_res.is_invalid {
println!("❌ Invalid seed!");
}
if !seed_analysis_res.is_interesting {
println!("⚠️ Uninteresting seed! Might be 🐟!");
}
// TODO: support multiple seeds
// mutation
let mut_config = mutation::MutationScheduleConfig {
skip_deterministic: args.skip_deterministic,
skip_non_deterministic: args.skip_non_deterministic,
independent_random: args.random,
};
if mut_config.independent_random {
println!("⚠️ Mutation disabled. Generating independent random inputs! ⚠️");
}
let mutations = mutation::MutationSchedule::initialize(mut_config, test_size, config.get_inputs());
// statistics
let start_ts = get_time();
let mut statistics = stats::Stats::new(mutations.get_names(), start_ts.clone(), analysis.get_bitmap());
// queue
let mut q = queue::Queue::create(
&args.output_directory,
&starting_seed,
seed_analysis_res.new_cov,
!seed_analysis_res.is_invalid,
start_ts,
config.to_json(),
statistics.take_snapshot(),
&seed_coverage);
let max_entries = 1_000_000;
let max_children = 100_000; // TODO: better mechanism to determine length of the havoc stage
println!("fuzzing a maximum of {} queue entries", max_entries);
for _ in 0..max_entries {
let active_test = q.get_next_test();
let mut history = active_test.mutation_history;
let mut new_runs : u64 = 0;
q.print_entry_summary(active_test.id, &mutations);
while let Some(mut mutator) = mutations.get_mutator(&mut history, &active_test.inputs) {
// println!("running {} mutation", mutations.get_name(mutator.id()));
let mut done = false;
let mut start = 0;
while !done {
match server.run(&mut mutator, start) {
Run::Done(runs, cycles) => {
statistics.update_test_count(mutator.id().id, runs as u64, cycles);
new_runs += runs as u64;
done = true;
}
Run::Yield(ii) => { start = ii; }
}
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
}
if new_runs >= max_children { break; }
}
q.return_test(active_test.id, history);
q.save_latest(statistics.take_snapshot());
if canceled.load(Ordering::SeqCst) {
println!("User interrupted fuzzing. Going to shut down....");
break;
}
}
server.sync();
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
q.save_latest(statistics.take_snapshot());
// done with the main fuzzing part
statistics.done();
// final bitmap
let bitmap = analysis.get_bitmap();
// print inputs from queue
if args.print_queue {
println!("\n");
println!("Formated Inputs and Coverage!");
for entry in q.entries() {
q.print_entry_summary(entry.id, &mutations);
config.print_inputs(&entry.inputs);
println!("Achieved Coverage:");
let coverage = fuzz_one(server, &entry.inputs);
config.print_test_coverage(&coverage);
println!("\n");
}
}
if args.print_total_cov {
println!("Total Coverage:");
config.print_bitmap(&bitmap);
}
// print statistics
print!("{}", statistics.get_final_snapshot().unwrap());
println!("Bitmap: {:?}", bitmap);
}
fn fuzz_one(server: &mut FuzzServer, input: &[u8]) -> Vec<u8> {
let mut mutator = mutation::identity(input);
if let Run::Done(count, _) = server.run(&mut mutator, 0) {
assert_eq!(count, 1);
} else { assert!(false); }
server.sync();
let feedback = server.pop_coverage().expect("should get exactly one coverage back!");
feedback.data.to_vec()
}
fn get_time() -> std::time::Duration {
let raw = time::get_time();
std::time::Duration::new(raw.sec as u64, raw.nsec as u32)
} | let server_dir = format!("{}/{}", FPGA_DIR, args.fuzz_server_id);
let mut server = find_one_fuzz_server(&server_dir, srv_config).expect("failed to find a fuzz server");
if args.test_mode {
test_mode(&mut server, &config); | random_line_split |
http_remote.rs | //! Access to a HTTP-based crate registry. See [`HttpRegistry`] for details.
use crate::core::{PackageId, SourceId};
use crate::sources::registry::download;
use crate::sources::registry::MaybeLock;
use crate::sources::registry::{LoadResponse, RegistryConfig, RegistryData};
use crate::util::errors::{CargoResult, HttpNotSuccessful};
use crate::util::network::http::http_handle;
use crate::util::network::retry::{Retry, RetryResult};
use crate::util::network::sleep::SleepTracker;
use crate::util::{auth, Config, Filesystem, IntoUrl, Progress, ProgressStyle};
use anyhow::Context;
use cargo_credential::Operation;
use cargo_util::paths;
use curl::easy::{Easy, List};
use curl::multi::{EasyHandle, Multi};
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::fs::{self, File};
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::str;
use std::task::{ready, Poll};
use std::time::Duration;
use tracing::{debug, trace};
use url::Url;
// HTTP headers
const ETAG: &'static str = "etag";
const LAST_MODIFIED: &'static str = "last-modified";
const WWW_AUTHENTICATE: &'static str = "www-authenticate";
const IF_NONE_MATCH: &'static str = "if-none-match";
const IF_MODIFIED_SINCE: &'static str = "if-modified-since";
const UNKNOWN: &'static str = "Unknown";
/// A registry served by the HTTP-based registry API.
///
/// This type is primarily accessed through the [`RegistryData`] trait.
///
/// `HttpRegistry` implements the HTTP-based registry API outlined in [RFC 2789]. Read the RFC for
/// the complete protocol, but _roughly_ the implementation loads each index file (e.g.,
/// config.json or re/ge/regex) from an HTTP service rather than from a locally cloned git
/// repository. The remote service can more or less be a static file server that simply serves the
/// contents of the origin git repository.
///
/// Implemented naively, this leads to a significant amount of network traffic, as a lookup of any
/// index file would need to check with the remote backend if the index file has changed. This
/// cost is somewhat mitigated by the use of HTTP conditional fetches (`If-Modified-Since` and
/// `If-None-Match` for `ETag`s) which can be efficiently handled by HTTP/2.
///
/// [RFC 2789]: https://github.com/rust-lang/rfcs/pull/2789
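///
/// As a rough sketch (not part of the original source), the revalidation flow
/// implemented by [`HttpRegistry::load`] looks like this; the `index_version`
/// strings reuse the `ETAG`/`LAST_MODIFIED` constants above and the concrete
/// values are made up:
///
/// ```ignore
/// // No local cache yet: a plain GET goes out; a 200 reply surfaces as
/// // `LoadResponse::Data { raw_data, index_version: Some("etag: \"v1\"") }`
/// // and the caller stores both.
/// registry.load(root, Path::new("cr/at/crate-name"), None);
///
/// // Cache present: the stored version is replayed as `if-none-match: "v1"`;
/// // a 304 reply surfaces as `LoadResponse::CacheValid`.
/// registry.load(root, Path::new("cr/at/crate-name"), Some("etag: \"v1\""));
/// ```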
pub struct HttpRegistry<'cfg> {
/// Path to the registry index (`$CARGO_HOME/registry/index/$REG-HASH`).
///
/// To be fair, `HttpRegistry` doesn't store the registry index it
/// downloads on the file system, but other cached data like registry
/// configuration could be stored here.
index_path: Filesystem,
/// Path to the cache of `.crate` files (`$CARGO_HOME/registry/cache/$REG-HASH`).
cache_path: Filesystem,
/// The unique identifier of this registry source.
source_id: SourceId,
config: &'cfg Config,
/// Store the server URL without the protocol prefix (sparse+)
url: Url,
/// HTTP multi-handle for asynchronous/parallel requests.
multi: Multi,
/// Has the client requested a cache update?
///
/// Only if they have do we double-check the freshness of each locally-stored index file.
requested_update: bool,
/// State for currently pending index downloads.
downloads: Downloads<'cfg>,
/// Does the config say that we can use HTTP multiplexing?
multiplexing: bool,
/// What paths have we already fetched since the last index update?
///
/// We do not need to double-check any of these index files since we have already done so.
fresh: HashSet<PathBuf>,
/// Have we started to download any index files?
fetch_started: bool,
/// Cached registry configuration.
registry_config: Option<RegistryConfig>,
/// Should we include the authorization header?
auth_required: bool,
/// Url to get a token for the registry.
login_url: Option<Url>,
/// Headers received with an HTTP 401.
auth_error_headers: Vec<String>,
/// Disables status messages.
quiet: bool,
}
/// State for currently pending index file downloads.
struct Downloads<'cfg> {
/// When a download is started, it is added to this map. The key is a
/// "token" (see [`Download::token`]). It is removed once the download is
/// finished.
pending: HashMap<usize, (Download<'cfg>, EasyHandle)>,
/// Set of paths currently being downloaded.
/// This should stay in sync with the `pending` field.
pending_paths: HashSet<PathBuf>,
/// Downloads that have failed and are waiting to retry again later.
sleeping: SleepTracker<(Download<'cfg>, Easy)>,
/// The final result of each download.
results: HashMap<PathBuf, CargoResult<CompletedDownload>>,
/// The next ID to use for creating a token (see [`Download::token`]).
next: usize,
/// Progress bar.
progress: RefCell<Option<Progress<'cfg>>>,
/// Number of downloads that have successfully finished.
downloads_finished: usize,
/// Number of times the caller has requested blocking. This is used for
/// an estimate of progress.
blocking_calls: usize,
}
/// Represents a single index file download, including its progress and retry.
struct Download<'cfg> {
/// The token for this download, used as the key of the
/// [`Downloads::pending`] map and stored in [`EasyHandle`] as well.
token: usize,
/// The path of the package that we're downloading.
path: PathBuf,
/// Actual downloaded data, updated throughout the lifetime of this download.
data: RefCell<Vec<u8>>,
/// HTTP headers.
header_map: RefCell<Headers>,
/// Logic used to track retrying this download if it's a spurious failure.
retry: Retry<'cfg>,
}
/// HTTP headers [`HttpRegistry`] cares about.
#[derive(Default)]
struct Headers {
last_modified: Option<String>,
etag: Option<String>,
www_authenticate: Vec<String>,
/// All headers, including explicit headers above.
all: Vec<String>,
}
/// HTTP status code [`HttpRegistry`] cares about.
enum StatusCode {
Success,
NotModified,
NotFound,
Unauthorized,
}
/// Represents a complete [`Download`] from an HTTP request.
///
/// Usually it is constructed in [`HttpRegistry::handle_completed_downloads`],
/// and then returns to the caller of [`HttpRegistry::load()`].
struct CompletedDownload {
response_code: StatusCode,
data: Vec<u8>,
header_map: Headers,
}
impl<'cfg> HttpRegistry<'cfg> {
/// Creates an HTTP-based remote registry for `source_id`.
///
/// * `name` --- Name of a path segment where `.crate` tarballs and the
/// registry index are stored. Expected to be unique.
pub fn new(
source_id: SourceId,
config: &'cfg Config,
name: &str,
) -> CargoResult<HttpRegistry<'cfg>> {
let url = source_id.url().as_str();
// Ensure the url ends with a slash so we can concatenate paths.
if !url.ends_with('/') {
anyhow::bail!("sparse registry url must end in a slash `/`: {url}")
}
assert!(source_id.is_sparse());
let url = url
.strip_prefix("sparse+")
.expect("sparse registry needs sparse+ prefix")
.into_url()
.expect("a url with the sparse+ stripped should still be valid");
Ok(HttpRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id,
config,
url,
multi: Multi::new(),
multiplexing: false,
downloads: Downloads {
next: 0,
pending: HashMap::new(),
pending_paths: HashSet::new(),
sleeping: SleepTracker::new(),
results: HashMap::new(),
progress: RefCell::new(Some(Progress::with_style(
"Fetch",
ProgressStyle::Indeterminate,
config,
))),
downloads_finished: 0,
blocking_calls: 0,
},
fresh: HashSet::new(),
requested_update: false,
fetch_started: false,
registry_config: None,
auth_required: false,
login_url: None,
auth_error_headers: vec![],
quiet: false,
})
}
/// Splits an HTTP `HEADER: VALUE` line into a tuple.
fn | (buf: &[u8]) -> Option<(&str, &str)> {
if buf.is_empty() {
return None;
}
let buf = std::str::from_utf8(buf).ok()?.trim_end();
// Don't let server sneak extra lines anywhere.
if buf.contains('\n') {
return None;
}
let (tag, value) = buf.split_once(':')?;
let value = value.trim();
Some((tag, value))
}
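// Illustrative only (not in the original source): the splitter above maps a raw
// header line to a (name, value) pair and rejects anything with an embedded
// newline, e.g.
//
//     handle_http_header(b"etag: \"abc123\"\r\n") == Some(("etag", "\"abc123\""))
//     handle_http_header(b"etag: a\nx-sneaky: b") == None
//     handle_http_header(b"")                     == None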
/// Performs the necessary setup before the first fetch gets started.
///
/// This is a no-op if called more than one time.
fn start_fetch(&mut self) -> CargoResult<()> {
if self.fetch_started {
// We only need to run the setup code once.
return Ok(());
}
self.fetch_started = true;
// We've enabled the `http2` feature of `curl` in Cargo, so treat
// failures here as fatal as it would indicate a build-time problem.
self.multiplexing = self.config.http_config()?.multiplexing.unwrap_or(true);
self.multi
.pipelining(false, self.multiplexing)
.with_context(|| "failed to enable multiplexing/pipelining in curl")?;
// let's not flood the server with connections
self.multi.set_max_host_connections(2)?;
if !self.quiet {
self.config
.shell()
.status("Updating", self.source_id.display_index())?;
}
Ok(())
}
/// Checks the results inside the [`HttpRegistry::multi`] handle, and
/// updates relevant state in [`HttpRegistry::downloads`] accordingly.
fn handle_completed_downloads(&mut self) -> CargoResult<()> {
assert_eq!(
self.downloads.pending.len(),
self.downloads.pending_paths.len()
);
// Collect the results from the Multi handle.
let results = {
let mut results = Vec::new();
let pending = &mut self.downloads.pending;
self.multi.messages(|msg| {
let token = msg.token().expect("failed to read token");
let (_, handle) = &pending[&token];
if let Some(result) = msg.result_for(handle) {
results.push((token, result));
};
});
results
};
for (token, result) in results {
let (mut download, handle) = self.downloads.pending.remove(&token).unwrap();
let was_present = self.downloads.pending_paths.remove(&download.path);
assert!(
was_present,
"expected pending_paths to contain {:?}",
download.path
);
let mut handle = self.multi.remove(handle)?;
let data = download.data.take();
let url = self.full_url(&download.path);
let result = match download.retry.r#try(|| {
result.with_context(|| format!("failed to download from `{}`", url))?;
let code = handle.response_code()?;
// Keep this list of expected status codes in sync with the codes handled in `load`
let code = match code {
200 => StatusCode::Success,
304 => StatusCode::NotModified,
401 => StatusCode::Unauthorized,
404 | 410 | 451 => StatusCode::NotFound,
_ => {
return Err(HttpNotSuccessful::new_from_handle(
&mut handle,
&url,
data,
download.header_map.take().all,
)
.into());
}
};
Ok((data, code))
}) {
RetryResult::Success((data, code)) => Ok(CompletedDownload {
response_code: code,
data,
header_map: download.header_map.take(),
}),
RetryResult::Err(e) => Err(e),
RetryResult::Retry(sleep) => {
debug!(target: "network", "download retry {:?} for {sleep}ms", download.path);
self.downloads.sleeping.push(sleep, (download, handle));
continue;
}
};
self.downloads.results.insert(download.path, result);
self.downloads.downloads_finished += 1;
}
self.downloads.tick()?;
Ok(())
}
/// Constructs the full URL to download an index file.
fn full_url(&self, path: &Path) -> String {
// self.url always ends with a slash.
format!("{}{}", self.url, path.display())
}
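// Illustrative only (not in the original source): with a registry url of
// `https://example.com/index/` (the constructor guarantees the trailing slash)
// and a path of `se/rd/serde`, this yields `https://example.com/index/se/rd/serde`.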
/// Checks whether the index file at `path` is up-to-date.
///
/// The `path` argument is the same as in [`RegistryData::load`].
fn is_fresh(&self, path: &Path) -> bool {
if !self.requested_update {
trace!(
"using local {} as user did not request update",
path.display()
);
true
} else if self.config.cli_unstable().no_index_update {
trace!("using local {} in no_index_update mode", path.display());
true
} else if self.config.offline() {
trace!("using local {} in offline mode", path.display());
true
} else if self.fresh.contains(path) {
trace!("using local {} as it was already fetched", path.display());
true
} else {
debug!("checking freshness of {}", path.display());
false
}
}
/// Get the cached registry configuration, if it exists.
fn config_cached(&mut self) -> CargoResult<Option<&RegistryConfig>> {
if self.registry_config.is_some() {
return Ok(self.registry_config.as_ref());
}
let config_json_path = self
.assert_index_locked(&self.index_path)
.join(RegistryConfig::NAME);
match fs::read(&config_json_path) {
Ok(raw_data) => match serde_json::from_slice(&raw_data) {
Ok(json) => {
self.registry_config = Some(json);
}
Err(e) => tracing::debug!("failed to decode cached config.json: {}", e),
},
Err(e) => {
if e.kind() != ErrorKind::NotFound {
tracing::debug!("failed to read config.json cache: {}", e)
}
}
}
Ok(self.registry_config.as_ref())
}
/// Get the registry configuration from either cache or remote.
fn config(&mut self) -> Poll<CargoResult<&RegistryConfig>> {
debug!("loading config");
let index_path = self.assert_index_locked(&self.index_path);
let config_json_path = index_path.join(RegistryConfig::NAME);
if self.is_fresh(Path::new(RegistryConfig::NAME)) && self.config_cached()?.is_some() {
return Poll::Ready(Ok(self.registry_config.as_ref().unwrap()));
}
match ready!(self.load(Path::new(""), Path::new(RegistryConfig::NAME), None)?) {
LoadResponse::Data {
raw_data,
index_version: _,
} => {
trace!("config loaded");
self.registry_config = Some(serde_json::from_slice(&raw_data)?);
if paths::create_dir_all(&config_json_path.parent().unwrap()).is_ok() {
if let Err(e) = fs::write(&config_json_path, &raw_data) {
tracing::debug!("failed to write config.json cache: {}", e);
}
}
Poll::Ready(Ok(self.registry_config.as_ref().unwrap()))
}
LoadResponse::NotFound => {
Poll::Ready(Err(anyhow::anyhow!("config.json not found in registry")))
}
LoadResponse::CacheValid => Poll::Ready(Err(crate::util::internal(
"config.json is never stored in the index cache",
))),
}
}
/// Moves failed [`Download`]s that are ready to retry to the pending queue.
fn add_sleepers(&mut self) -> CargoResult<()> {
for (dl, handle) in self.downloads.sleeping.to_retry() {
let mut handle = self.multi.add(handle)?;
handle.set_token(dl.token)?;
let is_new = self.downloads.pending_paths.insert(dl.path.to_path_buf());
assert!(is_new, "path queued for download more than once");
let previous = self.downloads.pending.insert(dl.token, (dl, handle));
assert!(previous.is_none(), "dl token queued more than once");
}
Ok(())
}
}
impl<'cfg> RegistryData for HttpRegistry<'cfg> {
fn prepare(&self) -> CargoResult<()> {
Ok(())
}
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn assert_index_locked<'a>(&self, path: &'a Filesystem) -> &'a Path {
self.config.assert_package_cache_locked(path)
}
fn is_updated(&self) -> bool {
self.requested_update
}
fn load(
&mut self,
_root: &Path,
path: &Path,
index_version: Option<&str>,
) -> Poll<CargoResult<LoadResponse>> {
trace!("load: {}", path.display());
if let Some(_token) = self.downloads.pending_paths.get(path) {
debug!("dependency is still pending: {}", path.display());
return Poll::Pending;
}
if let Some(index_version) = index_version {
trace!(
"local cache of {} is available at version `{}`",
path.display(),
index_version
);
if self.is_fresh(path) {
return Poll::Ready(Ok(LoadResponse::CacheValid));
}
} else if self.fresh.contains(path) {
// We have no cached copy of this file, and we already downloaded it.
debug!(
"cache did not contain previously downloaded file {}",
path.display()
);
return Poll::Ready(Ok(LoadResponse::NotFound));
}
if self.config.offline() || self.config.cli_unstable().no_index_update {
// Return NotFound in offline mode when the file doesn't exist in the cache.
// If this results in resolution failure, the resolver will suggest
// removing the --offline flag.
return Poll::Ready(Ok(LoadResponse::NotFound));
}
if let Some(result) = self.downloads.results.remove(path) {
let result =
result.with_context(|| format!("download of {} failed", path.display()))?;
let is_new = self.fresh.insert(path.to_path_buf());
assert!(
is_new,
"downloaded the index file `{}` twice",
path.display()
);
// The status codes handled here need to be kept in sync with the codes handled
// in `handle_completed_downloads`
match result.response_code {
StatusCode::Success => {
let response_index_version = if let Some(etag) = result.header_map.etag {
format!("{}: {}", ETAG, etag)
} else if let Some(lm) = result.header_map.last_modified {
format!("{}: {}", LAST_MODIFIED, lm)
} else {
UNKNOWN.to_string()
};
trace!("index file version: {}", response_index_version);
return Poll::Ready(Ok(LoadResponse::Data {
raw_data: result.data,
index_version: Some(response_index_version),
}));
}
StatusCode::NotModified => {
// Not Modified: the data in the cache is still the latest.
if index_version.is_none() {
return Poll::Ready(Err(anyhow::anyhow!(
"server said not modified (HTTP 304) when no local cache exists"
)));
}
return Poll::Ready(Ok(LoadResponse::CacheValid));
}
StatusCode::NotFound => {
// The crate was not found or deleted from the registry.
return Poll::Ready(Ok(LoadResponse::NotFound));
}
StatusCode::Unauthorized
if !self.auth_required
&& path == Path::new(RegistryConfig::NAME)
&& self.config.cli_unstable().registry_auth =>
{
debug!(target: "network", "re-attempting request for config.json with authorization included.");
self.fresh.remove(path);
self.auth_required = true;
// Look for a `www-authenticate` header with the `Cargo` scheme.
for header in &result.header_map.www_authenticate {
for challenge in http_auth::ChallengeParser::new(header) {
match challenge {
Ok(challenge) if challenge.scheme.eq_ignore_ascii_case("Cargo") => {
// Look for the `login_url` parameter.
for (param, value) in challenge.params {
if param.eq_ignore_ascii_case("login_url") {
self.login_url = Some(value.to_unescaped().into_url()?);
}
}
}
Ok(challenge) => {
debug!(target: "network", "ignoring non-Cargo challenge: {}", challenge.scheme)
}
Err(e) => {
debug!(target: "network", "failed to parse challenge: {}", e)
}
}
}
}
self.auth_error_headers = result.header_map.all;
}
StatusCode::Unauthorized => {
let err = Err(HttpNotSuccessful {
code: 401,
body: result.data,
url: self.full_url(path),
ip: None,
headers: result.header_map.all,
}
.into());
if self.auth_required {
return Poll::Ready(err.context(auth::AuthorizationError {
sid: self.source_id.clone(),
default_registry: self.config.default_registry()?,
login_url: self.login_url.clone(),
reason: auth::AuthorizationErrorReason::TokenRejected,
}));
} else {
return Poll::Ready(err);
}
}
}
}
if path != Path::new(RegistryConfig::NAME) {
self.auth_required = ready!(self.config()?).auth_required;
} else if !self.auth_required {
// Check if there's a cached config that says auth is required.
// This allows avoiding the initial unauthenticated request to probe.
if let Some(config) = self.config_cached()? {
self.auth_required = config.auth_required;
}
}
if !self.config.cli_unstable().registry_auth {
self.auth_required = false;
}
// Looks like we're going to have to do a network request.
self.start_fetch()?;
let mut handle = http_handle(self.config)?;
let full_url = self.full_url(path);
debug!(target: "network", "fetch {}", full_url);
handle.get(true)?;
handle.url(&full_url)?;
handle.follow_location(true)?;
// Enable HTTP/2 if possible.
crate::try_old_curl_http2_pipewait!(self.multiplexing, handle);
let mut headers = List::new();
// Include a header to identify the protocol. This allows the server to
// know that Cargo is attempting to use the sparse protocol.
headers.append("cargo-protocol: version=1")?;
headers.append("accept: text/plain")?;
// If we have a cached copy of the file, include IF_NONE_MATCH or IF_MODIFIED_SINCE header.
if let Some(index_version) = index_version {
if let Some((key, value)) = index_version.split_once(':') {
match key {
ETAG => headers.append(&format!("{}: {}", IF_NONE_MATCH, value.trim()))?,
LAST_MODIFIED => {
headers.append(&format!("{}: {}", IF_MODIFIED_SINCE, value.trim()))?
}
_ => debug!("unexpected index version: {}", index_version),
}
}
}
if self.auth_required {
let authorization = auth::auth_token(
self.config,
&self.source_id,
self.login_url.as_ref(),
Operation::Read,
self.auth_error_headers.clone(),
)?;
headers.append(&format!("Authorization: {}", authorization))?;
trace!(target: "network", "including authorization for {}", full_url);
}
handle.http_headers(headers)?;
// We're going to have a bunch of downloads all happening "at the same time".
// So, we need some way to track what headers/data/responses are for which request.
// We do that through this token. Each request (and associated response) gets one.
let token = self.downloads.next;
self.downloads.next += 1;
debug!(target: "network", "downloading {} as {}", path.display(), token);
let is_new = self.downloads.pending_paths.insert(path.to_path_buf());
assert!(is_new, "path queued for download more than once");
// Each write should go to self.downloads.pending[&token].data.
// Since the write function must be 'static, we access downloads through a thread-local.
// That thread-local is set up in `block_until_ready` when it calls self.multi.perform,
// which is what ultimately calls this method.
handle.write_function(move |buf| {
trace!(target: "network", "{} - {} bytes of data", token, buf.len());
tls::with(|downloads| {
if let Some(downloads) = downloads {
downloads.pending[&token]
.0
.data
.borrow_mut()
.extend_from_slice(buf);
}
});
Ok(buf.len())
})?;
// And ditto for the header function.
handle.header_function(move |buf| {
if let Some((tag, value)) = Self::handle_http_header(buf) {
tls::with(|downloads| {
if let Some(downloads) = downloads {
let mut header_map = downloads.pending[&token].0.header_map.borrow_mut();
header_map.all.push(format!("{tag}: {value}"));
match tag.to_ascii_lowercase().as_str() {
LAST_MODIFIED => header_map.last_modified = Some(value.to_string()),
ETAG => header_map.etag = Some(value.to_string()),
WWW_AUTHENTICATE => header_map.www_authenticate.push(value.to_string()),
_ => {}
}
}
});
}
true
})?;
let dl = Download {
token,
path: path.to_path_buf(),
data: RefCell::new(Vec::new()),
header_map: Default::default(),
retry: Retry::new(self.config)?,
};
// Finally add the request we've lined up to the pool of requests that cURL manages.
let mut handle = self.multi.add(handle)?;
handle.set_token(token)?;
self.downloads.pending.insert(dl.token, (dl, handle));
Poll::Pending
}
fn config(&mut self) -> Poll<CargoResult<Option<RegistryConfig>>> {
let mut cfg = ready!(self.config()?).clone();
if !self.config.cli_unstable().registry_auth {
cfg.auth_required = false;
}
Poll::Ready(Ok(Some(cfg)))
}
fn invalidate_cache(&mut self) {
// Actually updating the index is more or less a no-op for this implementation.
// All it does is ensure that a subsequent load will double-check files with the
// server rather than rely on a locally cached copy of the index files.
debug!("invalidated index cache");
self.fresh.clear();
self.requested_update = true;
}
fn set_quiet(&mut self, quiet: bool) {
self.quiet = quiet;
self.downloads.progress.replace(None);
}
fn download(&mut self, pkg: PackageId, checksum: &str) -> CargoResult<MaybeLock> {
let registry_config = loop {
match self.config()? {
Poll::Pending => self.block_until_ready()?,
Poll::Ready(cfg) => break cfg.to_owned(),
}
};
download::download(
&self.cache_path,
&self.config,
pkg,
checksum,
registry_config,
)
}
fn finish_download(
&mut self,
pkg: PackageId,
checksum: &str,
data: &[u8],
) -> CargoResult<File> {
download::finish_download(&self.cache_path, &self.config, pkg, checksum, data)
}
fn is_crate_downloaded(&self, pkg: PackageId) -> bool {
download::is_crate_downloaded(&self.cache_path, &self.config, pkg)
}
fn block_until_ready(&mut self) -> CargoResult<()> {
trace!(target: "network",
"block_until_ready: {} transfers pending",
self.downloads.pending.len()
);
self.downloads.blocking_calls += 1;
loop {
self.handle_completed_downloads()?;
self.add_sleepers()?;
let remaining_in_multi = tls::set(&self.downloads, || {
self.multi
.perform()
.with_context(|| "failed to perform http requests")
})?;
trace!(target: "network", "{} transfers remaining", remaining_in_multi);
if remaining_in_multi + self.downloads.sleeping.len() as u32 == 0 {
return Ok(());
}
if self.downloads.pending.is_empty() {
let delay = self.downloads.sleeping.time_to_next().unwrap();
debug!(target: "network", "sleeping main thread for {delay:?}");
std::thread::sleep(delay);
} else {
// We have no more replies to provide the caller with,
// so we need to wait until cURL has something new for us.
let timeout = self
.multi
.get_timeout()?
.unwrap_or_else(|| Duration::new(1, 0));
self.multi
.wait(&mut [], timeout)
.with_context(|| "failed to wait on curl `Multi`")?;
}
}
}
}
impl<'cfg> Downloads<'cfg> {
/// Updates the state of the progress bar for downloads.
fn tick(&self) -> CargoResult<()> {
let mut progress = self.progress.borrow_mut();
let Some(progress) = progress.as_mut() else {
return Ok(());
};
// Since the sparse protocol discovers dependencies as it goes,
// it's not possible to get an accurate progress indication.
//
// As an approximation, we assume that the depth of the dependency graph
// is fixed, and base the progress on how many times the caller has asked
// for blocking. If there are actually additional dependencies, the progress
// bar will get stuck. If there are fewer dependencies, it will disappear
// early. It will never go backwards.
//
// The status text also contains the number of completed & pending requests, which
// gives a better indication of forward progress.
let approximate_tree_depth = 10;
progress.tick(
self.blocking_calls.min(approximate_tree_depth),
approximate_tree_depth + 1,
&format!(
" {} complete; {} pending",
self.downloads_finished,
self.pending.len() + self.sleeping.len()
),
)
}
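// Worked example (not in the original source): with `approximate_tree_depth`
// fixed at 10, the fourth blocking call draws the bar at 4/11 and appends a
// status such as " 7 complete; 3 pending" from the counters above; after more
// than ten blocking calls the bar simply stays pinned at 10/11.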
}
mod tls {
use super::Downloads;
use std::cell::Cell;
thread_local!(static PTR: Cell<usize> = Cell::new(0));
pub(super) fn with<R>(f: impl FnOnce(Option<&Downloads<'_>>) -> R) -> R {
let ptr = PTR.with(|p| p.get());
if ptr == 0 {
f(None)
} else {
// Safety: `ptr` is only set by `set` below, which ensures the type is correct.
let ptr = unsafe { &*(ptr as *const Downloads<'_>) };
f(Some(ptr))
}
}
pub(super) fn set<R>(dl: &Downloads<'_>, f: impl FnOnce() -> R) -> R {
struct Reset<'a, T: Copy>(&'a Cell<T>, T);
impl<'a, T: Copy> Drop for Reset<'a, T> {
fn drop(&mut self) {
self.0.set(self.1);
}
}
PTR.with(|p| {
let _reset = Reset(p, p.get());
p.set(dl as *const Downloads<'_> as usize);
f()
})
}
}
| handle_http_header | identifier_name |
http_remote.rs | //! Access to a HTTP-based crate registry. See [`HttpRegistry`] for details.
use crate::core::{PackageId, SourceId};
use crate::sources::registry::download;
use crate::sources::registry::MaybeLock;
use crate::sources::registry::{LoadResponse, RegistryConfig, RegistryData};
use crate::util::errors::{CargoResult, HttpNotSuccessful};
use crate::util::network::http::http_handle;
use crate::util::network::retry::{Retry, RetryResult};
use crate::util::network::sleep::SleepTracker;
use crate::util::{auth, Config, Filesystem, IntoUrl, Progress, ProgressStyle};
use anyhow::Context;
use cargo_credential::Operation;
use cargo_util::paths;
use curl::easy::{Easy, List};
use curl::multi::{EasyHandle, Multi};
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::fs::{self, File};
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::str;
use std::task::{ready, Poll};
use std::time::Duration;
use tracing::{debug, trace};
use url::Url;
// HTTP headers
const ETAG: &'static str = "etag";
const LAST_MODIFIED: &'static str = "last-modified";
const WWW_AUTHENTICATE: &'static str = "www-authenticate";
const IF_NONE_MATCH: &'static str = "if-none-match";
const IF_MODIFIED_SINCE: &'static str = "if-modified-since";
const UNKNOWN: &'static str = "Unknown";
/// A registry served by the HTTP-based registry API.
///
/// This type is primarily accessed through the [`RegistryData`] trait.
///
/// `HttpRegistry` implements the HTTP-based registry API outlined in [RFC 2789]. Read the RFC for
/// the complete protocol, but _roughly_ the implementation loads each index file (e.g.,
/// config.json or re/ge/regex) from an HTTP service rather than from a locally cloned git
/// repository. The remote service can more or less be a static file server that simply serves the
/// contents of the origin git repository.
///
/// Implemented naively, this leads to a significant amount of network traffic, as a lookup of any
/// index file would need to check with the remote backend if the index file has changed. This
/// cost is somewhat mitigated by the use of HTTP conditional fetches (`If-Modified-Since` and
/// `If-None-Match` for `ETag`s) which can be efficiently handled by HTTP/2.
///
/// [RFC 2789]: https://github.com/rust-lang/rfcs/pull/2789
pub struct HttpRegistry<'cfg> {
/// Path to the registry index (`$CARGO_HOME/registry/index/$REG-HASH`).
///
/// To be fair, `HttpRegistry` doesn't store the registry index it
/// downloads on the file system, but other cached data like registry
/// configuration could be stored here.
index_path: Filesystem,
/// Path to the cache of `.crate` files (`$CARGO_HOME/registry/cache/$REG-HASH`).
cache_path: Filesystem,
/// The unique identifier of this registry source.
source_id: SourceId,
config: &'cfg Config,
/// Store the server URL without the protocol prefix (sparse+)
url: Url,
/// HTTP multi-handle for asynchronous/parallel requests.
multi: Multi,
/// Has the client requested a cache update?
///
/// Only if they have do we double-check the freshness of each locally-stored index file.
requested_update: bool,
/// State for currently pending index downloads.
downloads: Downloads<'cfg>,
/// Does the config say that we can use HTTP multiplexing?
multiplexing: bool,
/// What paths have we already fetched since the last index update?
///
/// We do not need to double-check any of these index files since we have already done so.
fresh: HashSet<PathBuf>,
/// Have we started to download any index files?
fetch_started: bool,
/// Cached registry configuration.
registry_config: Option<RegistryConfig>,
/// Should we include the authorization header?
auth_required: bool,
/// Url to get a token for the registry.
login_url: Option<Url>,
/// Headers received with an HTTP 401.
auth_error_headers: Vec<String>,
/// Disables status messages.
quiet: bool,
}
/// State for currently pending index file downloads.
struct Downloads<'cfg> {
/// When a download is started, it is added to this map. The key is a
/// "token" (see [`Download::token`]). It is removed once the download is
/// finished.
pending: HashMap<usize, (Download<'cfg>, EasyHandle)>,
/// Set of paths currently being downloaded.
/// This should stay in sync with the `pending` field.
pending_paths: HashSet<PathBuf>,
/// Downloads that have failed and are waiting to retry again later.
sleeping: SleepTracker<(Download<'cfg>, Easy)>,
/// The final result of each download.
results: HashMap<PathBuf, CargoResult<CompletedDownload>>,
/// The next ID to use for creating a token (see [`Download::token`]).
next: usize,
/// Progress bar.
progress: RefCell<Option<Progress<'cfg>>>,
/// Number of downloads that have successfully finished.
downloads_finished: usize,
/// Number of times the caller has requested blocking. This is used for
/// an estimate of progress.
blocking_calls: usize,
}
/// Represents a single index file download, including its progress and retry.
struct Download<'cfg> {
/// The token for this download, used as the key of the
/// [`Downloads::pending`] map and stored in [`EasyHandle`] as well.
token: usize,
/// The path of the package that we're downloading.
path: PathBuf,
/// Actual downloaded data, updated throughout the lifetime of this download.
data: RefCell<Vec<u8>>,
/// HTTP headers.
header_map: RefCell<Headers>,
/// Logic used to track retrying this download if it's a spurious failure.
retry: Retry<'cfg>,
}
/// HTTP headers [`HttpRegistry`] cares about.
#[derive(Default)]
struct Headers {
last_modified: Option<String>,
etag: Option<String>,
www_authenticate: Vec<String>,
/// All headers, including explicit headers above.
all: Vec<String>,
}
/// HTTP status code [`HttpRegistry`] cares about.
enum StatusCode {
Success,
NotModified,
NotFound,
Unauthorized,
}
/// Represents a complete [`Download`] from an HTTP request.
///
/// Usually it is constructed in [`HttpRegistry::handle_completed_downloads`],
/// and then returns to the caller of [`HttpRegistry::load()`].
struct CompletedDownload {
response_code: StatusCode,
data: Vec<u8>,
header_map: Headers,
}
impl<'cfg> HttpRegistry<'cfg> {
/// Creates an HTTP-based remote registry for `source_id`.
///
/// * `name` --- Name of a path segment where `.crate` tarballs and the
/// registry index are stored. Expected to be unique.
pub fn new(
source_id: SourceId,
config: &'cfg Config,
name: &str,
) -> CargoResult<HttpRegistry<'cfg>> {
let url = source_id.url().as_str();
// Ensure the url ends with a slash so we can concatenate paths.
if !url.ends_with('/') {
anyhow::bail!("sparse registry url must end in a slash `/`: {url}")
}
assert!(source_id.is_sparse());
let url = url
.strip_prefix("sparse+")
.expect("sparse registry needs sparse+ prefix")
.into_url()
.expect("a url with the sparse+ stripped should still be valid");
Ok(HttpRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id,
config,
url,
multi: Multi::new(),
multiplexing: false,
downloads: Downloads {
next: 0,
pending: HashMap::new(),
pending_paths: HashSet::new(),
sleeping: SleepTracker::new(),
results: HashMap::new(),
progress: RefCell::new(Some(Progress::with_style(
"Fetch",
ProgressStyle::Indeterminate,
config,
))),
downloads_finished: 0,
blocking_calls: 0,
},
fresh: HashSet::new(),
requested_update: false,
fetch_started: false,
registry_config: None,
auth_required: false,
login_url: None,
auth_error_headers: vec![],
quiet: false,
})
}
/// Splits an HTTP `HEADER: VALUE` line into a tuple.
fn handle_http_header(buf: &[u8]) -> Option<(&str, &str)> {
if buf.is_empty() {
return None;
}
let buf = std::str::from_utf8(buf).ok()?.trim_end();
// Don't let server sneak extra lines anywhere.
if buf.contains('\n') {
return None;
}
let (tag, value) = buf.split_once(':')?;
let value = value.trim();
Some((tag, value))
}
/// Performs the necessary setup before the first fetch gets started.
///
/// This is a no-op if called more than one time.
fn start_fetch(&mut self) -> CargoResult<()> {
if self.fetch_started {
// We only need to run the setup code once.
return Ok(());
}
self.fetch_started = true;
// We've enabled the `http2` feature of `curl` in Cargo, so treat
// failures here as fatal as it would indicate a build-time problem.
self.multiplexing = self.config.http_config()?.multiplexing.unwrap_or(true);
self.multi
.pipelining(false, self.multiplexing)
.with_context(|| "failed to enable multiplexing/pipelining in curl")?;
// let's not flood the server with connections
self.multi.set_max_host_connections(2)?;
if !self.quiet {
self.config
.shell()
.status("Updating", self.source_id.display_index())?;
}
Ok(())
}
/// Checks the results inside the [`HttpRegistry::multi`] handle, and
/// updates relevant state in [`HttpRegistry::downloads`] accordingly.
fn handle_completed_downloads(&mut self) -> CargoResult<()> {
assert_eq!(
self.downloads.pending.len(),
self.downloads.pending_paths.len()
);
// Collect the results from the Multi handle.
let results = {
let mut results = Vec::new();
let pending = &mut self.downloads.pending;
self.multi.messages(|msg| {
let token = msg.token().expect("failed to read token");
let (_, handle) = &pending[&token];
if let Some(result) = msg.result_for(handle) {
results.push((token, result));
};
});
results
};
for (token, result) in results {
let (mut download, handle) = self.downloads.pending.remove(&token).unwrap();
let was_present = self.downloads.pending_paths.remove(&download.path);
assert!(
was_present,
"expected pending_paths to contain {:?}",
download.path
);
let mut handle = self.multi.remove(handle)?;
let data = download.data.take();
let url = self.full_url(&download.path);
let result = match download.retry.r#try(|| {
result.with_context(|| format!("failed to download from `{}`", url))?;
let code = handle.response_code()?;
// Keep this list of expected status codes in sync with the codes handled in `load`
let code = match code {
200 => StatusCode::Success,
304 => StatusCode::NotModified,
401 => StatusCode::Unauthorized,
404 | 410 | 451 => StatusCode::NotFound,
_ => {
return Err(HttpNotSuccessful::new_from_handle(
&mut handle,
&url,
data,
download.header_map.take().all,
)
.into());
}
};
Ok((data, code))
}) {
RetryResult::Success((data, code)) => Ok(CompletedDownload {
response_code: code,
data,
header_map: download.header_map.take(),
}),
RetryResult::Err(e) => Err(e),
RetryResult::Retry(sleep) => {
debug!(target: "network", "download retry {:?} for {sleep}ms", download.path);
self.downloads.sleeping.push(sleep, (download, handle));
continue;
}
};
self.downloads.results.insert(download.path, result);
self.downloads.downloads_finished += 1;
}
self.downloads.tick()?;
Ok(())
}
/// Constructs the full URL to download an index file.
fn full_url(&self, path: &Path) -> String {
// self.url always ends with a slash.
format!("{}{}", self.url, path.display())
}
/// Checks whether the index file at `path` is up-to-date.
///
/// The `path` argument is the same as in [`RegistryData::load`].
fn is_fresh(&self, path: &Path) -> bool {
if !self.requested_update {
trace!(
"using local {} as user did not request update",
path.display()
);
true
} else if self.config.cli_unstable().no_index_update {
trace!("using local {} in no_index_update mode", path.display());
true
} else if self.config.offline() {
trace!("using local {} in offline mode", path.display());
true
} else if self.fresh.contains(path) {
trace!("using local {} as it was already fetched", path.display());
true
} else {
debug!("checking freshness of {}", path.display());
false
}
}
/// Get the cached registry configuration, if it exists.
fn config_cached(&mut self) -> CargoResult<Option<&RegistryConfig>> {
if self.registry_config.is_some() {
return Ok(self.registry_config.as_ref());
}
let config_json_path = self
.assert_index_locked(&self.index_path)
.join(RegistryConfig::NAME);
match fs::read(&config_json_path) {
Ok(raw_data) => match serde_json::from_slice(&raw_data) {
Ok(json) => {
self.registry_config = Some(json);
}
Err(e) => tracing::debug!("failed to decode cached config.json: {}", e),
},
Err(e) => {
if e.kind() != ErrorKind::NotFound {
tracing::debug!("failed to read config.json cache: {}", e)
}
}
}
Ok(self.registry_config.as_ref())
}
/// Get the registry configuration from either cache or remote.
fn config(&mut self) -> Poll<CargoResult<&RegistryConfig>> {
debug!("loading config");
let index_path = self.assert_index_locked(&self.index_path);
let config_json_path = index_path.join(RegistryConfig::NAME);
if self.is_fresh(Path::new(RegistryConfig::NAME)) && self.config_cached()?.is_some() {
return Poll::Ready(Ok(self.registry_config.as_ref().unwrap()));
}
match ready!(self.load(Path::new(""), Path::new(RegistryConfig::NAME), None)?) {
LoadResponse::Data {
raw_data,
index_version: _,
} => {
trace!("config loaded");
self.registry_config = Some(serde_json::from_slice(&raw_data)?);
if paths::create_dir_all(&config_json_path.parent().unwrap()).is_ok() {
if let Err(e) = fs::write(&config_json_path, &raw_data) {
tracing::debug!("failed to write config.json cache: {}", e);
}
}
Poll::Ready(Ok(self.registry_config.as_ref().unwrap()))
}
LoadResponse::NotFound => {
Poll::Ready(Err(anyhow::anyhow!("config.json not found in registry")))
}
LoadResponse::CacheValid => Poll::Ready(Err(crate::util::internal(
"config.json is never stored in the index cache",
))),
}
}
/// Moves failed [`Download`]s that are ready to retry to the pending queue.
fn add_sleepers(&mut self) -> CargoResult<()> {
for (dl, handle) in self.downloads.sleeping.to_retry() {
let mut handle = self.multi.add(handle)?;
handle.set_token(dl.token)?;
let is_new = self.downloads.pending_paths.insert(dl.path.to_path_buf());
assert!(is_new, "path queued for download more than once");
let previous = self.downloads.pending.insert(dl.token, (dl, handle));
assert!(previous.is_none(), "dl token queued more than once");
}
Ok(())
}
}
impl<'cfg> RegistryData for HttpRegistry<'cfg> {
fn prepare(&self) -> CargoResult<()> {
Ok(())
}
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn assert_index_locked<'a>(&self, path: &'a Filesystem) -> &'a Path {
self.config.assert_package_cache_locked(path)
}
fn is_updated(&self) -> bool {
self.requested_update
}
fn load(
&mut self,
_root: &Path,
path: &Path,
index_version: Option<&str>,
) -> Poll<CargoResult<LoadResponse>> {
trace!("load: {}", path.display());
if let Some(_token) = self.downloads.pending_paths.get(path) {
debug!("dependency is still pending: {}", path.display());
return Poll::Pending;
}
if let Some(index_version) = index_version {
trace!(
"local cache of {} is available at version `{}`",
path.display(),
index_version
);
if self.is_fresh(path) {
return Poll::Ready(Ok(LoadResponse::CacheValid));
}
} else if self.fresh.contains(path) {
// We have no cached copy of this file, and we already downloaded it.
debug!(
"cache did not contain previously downloaded file {}",
path.display()
);
return Poll::Ready(Ok(LoadResponse::NotFound));
}
if self.config.offline() || self.config.cli_unstable().no_index_update {
// Return NotFound in offline mode when the file doesn't exist in the cache.
// If this results in resolution failure, the resolver will suggest
// removing the --offline flag.
return Poll::Ready(Ok(LoadResponse::NotFound));
}
if let Some(result) = self.downloads.results.remove(path) {
let result =
result.with_context(|| format!("download of {} failed", path.display()))?;
let is_new = self.fresh.insert(path.to_path_buf());
assert!(
is_new,
"downloaded the index file `{}` twice",
path.display()
);
// The status codes handled here need to be kept in sync with the codes handled
// in `handle_completed_downloads`
match result.response_code {
StatusCode::Success => {
let response_index_version = if let Some(etag) = result.header_map.etag { | } else {
UNKNOWN.to_string()
};
trace!("index file version: {}", response_index_version);
return Poll::Ready(Ok(LoadResponse::Data {
raw_data: result.data,
index_version: Some(response_index_version),
}));
}
StatusCode::NotModified => {
// Not Modified: the data in the cache is still the latest.
if index_version.is_none() {
return Poll::Ready(Err(anyhow::anyhow!(
"server said not modified (HTTP 304) when no local cache exists"
)));
}
return Poll::Ready(Ok(LoadResponse::CacheValid));
}
StatusCode::NotFound => {
// The crate was not found or deleted from the registry.
return Poll::Ready(Ok(LoadResponse::NotFound));
}
StatusCode::Unauthorized
if !self.auth_required
&& path == Path::new(RegistryConfig::NAME)
&& self.config.cli_unstable().registry_auth =>
{
debug!(target: "network", "re-attempting request for config.json with authorization included.");
self.fresh.remove(path);
self.auth_required = true;
// Look for a `www-authenticate` header with the `Cargo` scheme.
for header in &result.header_map.www_authenticate {
for challenge in http_auth::ChallengeParser::new(header) {
match challenge {
Ok(challenge) if challenge.scheme.eq_ignore_ascii_case("Cargo") => {
// Look for the `login_url` parameter.
for (param, value) in challenge.params {
if param.eq_ignore_ascii_case("login_url") {
self.login_url = Some(value.to_unescaped().into_url()?);
}
}
}
Ok(challenge) => {
debug!(target: "network", "ignoring non-Cargo challenge: {}", challenge.scheme)
}
Err(e) => {
debug!(target: "network", "failed to parse challenge: {}", e)
}
}
}
}
self.auth_error_headers = result.header_map.all;
}
StatusCode::Unauthorized => {
let err = Err(HttpNotSuccessful {
code: 401,
body: result.data,
url: self.full_url(path),
ip: None,
headers: result.header_map.all,
}
.into());
if self.auth_required {
return Poll::Ready(err.context(auth::AuthorizationError {
sid: self.source_id.clone(),
default_registry: self.config.default_registry()?,
login_url: self.login_url.clone(),
reason: auth::AuthorizationErrorReason::TokenRejected,
}));
} else {
return Poll::Ready(err);
}
}
}
}
if path != Path::new(RegistryConfig::NAME) {
self.auth_required = ready!(self.config()?).auth_required;
} else if !self.auth_required {
// Check if there's a cached config that says auth is required.
// This allows avoiding the initial unauthenticated request to probe.
if let Some(config) = self.config_cached()? {
self.auth_required = config.auth_required;
}
}
if !self.config.cli_unstable().registry_auth {
self.auth_required = false;
}
// Looks like we're going to have to do a network request.
self.start_fetch()?;
let mut handle = http_handle(self.config)?;
let full_url = self.full_url(path);
debug!(target: "network", "fetch {}", full_url);
handle.get(true)?;
handle.url(&full_url)?;
handle.follow_location(true)?;
// Enable HTTP/2 if possible.
crate::try_old_curl_http2_pipewait!(self.multiplexing, handle);
let mut headers = List::new();
// Include a header to identify the protocol. This allows the server to
// know that Cargo is attempting to use the sparse protocol.
headers.append("cargo-protocol: version=1")?;
headers.append("accept: text/plain")?;
// If we have a cached copy of the file, include IF_NONE_MATCH or IF_MODIFIED_SINCE header.
if let Some(index_version) = index_version {
if let Some((key, value)) = index_version.split_once(':') {
match key {
ETAG => headers.append(&format!("{}: {}", IF_NONE_MATCH, value.trim()))?,
LAST_MODIFIED => {
headers.append(&format!("{}: {}", IF_MODIFIED_SINCE, value.trim()))?
}
_ => debug!("unexpected index version: {}", index_version),
}
}
}
if self.auth_required {
let authorization = auth::auth_token(
self.config,
&self.source_id,
self.login_url.as_ref(),
Operation::Read,
self.auth_error_headers.clone(),
)?;
headers.append(&format!("Authorization: {}", authorization))?;
trace!(target: "network", "including authorization for {}", full_url);
}
handle.http_headers(headers)?;
// We're going to have a bunch of downloads all happening "at the same time".
// So, we need some way to track what headers/data/responses are for which request.
// We do that through this token. Each request (and associated response) gets one.
let token = self.downloads.next;
self.downloads.next += 1;
debug!(target: "network", "downloading {} as {}", path.display(), token);
let is_new = self.downloads.pending_paths.insert(path.to_path_buf());
assert!(is_new, "path queued for download more than once");
// Each write should go to self.downloads.pending[&token].data.
// Since the write function must be 'static, we access downloads through a thread-local.
// That thread-local is set up in `block_until_ready` when it calls self.multi.perform,
// which is what ultimately calls this method.
handle.write_function(move |buf| {
trace!(target: "network", "{} - {} bytes of data", token, buf.len());
tls::with(|downloads| {
if let Some(downloads) = downloads {
downloads.pending[&token]
.0
.data
.borrow_mut()
.extend_from_slice(buf);
}
});
Ok(buf.len())
})?;
// And ditto for the header function.
handle.header_function(move |buf| {
if let Some((tag, value)) = Self::handle_http_header(buf) {
tls::with(|downloads| {
if let Some(downloads) = downloads {
let mut header_map = downloads.pending[&token].0.header_map.borrow_mut();
header_map.all.push(format!("{tag}: {value}"));
match tag.to_ascii_lowercase().as_str() {
LAST_MODIFIED => header_map.last_modified = Some(value.to_string()),
ETAG => header_map.etag = Some(value.to_string()),
WWW_AUTHENTICATE => header_map.www_authenticate.push(value.to_string()),
_ => {}
}
}
});
}
true
})?;
let dl = Download {
token,
path: path.to_path_buf(),
data: RefCell::new(Vec::new()),
header_map: Default::default(),
retry: Retry::new(self.config)?,
};
// Finally add the request we've lined up to the pool of requests that cURL manages.
let mut handle = self.multi.add(handle)?;
handle.set_token(token)?;
self.downloads.pending.insert(dl.token, (dl, handle));
Poll::Pending
}
fn config(&mut self) -> Poll<CargoResult<Option<RegistryConfig>>> {
let mut cfg = ready!(self.config()?).clone();
if !self.config.cli_unstable().registry_auth {
cfg.auth_required = false;
}
Poll::Ready(Ok(Some(cfg)))
}
fn invalidate_cache(&mut self) {
// Actually updating the index is more or less a no-op for this implementation.
// All it does is ensure that a subsequent load will double-check files with the
// server rather than rely on a locally cached copy of the index files.
debug!("invalidated index cache");
self.fresh.clear();
self.requested_update = true;
}
fn set_quiet(&mut self, quiet: bool) {
self.quiet = quiet;
self.downloads.progress.replace(None);
}
fn download(&mut self, pkg: PackageId, checksum: &str) -> CargoResult<MaybeLock> {
let registry_config = loop {
match self.config()? {
Poll::Pending => self.block_until_ready()?,
Poll::Ready(cfg) => break cfg.to_owned(),
}
};
download::download(
&self.cache_path,
&self.config,
pkg,
checksum,
registry_config,
)
}
fn finish_download(
&mut self,
pkg: PackageId,
checksum: &str,
data: &[u8],
) -> CargoResult<File> {
download::finish_download(&self.cache_path, &self.config, pkg, checksum, data)
}
fn is_crate_downloaded(&self, pkg: PackageId) -> bool {
download::is_crate_downloaded(&self.cache_path, &self.config, pkg)
}
fn block_until_ready(&mut self) -> CargoResult<()> {
trace!(target: "network",
"block_until_ready: {} transfers pending",
self.downloads.pending.len()
);
self.downloads.blocking_calls += 1;
loop {
self.handle_completed_downloads()?;
self.add_sleepers()?;
let remaining_in_multi = tls::set(&self.downloads, || {
self.multi
.perform()
.with_context(|| "failed to perform http requests")
})?;
trace!(target: "network", "{} transfers remaining", remaining_in_multi);
if remaining_in_multi + self.downloads.sleeping.len() as u32 == 0 {
return Ok(());
}
if self.downloads.pending.is_empty() {
let delay = self.downloads.sleeping.time_to_next().unwrap();
debug!(target: "network", "sleeping main thread for {delay:?}");
std::thread::sleep(delay);
} else {
// We have no more replies to provide the caller with,
// so we need to wait until cURL has something new for us.
let timeout = self
.multi
.get_timeout()?
.unwrap_or_else(|| Duration::new(1, 0));
self.multi
.wait(&mut [], timeout)
.with_context(|| "failed to wait on curl `Multi`")?;
}
}
}
}
impl<'cfg> Downloads<'cfg> {
/// Updates the state of the progress bar for downloads.
fn tick(&self) -> CargoResult<()> {
let mut progress = self.progress.borrow_mut();
let Some(progress) = progress.as_mut() else {
return Ok(());
};
// Since the sparse protocol discovers dependencies as it goes,
// it's not possible to get an accurate progress indication.
//
// As an approximation, we assume that the depth of the dependency graph
// is fixed, and base the progress on how many times the caller has asked
// for blocking. If there are actually additional dependencies, the progress
// bar will get stuck. If there are fewer dependencies, it will disappear
// early. It will never go backwards.
//
// The status text also contains the number of completed & pending requests, which
// gives a better indication of forward progress.
let approximate_tree_depth = 10;
progress.tick(
self.blocking_calls.min(approximate_tree_depth),
approximate_tree_depth + 1,
&format!(
" {} complete; {} pending",
self.downloads_finished,
self.pending.len() + self.sleeping.len()
),
)
}
}
mod tls {
use super::Downloads;
use std::cell::Cell;
thread_local!(static PTR: Cell<usize> = Cell::new(0));
pub(super) fn with<R>(f: impl FnOnce(Option<&Downloads<'_>>) -> R) -> R {
let ptr = PTR.with(|p| p.get());
if ptr == 0 {
f(None)
} else {
// Safety: `ptr` is only set by `set` below, which ensures the type is correct.
let ptr = unsafe { &*(ptr as *const Downloads<'_>) };
f(Some(ptr))
}
}
pub(super) fn set<R>(dl: &Downloads<'_>, f: impl FnOnce() -> R) -> R {
struct Reset<'a, T: Copy>(&'a Cell<T>, T);
impl<'a, T: Copy> Drop for Reset<'a, T> {
fn drop(&mut self) {
self.0.set(self.1);
}
}
PTR.with(|p| {
let _reset = Reset(p, p.get());
p.set(dl as *const Downloads<'_> as usize);
f()
})
}
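// Illustrative only (not in the original source): `block_until_ready` wraps the
// `Multi::perform` call in `set`, so the 'static curl callbacks installed in
// `load` can reach the same `Downloads` value again through `with`:
//
//     let remaining = tls::set(&self.downloads, || self.multi.perform())?;
//     // ...and inside a write/header callback registered earlier:
//     tls::with(|dl| if let Some(dl) = dl { /* touch dl.pending[&token] */ });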
} | format!("{}: {}", ETAG, etag)
} else if let Some(lm) = result.header_map.last_modified {
format!("{}: {}", LAST_MODIFIED, lm) | random_line_split |
http_remote.rs | //! Access to a HTTP-based crate registry. See [`HttpRegistry`] for details.
use crate::core::{PackageId, SourceId};
use crate::sources::registry::download;
use crate::sources::registry::MaybeLock;
use crate::sources::registry::{LoadResponse, RegistryConfig, RegistryData};
use crate::util::errors::{CargoResult, HttpNotSuccessful};
use crate::util::network::http::http_handle;
use crate::util::network::retry::{Retry, RetryResult};
use crate::util::network::sleep::SleepTracker;
use crate::util::{auth, Config, Filesystem, IntoUrl, Progress, ProgressStyle};
use anyhow::Context;
use cargo_credential::Operation;
use cargo_util::paths;
use curl::easy::{Easy, List};
use curl::multi::{EasyHandle, Multi};
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::fs::{self, File};
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::str;
use std::task::{ready, Poll};
use std::time::Duration;
use tracing::{debug, trace};
use url::Url;
// HTTP headers
const ETAG: &'static str = "etag";
const LAST_MODIFIED: &'static str = "last-modified";
const WWW_AUTHENTICATE: &'static str = "www-authenticate";
const IF_NONE_MATCH: &'static str = "if-none-match";
const IF_MODIFIED_SINCE: &'static str = "if-modified-since";
const UNKNOWN: &'static str = "Unknown";
/// A registry served by the HTTP-based registry API.
///
/// This type is primarily accessed through the [`RegistryData`] trait.
///
/// `HttpRegistry` implements the HTTP-based registry API outlined in [RFC 2789]. Read the RFC for
/// the complete protocol, but _roughly_ the implementation loads each index file (e.g.,
/// config.json or re/ge/regex) from an HTTP service rather than from a locally cloned git
/// repository. The remote service can more or less be a static file server that simply serves the
/// contents of the origin git repository.
///
/// Implemented naively, this leads to a significant amount of network traffic, as a lookup of any
/// index file would need to check with the remote backend if the index file has changed. This
/// cost is somewhat mitigated by the use of HTTP conditional fetches (`If-Modified-Since` and
/// `If-None-Match` for `ETag`s) which can be efficiently handled by HTTP/2.
///
/// [RFC 2789]: https://github.com/rust-lang/rfcs/pull/2789
pub struct HttpRegistry<'cfg> {
/// Path to the registry index (`$CARGO_HOME/registry/index/$REG-HASH`).
///
/// To be fair, `HttpRegistry` doesn't store the registry index it
/// downloads on the file system, but other cached data like registry
/// configuration could be stored here.
index_path: Filesystem,
/// Path to the cache of `.crate` files (`$CARGO_HOME/registry/cache/$REG-HASH`).
cache_path: Filesystem,
/// The unique identifier of this registry source.
source_id: SourceId,
config: &'cfg Config,
/// Store the server URL without the protocol prefix (sparse+)
url: Url,
/// HTTP multi-handle for asynchronous/parallel requests.
multi: Multi,
/// Has the client requested a cache update?
///
/// Only if they have do we double-check the freshness of each locally-stored index file.
requested_update: bool,
/// State for currently pending index downloads.
downloads: Downloads<'cfg>,
/// Does the config say that we can use HTTP multiplexing?
multiplexing: bool,
/// What paths have we already fetched since the last index update?
///
/// We do not need to double-check any of these index files since we have already done so.
fresh: HashSet<PathBuf>,
/// Have we started to download any index files?
fetch_started: bool,
/// Cached registry configuration.
registry_config: Option<RegistryConfig>,
/// Should we include the authorization header?
auth_required: bool,
/// Url to get a token for the registry.
login_url: Option<Url>,
/// Headers received with an HTTP 401.
auth_error_headers: Vec<String>,
/// Disables status messages.
quiet: bool,
}
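// Illustrative sketch (not part of the original source): how a cached
// `index_version` string such as `"etag: \"abc\""` is mapped back onto a
// conditional-request header, mirroring the `IF_NONE_MATCH`/`IF_MODIFIED_SINCE`
// logic inside `RegistryData::load` further below. The helper name is
// hypothetical; the constants are the ones defined at the top of this file.
fn _conditional_header_sketch(index_version: &str) -> Option<String> {
    let (key, value) = index_version.split_once(':')?;
    match key {
        ETAG => Some(format!("{}: {}", IF_NONE_MATCH, value.trim())),
        LAST_MODIFIED => Some(format!("{}: {}", IF_MODIFIED_SINCE, value.trim())),
        _ => None,
    }
}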
/// State for currently pending index file downloads.
struct Downloads<'cfg> {
/// When a download is started, it is added to this map. The key is a
/// "token" (see [`Download::token`]). It is removed once the download is
/// finished.
pending: HashMap<usize, (Download<'cfg>, EasyHandle)>,
/// Set of paths currently being downloaded.
/// This should stay in sync with the `pending` field.
pending_paths: HashSet<PathBuf>,
/// Downloads that have failed and are waiting to retry again later.
sleeping: SleepTracker<(Download<'cfg>, Easy)>,
/// The final result of each download.
results: HashMap<PathBuf, CargoResult<CompletedDownload>>,
/// The next ID to use for creating a token (see [`Download::token`]).
next: usize,
/// Progress bar.
progress: RefCell<Option<Progress<'cfg>>>,
/// Number of downloads that have successfully finished.
downloads_finished: usize,
/// Number of times the caller has requested blocking. This is used for
/// an estimate of progress.
blocking_calls: usize,
}
/// Represents a single index file download, including its progress and retry.
struct Download<'cfg> {
/// The token for this download, used as the key of the
/// [`Downloads::pending`] map and stored in [`EasyHandle`] as well.
token: usize,
/// The path of the package that we're downloading.
path: PathBuf,
/// Actual downloaded data, updated throughout the lifetime of this download.
data: RefCell<Vec<u8>>,
/// HTTP headers.
header_map: RefCell<Headers>,
/// Logic used to track retrying this download if it's a spurious failure.
retry: Retry<'cfg>,
}
/// HTTP headers [`HttpRegistry`] cares about.
#[derive(Default)]
struct Headers {
last_modified: Option<String>,
etag: Option<String>,
www_authenticate: Vec<String>,
/// All headers, including explicit headers above.
all: Vec<String>,
}
/// HTTP status code [`HttpRegistry`] cares about.
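/// These variants correspond to the raw HTTP codes matched in
/// [`HttpRegistry::handle_completed_downloads`]: 200 (`Success`),
/// 304 (`NotModified`), 401 (`Unauthorized`), and 404/410/451 (`NotFound`).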
enum StatusCode {
Success,
NotModified,
NotFound,
Unauthorized,
}
/// Represents a complete [`Download`] from an HTTP request.
///
/// Usually it is constructed in [`HttpRegistry::handle_completed_downloads`],
/// and then returns to the caller of [`HttpRegistry::load()`].
struct CompletedDownload {
response_code: StatusCode,
data: Vec<u8>,
header_map: Headers,
}
impl<'cfg> HttpRegistry<'cfg> {
/// Creates an HTTP-based remote registry for `source_id`.
///
/// * `name` --- Name of a path segment where `.crate` tarballs and the
/// registry index are stored. Expected to be unique.
pub fn new(
source_id: SourceId,
config: &'cfg Config,
name: &str,
) -> CargoResult<HttpRegistry<'cfg>> {
let url = source_id.url().as_str();
// Ensure the url ends with a slash so we can concatenate paths.
if !url.ends_with('/') {
anyhow::bail!("sparse registry url must end in a slash `/`: {url}")
}
assert!(source_id.is_sparse());
let url = url
.strip_prefix("sparse+")
.expect("sparse registry needs sparse+ prefix")
.into_url()
.expect("a url with the sparse+ stripped should still be valid");
Ok(HttpRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id,
config,
url,
multi: Multi::new(),
multiplexing: false,
downloads: Downloads {
next: 0,
pending: HashMap::new(),
pending_paths: HashSet::new(),
sleeping: SleepTracker::new(),
results: HashMap::new(),
progress: RefCell::new(Some(Progress::with_style(
"Fetch",
ProgressStyle::Indeterminate,
config,
))),
downloads_finished: 0,
blocking_calls: 0,
},
fresh: HashSet::new(),
requested_update: false,
fetch_started: false,
registry_config: None,
auth_required: false,
login_url: None,
auth_error_headers: vec![],
quiet: false,
})
}
/// Splits HTTP `HEADER: VALUE` to a tuple.
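/// For example, `b"etag: \"abc123\"\r\n"` becomes `Some(("etag", "\"abc123\""))`,
/// while any input containing an embedded newline yields `None`.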
fn handle_http_header(buf: &[u8]) -> Option<(&str, &str)> {
if buf.is_empty() {
return None;
}
let buf = std::str::from_utf8(buf).ok()?.trim_end();
// Don't let server sneak extra lines anywhere.
if buf.contains('\n') {
return None;
}
let (tag, value) = buf.split_once(':')?;
let value = value.trim();
Some((tag, value))
}
/// Sets up the necessary work before the first fetch gets started.
///
/// This is a no-op if called more than one time.
fn start_fetch(&mut self) -> CargoResult<()> {
if self.fetch_started {
// We only need to run the setup code once.
return Ok(());
}
self.fetch_started = true;
// We've enabled the `http2` feature of `curl` in Cargo, so treat
// failures here as fatal as it would indicate a build-time problem.
self.multiplexing = self.config.http_config()?.multiplexing.unwrap_or(true);
self.multi
.pipelining(false, self.multiplexing)
.with_context(|| "failed to enable multiplexing/pipelining in curl")?;
// let's not flood the server with connections
self.multi.set_max_host_connections(2)?;
if !self.quiet {
self.config
.shell()
.status("Updating", self.source_id.display_index())?;
}
Ok(())
}
/// Checks the results inside the [`HttpRegistry::multi`] handle, and
/// updates relevant state in [`HttpRegistry::downloads`] accordingly.
fn handle_completed_downloads(&mut self) -> CargoResult<()> |
/// Constructs the full URL to download an index file.
fn full_url(&self, path: &Path) -> String {
// self.url always ends with a slash.
format!("{}{}", self.url, path.display())
}
/// Check if an index file of `path` is up-to-date.
///
/// The `path` argument is the same as in [`RegistryData::load`].
fn is_fresh(&self, path: &Path) -> bool {
if !self.requested_update {
trace!(
"using local {} as user did not request update",
path.display()
);
true
} else if self.config.cli_unstable().no_index_update {
trace!("using local {} in no_index_update mode", path.display());
true
} else if self.config.offline() {
trace!("using local {} in offline mode", path.display());
true
} else if self.fresh.contains(path) {
trace!("using local {} as it was already fetched", path.display());
true
} else {
debug!("checking freshness of {}", path.display());
false
}
}
/// Get the cached registry configuration, if it exists.
fn config_cached(&mut self) -> CargoResult<Option<&RegistryConfig>> {
if self.registry_config.is_some() {
return Ok(self.registry_config.as_ref());
}
let config_json_path = self
.assert_index_locked(&self.index_path)
.join(RegistryConfig::NAME);
match fs::read(&config_json_path) {
Ok(raw_data) => match serde_json::from_slice(&raw_data) {
Ok(json) => {
self.registry_config = Some(json);
}
Err(e) => tracing::debug!("failed to decode cached config.json: {}", e),
},
Err(e) => {
if e.kind() != ErrorKind::NotFound {
tracing::debug!("failed to read config.json cache: {}", e)
}
}
}
Ok(self.registry_config.as_ref())
}
/// Get the registry configuration from either cache or remote.
fn config(&mut self) -> Poll<CargoResult<&RegistryConfig>> {
debug!("loading config");
let index_path = self.assert_index_locked(&self.index_path);
let config_json_path = index_path.join(RegistryConfig::NAME);
if self.is_fresh(Path::new(RegistryConfig::NAME)) && self.config_cached()?.is_some() {
return Poll::Ready(Ok(self.registry_config.as_ref().unwrap()));
}
match ready!(self.load(Path::new(""), Path::new(RegistryConfig::NAME), None)?) {
LoadResponse::Data {
raw_data,
index_version: _,
} => {
trace!("config loaded");
self.registry_config = Some(serde_json::from_slice(&raw_data)?);
if paths::create_dir_all(&config_json_path.parent().unwrap()).is_ok() {
if let Err(e) = fs::write(&config_json_path, &raw_data) {
tracing::debug!("failed to write config.json cache: {}", e);
}
}
Poll::Ready(Ok(self.registry_config.as_ref().unwrap()))
}
LoadResponse::NotFound => {
Poll::Ready(Err(anyhow::anyhow!("config.json not found in registry")))
}
LoadResponse::CacheValid => Poll::Ready(Err(crate::util::internal(
"config.json is never stored in the index cache",
))),
}
}
/// Moves failed [`Download`]s that are ready to retry to the pending queue.
fn add_sleepers(&mut self) -> CargoResult<()> {
for (dl, handle) in self.downloads.sleeping.to_retry() {
let mut handle = self.multi.add(handle)?;
handle.set_token(dl.token)?;
let is_new = self.downloads.pending_paths.insert(dl.path.to_path_buf());
assert!(is_new, "path queued for download more than once");
let previous = self.downloads.pending.insert(dl.token, (dl, handle));
assert!(previous.is_none(), "dl token queued more than once");
}
Ok(())
}
}
impl<'cfg> RegistryData for HttpRegistry<'cfg> {
fn prepare(&self) -> CargoResult<()> {
Ok(())
}
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn assert_index_locked<'a>(&self, path: &'a Filesystem) -> &'a Path {
self.config.assert_package_cache_locked(path)
}
fn is_updated(&self) -> bool {
self.requested_update
}
fn load(
&mut self,
_root: &Path,
path: &Path,
index_version: Option<&str>,
) -> Poll<CargoResult<LoadResponse>> {
trace!("load: {}", path.display());
if let Some(_token) = self.downloads.pending_paths.get(path) {
debug!("dependency is still pending: {}", path.display());
return Poll::Pending;
}
if let Some(index_version) = index_version {
trace!(
"local cache of {} is available at version `{}`",
path.display(),
index_version
);
if self.is_fresh(path) {
return Poll::Ready(Ok(LoadResponse::CacheValid));
}
} else if self.fresh.contains(path) {
// We have no cached copy of this file, and we already downloaded it.
debug!(
"cache did not contain previously downloaded file {}",
path.display()
);
return Poll::Ready(Ok(LoadResponse::NotFound));
}
if self.config.offline() || self.config.cli_unstable().no_index_update {
// Return NotFound in offline mode when the file doesn't exist in the cache.
// If this results in resolution failure, the resolver will suggest
// removing the --offline flag.
return Poll::Ready(Ok(LoadResponse::NotFound));
}
if let Some(result) = self.downloads.results.remove(path) {
let result =
result.with_context(|| format!("download of {} failed", path.display()))?;
let is_new = self.fresh.insert(path.to_path_buf());
assert!(
is_new,
"downloaded the index file `{}` twice",
path.display()
);
// The status codes handled here need to be kept in sync with the codes handled
// in `handle_completed_downloads`
match result.response_code {
StatusCode::Success => {
let response_index_version = if let Some(etag) = result.header_map.etag {
format!("{}: {}", ETAG, etag)
} else if let Some(lm) = result.header_map.last_modified {
format!("{}: {}", LAST_MODIFIED, lm)
} else {
UNKNOWN.to_string()
};
trace!("index file version: {}", response_index_version);
return Poll::Ready(Ok(LoadResponse::Data {
raw_data: result.data,
index_version: Some(response_index_version),
}));
}
StatusCode::NotModified => {
// Not Modified: the data in the cache is still the latest.
if index_version.is_none() {
return Poll::Ready(Err(anyhow::anyhow!(
"server said not modified (HTTP 304) when no local cache exists"
)));
}
return Poll::Ready(Ok(LoadResponse::CacheValid));
}
StatusCode::NotFound => {
// The crate was not found or deleted from the registry.
return Poll::Ready(Ok(LoadResponse::NotFound));
}
StatusCode::Unauthorized
if !self.auth_required
&& path == Path::new(RegistryConfig::NAME)
&& self.config.cli_unstable().registry_auth =>
{
debug!(target: "network", "re-attempting request for config.json with authorization included.");
self.fresh.remove(path);
self.auth_required = true;
// Look for a `www-authenticate` header with the `Cargo` scheme.
for header in &result.header_map.www_authenticate {
for challenge in http_auth::ChallengeParser::new(header) {
match challenge {
Ok(challenge) if challenge.scheme.eq_ignore_ascii_case("Cargo") => {
// Look for the `login_url` parameter.
for (param, value) in challenge.params {
if param.eq_ignore_ascii_case("login_url") {
self.login_url = Some(value.to_unescaped().into_url()?);
}
}
}
Ok(challenge) => {
debug!(target: "network", "ignoring non-Cargo challenge: {}", challenge.scheme)
}
Err(e) => {
debug!(target: "network", "failed to parse challenge: {}", e)
}
}
}
}
self.auth_error_headers = result.header_map.all;
}
StatusCode::Unauthorized => {
let err = Err(HttpNotSuccessful {
code: 401,
body: result.data,
url: self.full_url(path),
ip: None,
headers: result.header_map.all,
}
.into());
if self.auth_required {
return Poll::Ready(err.context(auth::AuthorizationError {
sid: self.source_id.clone(),
default_registry: self.config.default_registry()?,
login_url: self.login_url.clone(),
reason: auth::AuthorizationErrorReason::TokenRejected,
}));
} else {
return Poll::Ready(err);
}
}
}
}
if path != Path::new(RegistryConfig::NAME) {
self.auth_required = ready!(self.config()?).auth_required;
} else if !self.auth_required {
// Check if there's a cached config that says auth is required.
// This allows avoiding the initial unauthenticated request to probe.
if let Some(config) = self.config_cached()? {
self.auth_required = config.auth_required;
}
}
if !self.config.cli_unstable().registry_auth {
self.auth_required = false;
}
// Looks like we're going to have to do a network request.
self.start_fetch()?;
let mut handle = http_handle(self.config)?;
let full_url = self.full_url(path);
debug!(target: "network", "fetch {}", full_url);
handle.get(true)?;
handle.url(&full_url)?;
handle.follow_location(true)?;
// Enable HTTP/2 if possible.
crate::try_old_curl_http2_pipewait!(self.multiplexing, handle);
let mut headers = List::new();
// Include a header to identify the protocol. This allows the server to
// know that Cargo is attempting to use the sparse protocol.
headers.append("cargo-protocol: version=1")?;
headers.append("accept: text/plain")?;
// If we have a cached copy of the file, include IF_NONE_MATCH or IF_MODIFIED_SINCE header.
if let Some(index_version) = index_version {
if let Some((key, value)) = index_version.split_once(':') {
match key {
ETAG => headers.append(&format!("{}: {}", IF_NONE_MATCH, value.trim()))?,
LAST_MODIFIED => {
headers.append(&format!("{}: {}", IF_MODIFIED_SINCE, value.trim()))?
}
_ => debug!("unexpected index version: {}", index_version),
}
}
}
if self.auth_required {
let authorization = auth::auth_token(
self.config,
&self.source_id,
self.login_url.as_ref(),
Operation::Read,
self.auth_error_headers.clone(),
)?;
headers.append(&format!("Authorization: {}", authorization))?;
trace!(target: "network", "including authorization for {}", full_url);
}
handle.http_headers(headers)?;
// We're going to have a bunch of downloads all happening "at the same time".
// So, we need some way to track what headers/data/responses are for which request.
// We do that through this token. Each request (and associated response) gets one.
let token = self.downloads.next;
self.downloads.next += 1;
debug!(target: "network", "downloading {} as {}", path.display(), token);
let is_new = self.downloads.pending_paths.insert(path.to_path_buf());
assert!(is_new, "path queued for download more than once");
// Each write should go to self.downloads.pending[&token].data.
// Since the write function must be 'static, we access downloads through a thread-local.
// That thread-local is set up in `block_until_ready` when it calls self.multi.perform,
// which is what ultimately calls this method.
handle.write_function(move |buf| {
trace!(target: "network", "{} - {} bytes of data", token, buf.len());
tls::with(|downloads| {
if let Some(downloads) = downloads {
downloads.pending[&token]
.0
.data
.borrow_mut()
.extend_from_slice(buf);
}
});
Ok(buf.len())
})?;
// And ditto for the header function.
handle.header_function(move |buf| {
if let Some((tag, value)) = Self::handle_http_header(buf) {
tls::with(|downloads| {
if let Some(downloads) = downloads {
let mut header_map = downloads.pending[&token].0.header_map.borrow_mut();
header_map.all.push(format!("{tag}: {value}"));
match tag.to_ascii_lowercase().as_str() {
LAST_MODIFIED => header_map.last_modified = Some(value.to_string()),
ETAG => header_map.etag = Some(value.to_string()),
WWW_AUTHENTICATE => header_map.www_authenticate.push(value.to_string()),
_ => {}
}
}
});
}
true
})?;
let dl = Download {
token,
path: path.to_path_buf(),
data: RefCell::new(Vec::new()),
header_map: Default::default(),
retry: Retry::new(self.config)?,
};
// Finally add the request we've lined up to the pool of requests that cURL manages.
let mut handle = self.multi.add(handle)?;
handle.set_token(token)?;
self.downloads.pending.insert(dl.token, (dl, handle));
Poll::Pending
}
fn config(&mut self) -> Poll<CargoResult<Option<RegistryConfig>>> {
let mut cfg = ready!(self.config()?).clone();
if !self.config.cli_unstable().registry_auth {
cfg.auth_required = false;
}
Poll::Ready(Ok(Some(cfg)))
}
fn invalidate_cache(&mut self) {
// Actually updating the index is more or less a no-op for this implementation.
// All it does is ensure that a subsequent load will double-check files with the
// server rather than rely on a locally cached copy of the index files.
debug!("invalidated index cache");
self.fresh.clear();
self.requested_update = true;
}
fn set_quiet(&mut self, quiet: bool) {
self.quiet = quiet;
self.downloads.progress.replace(None);
}
fn download(&mut self, pkg: PackageId, checksum: &str) -> CargoResult<MaybeLock> {
let registry_config = loop {
match self.config()? {
Poll::Pending => self.block_until_ready()?,
Poll::Ready(cfg) => break cfg.to_owned(),
}
};
download::download(
&self.cache_path,
&self.config,
pkg,
checksum,
registry_config,
)
}
fn finish_download(
&mut self,
pkg: PackageId,
checksum: &str,
data: &[u8],
) -> CargoResult<File> {
download::finish_download(&self.cache_path, &self.config, pkg, checksum, data)
}
fn is_crate_downloaded(&self, pkg: PackageId) -> bool {
download::is_crate_downloaded(&self.cache_path, &self.config, pkg)
}
fn block_until_ready(&mut self) -> CargoResult<()> {
trace!(target: "network",
"block_until_ready: {} transfers pending",
self.downloads.pending.len()
);
self.downloads.blocking_calls += 1;
loop {
self.handle_completed_downloads()?;
self.add_sleepers()?;
let remaining_in_multi = tls::set(&self.downloads, || {
self.multi
.perform()
.with_context(|| "failed to perform http requests")
})?;
trace!(target: "network", "{} transfers remaining", remaining_in_multi);
if remaining_in_multi + self.downloads.sleeping.len() as u32 == 0 {
return Ok(());
}
if self.downloads.pending.is_empty() {
let delay = self.downloads.sleeping.time_to_next().unwrap();
debug!(target: "network", "sleeping main thread for {delay:?}");
std::thread::sleep(delay);
} else {
// We have no more replies to provide the caller with,
// so we need to wait until cURL has something new for us.
let timeout = self
.multi
.get_timeout()?
.unwrap_or_else(|| Duration::new(1, 0));
self.multi
.wait(&mut [], timeout)
.with_context(|| "failed to wait on curl `Multi`")?;
}
}
}
}
impl<'cfg> Downloads<'cfg> {
/// Updates the state of the progress bar for downloads.
fn tick(&self) -> CargoResult<()> {
let mut progress = self.progress.borrow_mut();
let Some(progress) = progress.as_mut() else {
return Ok(());
};
// Since the sparse protocol discovers dependencies as it goes,
// it's not possible to get an accurate progress indication.
//
// As an approximation, we assume that the depth of the dependency graph
// is fixed, and base the progress on how many times the caller has asked
// for blocking. If there are actually additional dependencies, the progress
// bar will get stuck. If there are fewer dependencies, it will disappear
// early. It will never go backwards.
//
// The status text also contains the number of completed & pending requests, which
// gives a better indication of forward progress.
let approximate_tree_depth = 10;
progress.tick(
self.blocking_calls.min(approximate_tree_depth),
approximate_tree_depth + 1,
&format!(
" {} complete; {} pending",
self.downloads_finished,
self.pending.len() + self.sleeping.len()
),
)
}
}
mod tls {
use super::Downloads;
use std::cell::Cell;
thread_local!(static PTR: Cell<usize> = Cell::new(0));
pub(super) fn with<R>(f: impl FnOnce(Option<&Downloads<'_>>) -> R) -> R {
let ptr = PTR.with(|p| p.get());
if ptr == 0 {
f(None)
} else {
// Safety: `ptr` is only set by `set` below, which ensures the type is correct.
let ptr = unsafe { &*(ptr as *const Downloads<'_>) };
f(Some(ptr))
}
}
pub(super) fn set<R>(dl: &Downloads<'_>, f: impl FnOnce() -> R) -> R {
struct Reset<'a, T: Copy>(&'a Cell<T>, T);
impl<'a, T: Copy> Drop for Reset<'a, T> {
fn drop(&mut self) {
self.0.set(self.1);
}
}
PTR.with(|p| {
let _reset = Reset(p, p.get());
p.set(dl as *const Downloads<'_> as usize);
f()
})
}
}
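// Illustrative sketch (not part of the original file): the thread-local
// pointer-stashing pattern used by `tls::set`/`tls::with` above, reduced to a
// self-contained function. A `Cell<usize>` holds a type-erased pointer for the
// duration of a closure, and a drop guard restores the previous value even if
// the closure panics. All names below are hypothetical.
fn _tls_pattern_sketch() {
    use std::cell::Cell;
    thread_local!(static SLOT: Cell<usize> = Cell::new(0));
    struct Reset<'a>(&'a Cell<usize>, usize);
    impl Drop for Reset<'_> {
        fn drop(&mut self) {
            self.0.set(self.1);
        }
    }
    let value = 42usize;
    SLOT.with(|slot| {
        let _reset = Reset(slot, slot.get());
        slot.set(&value as *const usize as usize);
        // While `_reset` is alive, other code on this thread can read SLOT and
        // (unsafely) turn it back into `&usize`, exactly as `with` does above.
    });
    // After the closure, the guard has restored SLOT to its previous value.
    SLOT.with(|slot| assert_eq!(slot.get(), 0));
}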
| {
assert_eq!(
self.downloads.pending.len(),
self.downloads.pending_paths.len()
);
// Collect the results from the Multi handle.
let results = {
let mut results = Vec::new();
let pending = &mut self.downloads.pending;
self.multi.messages(|msg| {
let token = msg.token().expect("failed to read token");
let (_, handle) = &pending[&token];
if let Some(result) = msg.result_for(handle) {
results.push((token, result));
};
});
results
};
for (token, result) in results {
let (mut download, handle) = self.downloads.pending.remove(&token).unwrap();
let was_present = self.downloads.pending_paths.remove(&download.path);
assert!(
was_present,
"expected pending_paths to contain {:?}",
download.path
);
let mut handle = self.multi.remove(handle)?;
let data = download.data.take();
let url = self.full_url(&download.path);
let result = match download.retry.r#try(|| {
result.with_context(|| format!("failed to download from `{}`", url))?;
let code = handle.response_code()?;
// Keep this list of expected status codes in sync with the codes handled in `load`
let code = match code {
200 => StatusCode::Success,
304 => StatusCode::NotModified,
401 => StatusCode::Unauthorized,
404 | 410 | 451 => StatusCode::NotFound,
_ => {
return Err(HttpNotSuccessful::new_from_handle(
&mut handle,
&url,
data,
download.header_map.take().all,
)
.into());
}
};
Ok((data, code))
}) {
RetryResult::Success((data, code)) => Ok(CompletedDownload {
response_code: code,
data,
header_map: download.header_map.take(),
}),
RetryResult::Err(e) => Err(e),
RetryResult::Retry(sleep) => {
debug!(target: "network", "download retry {:?} for {sleep}ms", download.path);
self.downloads.sleeping.push(sleep, (download, handle));
continue;
}
};
self.downloads.results.insert(download.path, result);
self.downloads.downloads_finished += 1;
}
self.downloads.tick()?;
Ok(())
} | identifier_body |
lexer.rs | //! Lexer implementation
use std::borrow::Cow::Borrowed;
use std::borrow::{Borrow, Cow};
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use crate::char_stream::{CharStream, InputData};
use crate::error_listener::{ConsoleErrorListener, ErrorListener};
use crate::errors::ANTLRError;
use crate::int_stream::IntStream;
use crate::lexer_atn_simulator::{ILexerATNSimulator, LexerATNSimulator};
use crate::parser::ParserNodeType;
use crate::recognizer::{Actions, Recognizer};
use crate::rule_context::EmptyContextType;
use crate::token::TOKEN_INVALID_TYPE;
use crate::token_factory::{CommonTokenFactory, TokenAware, TokenFactory};
use crate::token_source::TokenSource;
use std::ops::{Deref, DerefMut};
/// Lexer functionality required by `LexerATNSimulator` to work properly
pub trait Lexer<'input>:
TokenSource<'input>
+ Recognizer<'input, Node = EmptyContextType<'input, <Self as TokenAware<'input>>::TF>>
{
/// Concrete input stream used by this parser
type Input: IntStream;
/// Same as `TokenStream::get_input_stream` but returns concrete type instance
/// important for proper inlining in hot code of `LexerATNSimulator`
fn input(&mut self) -> &mut Self::Input;
/// Sets channel where current token will be pushed
///
/// By default two channels are available:
/// - `LEXER_DEFAULT_TOKEN_CHANNEL`
/// - `LEXER_HIDDEN`
fn set_channel(&mut self, v: isize);
/// Pushes current mode to internal mode stack and sets `m` as current lexer mode
/// `pop_mode` should be used to recover the previous mode
fn push_mode(&mut self, m: usize);
/// Pops mode from internal mode stack
fn pop_mode(&mut self) -> Option<usize>;
/// Sets type of the current token
/// Called from action to override token that will be emitted by lexer
fn set_type(&mut self, t: isize);
/// Sets lexer mode discarding current one
fn set_mode(&mut self, m: usize);
/// Used to inform the lexer that it should consider the next token as a continuation of the current one
fn more(&mut self);
/// Tells lexer to completely ignore and not emit current token.
fn skip(&mut self);
#[doc(hidden)]
fn reset(&mut self);
#[doc(hidden)]
fn get_interpreter(&self) -> Option<&LexerATNSimulator>;
}
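// Illustrative sketch (not part of the original file): the mode-stack contract
// described above, using a plain `Vec<usize>` the same way `BaseLexer`
// implements `push_mode`/`pop_mode` later in this file. Names are hypothetical.
fn _mode_stack_sketch() {
    let mut mode: usize = 0; // LEXER_DEFAULT_MODE
    let mut mode_stack: Vec<usize> = Vec::new();
    // push_mode(3): remember the current mode, then switch to mode 3
    mode_stack.push(mode);
    mode = 3;
    assert_eq!(mode, 3);
    // pop_mode(): restore the most recently pushed mode, if any
    if let Some(prev) = mode_stack.pop() {
        mode = prev;
    }
    assert_eq!(mode, 0);
}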
/// **! Usually generated by ANTLR !**
///
/// This trait combines everything that can be used to extend Lexer behavior
pub trait LexerRecog<'a, T: Recognizer<'a>>: Actions<'a, T> + Sized + 'static {
/// Callback to extend emit behavior
fn before_emit(_lexer: &mut T) {}
}
/// Default implementation of Lexer
///
/// Public fields in this struct are intended to be used by embedded actions
#[allow(missing_docs)]
pub struct BaseLexer<
'input,
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input> = CommonTokenFactory,
> {
/// `LexerATNSimulator` instance of this lexer
pub interpreter: Option<Box<LexerATNSimulator>>,
/// `CharStream` used by this lexer
pub input: Option<Input>,
recog: T,
factory: &'input TF,
error_listeners: RefCell<Vec<Box<dyn ErrorListener<'input, Self>>>>,
pub token_start_char_index: isize,
pub token_start_line: isize,
pub token_start_column: isize,
current_pos: Rc<LexerPosition>,
/// Overrides token type emitted by lexer for current token
pub token_type: isize,
/// Make it `Some` to override token that is currently being generated by lexer
pub token: Option<TF::Tok>,
hit_eof: bool,
/// Channel lexer is currently assigning tokens to
pub channel: isize,
/// stack of modes, which is used for pushMode,popMode lexer actions
pub mode_stack: Vec<usize>,
/// Mode lexer is currently in
pub mode: usize,
/// Make it `Some` to override text for token that is currently being generated by lexer
pub text: Option<<TF::Data as ToOwned>::Owned>,
}
#[derive(Debug)]
pub(crate) struct LexerPosition {
pub(crate) line: Cell<isize>,
pub(crate) char_position_in_line: Cell<isize>,
}
impl<'input, T, Input, TF> Deref for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Target = T;
fn deref(&self) -> &Self::Target |
}
impl<'input, T, Input, TF> DerefMut for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.recog
}
}
impl<'input, T, Input, TF> Recognizer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Node = EmptyContextType<'input, TF>;
fn sempred(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) -> bool {
<T as Actions<'input, Self>>::sempred(_localctx, rule_index, action_index, self)
}
fn action(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) {
<T as Actions<'input, Self>>::action(_localctx, rule_index, action_index, self)
}
}
/// Default lexer mode id
pub const LEXER_DEFAULT_MODE: usize = 0;
/// Special token type to indicate that lexer should continue current token on next iteration
/// see `Lexer::more()`
pub const LEXER_MORE: isize = -2;
/// Special token type to indicate that lexer should not return current token
/// usually used to skip whitespaces and comments
/// see `Lexer::skip()`
pub const LEXER_SKIP: isize = -3;
#[doc(inline)]
pub use super::token::TOKEN_DEFAULT_CHANNEL as LEXER_DEFAULT_TOKEN_CHANNEL;
#[doc(inline)]
pub use super::token::TOKEN_HIDDEN_CHANNEL as LEXER_HIDDEN;
pub(crate) const LEXER_MIN_CHAR_VALUE: isize = 0x0000;
pub(crate) const LEXER_MAX_CHAR_VALUE: isize = 0x10FFFF;
impl<'input, T, Input, TF> BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn emit_token(&mut self, token: TF::Tok) {
self.token = Some(token);
}
fn emit(&mut self) {
<T as LexerRecog<Self>>::before_emit(self);
let stop = self.get_char_index() - 1;
let token = self.factory.create(
Some(self.input.as_mut().unwrap()),
self.token_type,
self.text.take(),
self.channel,
self.token_start_char_index,
stop,
self.token_start_line,
self.token_start_column,
);
self.emit_token(token);
}
fn emit_eof(&mut self) {
let token = self.factory.create(
None::<&mut Input>,
super::int_stream::EOF,
None,
LEXER_DEFAULT_TOKEN_CHANNEL,
self.get_char_index(),
self.get_char_index() - 1,
self.get_line(),
self.get_char_position_in_line(),
);
self.emit_token(token)
}
/// Current position in input stream
pub fn get_char_index(&self) -> isize {
self.input.as_ref().unwrap().index()
}
/// Current token text
pub fn get_text<'a>(&'a self) -> Cow<'a, TF::Data>
where
'input: 'a,
{
self.text
.as_ref()
.map(|it| Borrowed(it.borrow()))
// .unwrap_or("")
.unwrap_or_else(|| {
let text = self
.input
.as_ref()
.unwrap()
.get_text(self.token_start_char_index, self.get_char_index() - 1);
TF::get_data(text)
})
}
/// Used from lexer actions to override text of the token that will be emitted next
pub fn set_text(&mut self, _text: <TF::Data as ToOwned>::Owned) {
self.text = Some(_text);
}
// fn get_all_tokens(&mut self) -> Vec<TF::Tok> { unimplemented!() }
// fn get_char_error_display(&self, _c: char) -> String { unimplemented!() }
/// Add error listener
pub fn add_error_listener(&mut self, listener: Box<dyn ErrorListener<'input, Self>>) {
self.error_listeners.borrow_mut().push(listener);
}
/// Remove and drop all error listeners
pub fn remove_error_listeners(&mut self) {
self.error_listeners.borrow_mut().clear();
}
/// Creates new lexer instance
pub fn new_base_lexer(
input: Input,
interpreter: LexerATNSimulator,
recog: T,
factory: &'input TF,
) -> Self {
let mut lexer = Self {
interpreter: Some(Box::new(interpreter)),
input: Some(input),
recog,
factory,
error_listeners: RefCell::new(vec![Box::new(ConsoleErrorListener {})]),
token_start_char_index: 0,
token_start_line: 0,
token_start_column: 0,
current_pos: Rc::new(LexerPosition {
line: Cell::new(1),
char_position_in_line: Cell::new(0),
}),
token_type: super::token::TOKEN_INVALID_TYPE,
text: None,
token: None,
hit_eof: false,
channel: super::token::TOKEN_DEFAULT_CHANNEL,
// token_factory_source_pair: None,
mode_stack: Vec::new(),
mode: self::LEXER_DEFAULT_MODE,
};
let pos = lexer.current_pos.clone();
lexer.interpreter.as_mut().unwrap().current_pos = pos;
lexer
}
}
impl<'input, T, Input, TF> TokenAware<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
}
impl<'input, T, Input, TF> TokenSource<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
#[inline]
#[allow(unused_labels)]
fn next_token(&mut self) -> <Self::TF as TokenFactory<'input>>::Tok {
assert!(self.input.is_some());
let _marker = self.input().mark();
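// The outer loop restarts recognition whenever the current token is skipped
// (`LEXER_SKIP`, e.g. whitespace); the inner loop keeps matching while actions
// request `LEXER_MORE`, folding several matches into a single emitted token.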
'outer: loop {
if self.hit_eof {
self.emit_eof();
break;
}
self.token = None;
self.channel = LEXER_DEFAULT_TOKEN_CHANNEL;
self.token_start_column = self
.interpreter
.as_ref()
.unwrap()
.get_char_position_in_line();
self.token_start_line = self.interpreter.as_ref().unwrap().get_line();
self.text = None;
let index = self.input().index();
self.token_start_char_index = index;
'inner: loop {
self.token_type = TOKEN_INVALID_TYPE;
// detach from self, to allow self to be passed deeper
let mut interpreter = self.interpreter.take().unwrap();
// let mut input = self.input.take().unwrap();
let result = interpreter.match_token(self.mode, self);
self.interpreter = Some(interpreter);
let ttype = result.unwrap_or_else(|err| {
// println!("error, recovering");
notify_listeners(&mut self.error_listeners.borrow_mut(), &err, self);
self.interpreter
.as_mut()
.unwrap()
.recover(err, self.input.as_mut().unwrap());
LEXER_SKIP
});
// self.input = Some(input)
if self.input().la(1) == super::int_stream::EOF {
self.hit_eof = true;
}
if self.token_type == TOKEN_INVALID_TYPE {
self.token_type = ttype;
}
if self.token_type == LEXER_SKIP {
continue 'outer;
}
if self.token_type != LEXER_MORE {
break;
}
}
if self.token.is_none() {
self.emit();
break;
}
}
self.input().release(_marker);
self.token.take().unwrap()
}
fn get_line(&self) -> isize {
self.current_pos.line.get()
}
fn get_char_position_in_line(&self) -> isize {
self.current_pos.char_position_in_line.get()
}
fn get_input_stream(&mut self) -> Option<&mut dyn IntStream> {
match &mut self.input {
None => None,
Some(x) => Some(x as _),
}
}
fn get_source_name(&self) -> String {
self.input
.as_ref()
.map(|it| it.get_source_name())
.unwrap_or("<none>".to_string())
}
// fn set_token_factory<'c: 'b>(&mut self, f: &'c TokenFactory) {
// self.factory = f;
// }
fn get_token_factory(&self) -> &'input TF {
self.factory
}
}
#[cold]
#[inline(never)]
fn notify_listeners<'input, T, Input, TF>(
liseners: &mut Vec<Box<dyn ErrorListener<'input, BaseLexer<'input, T, Input, TF>>>>,
e: &ANTLRError,
lexer: &BaseLexer<'input, T, Input, TF>,
) where
T: LexerRecog<'input, BaseLexer<'input, T, Input, TF>> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
let inner = lexer
.input
.as_ref()
.unwrap()
.get_text(lexer.token_start_char_index, lexer.get_char_index());
let text = format!(
"token recognition error at: '{}'",
TF::get_data(inner).to_display()
);
for listener in liseners.iter_mut() {
listener.syntax_error(
lexer,
None,
lexer.token_start_line,
lexer.token_start_column,
&text,
Some(e),
)
}
}
impl<'input, T, Input, TF> Lexer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Input = Input;
fn input(&mut self) -> &mut Self::Input {
self.input.as_mut().unwrap()
}
fn set_channel(&mut self, v: isize) {
self.channel = v;
}
fn push_mode(&mut self, m: usize) {
self.mode_stack.push(self.mode);
self.mode = m;
}
fn pop_mode(&mut self) -> Option<usize> {
self.mode_stack.pop().map(|mode| {
self.mode = mode;
mode
})
}
fn set_type(&mut self, t: isize) {
self.token_type = t;
}
fn set_mode(&mut self, m: usize) {
self.mode = m;
}
fn more(&mut self) {
self.set_type(LEXER_MORE)
}
fn skip(&mut self) {
self.set_type(LEXER_SKIP)
}
fn reset(&mut self) {
unimplemented!()
}
fn get_interpreter(&self) -> Option<&LexerATNSimulator> {
self.interpreter.as_deref()
}
}
| {
&self.recog
} | identifier_body |
lexer.rs | //! Lexer implementation
use std::borrow::Cow::Borrowed;
use std::borrow::{Borrow, Cow};
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use crate::char_stream::{CharStream, InputData};
use crate::error_listener::{ConsoleErrorListener, ErrorListener};
use crate::errors::ANTLRError;
use crate::int_stream::IntStream;
use crate::lexer_atn_simulator::{ILexerATNSimulator, LexerATNSimulator};
use crate::parser::ParserNodeType;
use crate::recognizer::{Actions, Recognizer};
use crate::rule_context::EmptyContextType;
use crate::token::TOKEN_INVALID_TYPE;
use crate::token_factory::{CommonTokenFactory, TokenAware, TokenFactory};
use crate::token_source::TokenSource;
use std::ops::{Deref, DerefMut};
/// Lexer functionality required by `LexerATNSimulator` to work properly
pub trait Lexer<'input>:
TokenSource<'input>
+ Recognizer<'input, Node = EmptyContextType<'input, <Self as TokenAware<'input>>::TF>>
{
/// Concrete input stream used by this parser
type Input: IntStream;
/// Same as `TokenStream::get_input_stream` but returns concrete type instance
/// important for proper inlining in hot code of `LexerATNSimulator`
fn input(&mut self) -> &mut Self::Input;
/// Sets channel where current token will be pushed
///
/// By default two channels are available:
/// - `LEXER_DEFAULT_TOKEN_CHANNEL`
/// - `LEXER_HIDDEN`
fn set_channel(&mut self, v: isize);
/// Pushes current mode to internal mode stack and sets `m` as current lexer mode
/// `pop_mode` should be used to recover the previous mode
fn push_mode(&mut self, m: usize);
/// Pops mode from internal mode stack
fn pop_mode(&mut self) -> Option<usize>;
/// Sets type of the current token
/// Called from action to override token that will be emitted by lexer
fn set_type(&mut self, t: isize);
/// Sets lexer mode discarding current one
fn set_mode(&mut self, m: usize);
/// Used to inform the lexer that it should consider the next token as a continuation of the current one
fn more(&mut self);
/// Tells lexer to completely ignore and not emit current token.
fn skip(&mut self);
#[doc(hidden)]
fn reset(&mut self);
#[doc(hidden)]
fn get_interpreter(&self) -> Option<&LexerATNSimulator>;
}
/// **! Usually generated by ANTLR !**
///
/// This trait combines everything that can be used to extend Lexer behavior
pub trait LexerRecog<'a, T: Recognizer<'a>>: Actions<'a, T> + Sized + 'static {
/// Callback to extend emit behavior
fn before_emit(_lexer: &mut T) {}
}
/// Default implementation of Lexer
///
/// Public fields in this struct are intended to be used by embedded actions
#[allow(missing_docs)]
pub struct BaseLexer<
'input,
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input> = CommonTokenFactory,
> {
/// `LexerATNSimulator` instance of this lexer
pub interpreter: Option<Box<LexerATNSimulator>>,
/// `CharStream` used by this lexer
pub input: Option<Input>,
recog: T,
factory: &'input TF,
error_listeners: RefCell<Vec<Box<dyn ErrorListener<'input, Self>>>>,
pub token_start_char_index: isize,
pub token_start_line: isize,
pub token_start_column: isize,
current_pos: Rc<LexerPosition>,
/// Overrides token type emitted by lexer for current token
pub token_type: isize,
/// Make it `Some` to override token that is currently being generated by lexer
pub token: Option<TF::Tok>,
hit_eof: bool,
/// Channel lexer is currently assigning tokens to
pub channel: isize,
/// stack of modes, which is used for pushMode,popMode lexer actions
pub mode_stack: Vec<usize>,
/// Mode lexer is currently in
pub mode: usize,
/// Make it `Some` to override text for token that is currently being generated by lexer
pub text: Option<<TF::Data as ToOwned>::Owned>,
}
#[derive(Debug)]
pub(crate) struct LexerPosition {
pub(crate) line: Cell<isize>,
pub(crate) char_position_in_line: Cell<isize>,
}
impl<'input, T, Input, TF> Deref for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Target = T;
fn deref(&self) -> &Self::Target {
&self.recog
}
}
impl<'input, T, Input, TF> DerefMut for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.recog
}
}
impl<'input, T, Input, TF> Recognizer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Node = EmptyContextType<'input, TF>;
fn sempred(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) -> bool {
<T as Actions<'input, Self>>::sempred(_localctx, rule_index, action_index, self)
}
fn action(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) {
<T as Actions<'input, Self>>::action(_localctx, rule_index, action_index, self)
}
}
/// Default lexer mode id
pub const LEXER_DEFAULT_MODE: usize = 0;
/// Special token type to indicate that lexer should continue current token on next iteration
/// see `Lexer::more()`
pub const LEXER_MORE: isize = -2;
/// Special token type to indicate that lexer should not return current token
/// usually used to skip whitespaces and comments
/// see `Lexer::skip()`
pub const LEXER_SKIP: isize = -3;
#[doc(inline)]
pub use super::token::TOKEN_DEFAULT_CHANNEL as LEXER_DEFAULT_TOKEN_CHANNEL;
#[doc(inline)]
pub use super::token::TOKEN_HIDDEN_CHANNEL as LEXER_HIDDEN;
pub(crate) const LEXER_MIN_CHAR_VALUE: isize = 0x0000;
pub(crate) const LEXER_MAX_CHAR_VALUE: isize = 0x10FFFF;
impl<'input, T, Input, TF> BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn emit_token(&mut self, token: TF::Tok) {
self.token = Some(token);
}
fn emit(&mut self) {
<T as LexerRecog<Self>>::before_emit(self);
let stop = self.get_char_index() - 1;
let token = self.factory.create(
Some(self.input.as_mut().unwrap()),
self.token_type,
self.text.take(),
self.channel,
self.token_start_char_index,
stop,
self.token_start_line,
self.token_start_column,
);
self.emit_token(token);
}
fn emit_eof(&mut self) {
let token = self.factory.create(
None::<&mut Input>,
super::int_stream::EOF,
None,
LEXER_DEFAULT_TOKEN_CHANNEL,
self.get_char_index(),
self.get_char_index() - 1,
self.get_line(),
self.get_char_position_in_line(),
);
self.emit_token(token)
}
/// Current position in input stream
pub fn get_char_index(&self) -> isize {
self.input.as_ref().unwrap().index()
}
/// Current token text
pub fn get_text<'a>(&'a self) -> Cow<'a, TF::Data>
where
'input: 'a,
{
self.text
.as_ref()
.map(|it| Borrowed(it.borrow()))
// .unwrap_or("")
.unwrap_or_else(|| {
let text = self
.input
.as_ref()
.unwrap()
.get_text(self.token_start_char_index, self.get_char_index() - 1);
TF::get_data(text)
})
}
/// Used from lexer actions to override text of the token that will be emitted next
pub fn set_text(&mut self, _text: <TF::Data as ToOwned>::Owned) {
self.text = Some(_text);
}
// fn get_all_tokens(&mut self) -> Vec<TF::Tok> { unimplemented!() }
// fn get_char_error_display(&self, _c: char) -> String { unimplemented!() }
/// Add error listener
pub fn add_error_listener(&mut self, listener: Box<dyn ErrorListener<'input, Self>>) {
self.error_listeners.borrow_mut().push(listener);
}
/// Remove and drop all error listeners
pub fn remove_error_listeners(&mut self) {
self.error_listeners.borrow_mut().clear();
}
/// Creates new lexer instance
pub fn new_base_lexer(
input: Input,
interpreter: LexerATNSimulator,
recog: T,
factory: &'input TF,
) -> Self {
let mut lexer = Self {
interpreter: Some(Box::new(interpreter)),
input: Some(input),
recog,
factory,
error_listeners: RefCell::new(vec![Box::new(ConsoleErrorListener {})]),
token_start_char_index: 0,
token_start_line: 0,
token_start_column: 0,
current_pos: Rc::new(LexerPosition {
line: Cell::new(1),
char_position_in_line: Cell::new(0),
}),
token_type: super::token::TOKEN_INVALID_TYPE,
text: None,
token: None,
hit_eof: false,
channel: super::token::TOKEN_DEFAULT_CHANNEL,
// token_factory_source_pair: None,
mode_stack: Vec::new(),
mode: self::LEXER_DEFAULT_MODE,
};
let pos = lexer.current_pos.clone();
lexer.interpreter.as_mut().unwrap().current_pos = pos;
lexer
}
}
impl<'input, T, Input, TF> TokenAware<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
}
impl<'input, T, Input, TF> TokenSource<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
#[inline]
#[allow(unused_labels)]
fn next_token(&mut self) -> <Self::TF as TokenFactory<'input>>::Tok {
assert!(self.input.is_some());
let _marker = self.input().mark();
'outer: loop {
if self.hit_eof {
self.emit_eof();
break;
}
self.token = None;
self.channel = LEXER_DEFAULT_TOKEN_CHANNEL;
self.token_start_column = self
.interpreter
.as_ref()
.unwrap()
.get_char_position_in_line();
self.token_start_line = self.interpreter.as_ref().unwrap().get_line();
self.text = None;
let index = self.input().index();
self.token_start_char_index = index;
'inner: loop {
self.token_type = TOKEN_INVALID_TYPE;
// detach from self, to allow self to be passed deeper
let mut interpreter = self.interpreter.take().unwrap();
// let mut input = self.input.take().unwrap();
let result = interpreter.match_token(self.mode, self);
self.interpreter = Some(interpreter);
let ttype = result.unwrap_or_else(|err| {
// println!("error, recovering");
notify_listeners(&mut self.error_listeners.borrow_mut(), &err, self);
self.interpreter
.as_mut()
.unwrap()
.recover(err, self.input.as_mut().unwrap());
LEXER_SKIP
});
// self.input = Some(input)
if self.input().la(1) == super::int_stream::EOF |
if self.token_type == TOKEN_INVALID_TYPE {
self.token_type = ttype;
}
if self.token_type == LEXER_SKIP {
continue 'outer;
}
if self.token_type != LEXER_MORE {
break;
}
}
if self.token.is_none() {
self.emit();
break;
}
}
self.input().release(_marker);
self.token.take().unwrap()
}
fn get_line(&self) -> isize {
self.current_pos.line.get()
}
fn get_char_position_in_line(&self) -> isize {
self.current_pos.char_position_in_line.get()
}
fn get_input_stream(&mut self) -> Option<&mut dyn IntStream> {
match &mut self.input {
None => None,
Some(x) => Some(x as _),
}
}
fn get_source_name(&self) -> String {
self.input
.as_ref()
.map(|it| it.get_source_name())
.unwrap_or("<none>".to_string())
}
// fn set_token_factory<'c: 'b>(&mut self, f: &'c TokenFactory) {
// self.factory = f;
// }
fn get_token_factory(&self) -> &'input TF {
self.factory
}
}
#[cold]
#[inline(never)]
fn notify_listeners<'input, T, Input, TF>(
liseners: &mut Vec<Box<dyn ErrorListener<'input, BaseLexer<'input, T, Input, TF>>>>,
e: &ANTLRError,
lexer: &BaseLexer<'input, T, Input, TF>,
) where
T: LexerRecog<'input, BaseLexer<'input, T, Input, TF>> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
let inner = lexer
.input
.as_ref()
.unwrap()
.get_text(lexer.token_start_char_index, lexer.get_char_index());
let text = format!(
"token recognition error at: '{}'",
TF::get_data(inner).to_display()
);
for listener in liseners.iter_mut() {
listener.syntax_error(
lexer,
None,
lexer.token_start_line,
lexer.token_start_column,
&text,
Some(e),
)
}
}
impl<'input, T, Input, TF> Lexer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Input = Input;
fn input(&mut self) -> &mut Self::Input {
self.input.as_mut().unwrap()
}
fn set_channel(&mut self, v: isize) {
self.channel = v;
}
fn push_mode(&mut self, m: usize) {
self.mode_stack.push(self.mode);
self.mode = m;
}
fn pop_mode(&mut self) -> Option<usize> {
self.mode_stack.pop().map(|mode| {
self.mode = mode;
mode
})
}
fn set_type(&mut self, t: isize) {
self.token_type = t;
}
fn set_mode(&mut self, m: usize) {
self.mode = m;
}
fn more(&mut self) {
self.set_type(LEXER_MORE)
}
fn skip(&mut self) {
self.set_type(LEXER_SKIP)
}
fn reset(&mut self) {
unimplemented!()
}
fn get_interpreter(&self) -> Option<&LexerATNSimulator> {
self.interpreter.as_deref()
}
}
| {
self.hit_eof = true;
} | conditional_block |
lexer.rs | //! Lexer implementation
use std::borrow::Cow::Borrowed;
use std::borrow::{Borrow, Cow};
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use crate::char_stream::{CharStream, InputData};
use crate::error_listener::{ConsoleErrorListener, ErrorListener};
use crate::errors::ANTLRError;
use crate::int_stream::IntStream;
use crate::lexer_atn_simulator::{ILexerATNSimulator, LexerATNSimulator};
use crate::parser::ParserNodeType;
use crate::recognizer::{Actions, Recognizer};
use crate::rule_context::EmptyContextType;
use crate::token::TOKEN_INVALID_TYPE;
use crate::token_factory::{CommonTokenFactory, TokenAware, TokenFactory};
use crate::token_source::TokenSource;
use std::ops::{Deref, DerefMut};
/// Lexer functionality required by `LexerATNSimulator` to work properly
pub trait Lexer<'input>:
TokenSource<'input>
+ Recognizer<'input, Node = EmptyContextType<'input, <Self as TokenAware<'input>>::TF>>
{
/// Concrete input stream used by this parser
type Input: IntStream;
/// Same as `TokenStream::get_input_stream` but returns concrete type instance
/// important for proper inlining in hot code of `LexerATNSimulator`
fn input(&mut self) -> &mut Self::Input;
/// Sets channel where current token will be pushed
///
/// By default two channels are available:
/// - `LEXER_DEFAULT_TOKEN_CHANNEL`
/// - `LEXER_HIDDEN`
fn set_channel(&mut self, v: isize);
/// Pushes current mode to internal mode stack and sets `m` as current lexer mode
/// `pop_mode` should be used to recover the previous mode
fn push_mode(&mut self, m: usize);
/// Pops mode from internal mode stack
fn pop_mode(&mut self) -> Option<usize>;
/// Sets type of the current token
/// Called from action to override token that will be emitted by lexer
fn set_type(&mut self, t: isize);
/// Sets lexer mode discarding current one
fn set_mode(&mut self, m: usize);
/// Used to inform the lexer that it should consider the next token as a continuation of the current one
fn more(&mut self);
/// Tells lexer to completely ignore and not emit current token.
fn skip(&mut self);
#[doc(hidden)]
fn reset(&mut self);
#[doc(hidden)]
fn get_interpreter(&self) -> Option<&LexerATNSimulator>;
}
/// **! Usually generated by ANTLR !**
///
/// This trait combines everything that can be used to extend Lexer behavior
pub trait LexerRecog<'a, T: Recognizer<'a>>: Actions<'a, T> + Sized + 'static {
/// Callback to extend emit behavior
fn before_emit(_lexer: &mut T) {}
}
/// Default implementation of Lexer
///
/// Public fields in this struct are intended to be used by embedded actions
#[allow(missing_docs)]
pub struct BaseLexer<
'input,
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input> = CommonTokenFactory,
> {
/// `LexerATNSimulator` instance of this lexer
pub interpreter: Option<Box<LexerATNSimulator>>,
/// `CharStream` used by this lexer
pub input: Option<Input>,
recog: T,
factory: &'input TF,
error_listeners: RefCell<Vec<Box<dyn ErrorListener<'input, Self>>>>,
pub token_start_char_index: isize,
pub token_start_line: isize,
pub token_start_column: isize,
current_pos: Rc<LexerPosition>,
/// Overrides token type emitted by lexer for current token
pub token_type: isize,
/// Make it `Some` to override token that is currently being generated by lexer
pub token: Option<TF::Tok>,
hit_eof: bool,
/// Channel lexer is currently assigning tokens to
pub channel: isize,
/// stack of modes, which is used for pushMode,popMode lexer actions
pub mode_stack: Vec<usize>,
/// Mode lexer is currently in
pub mode: usize,
/// Make it `Some` to override text for token that is currently being generated by lexer
pub text: Option<<TF::Data as ToOwned>::Owned>,
}
#[derive(Debug)]
pub(crate) struct LexerPosition {
pub(crate) line: Cell<isize>,
pub(crate) char_position_in_line: Cell<isize>,
}
impl<'input, T, Input, TF> Deref for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Target = T;
fn deref(&self) -> &Self::Target {
&self.recog
}
}
impl<'input, T, Input, TF> DerefMut for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.recog
}
}
impl<'input, T, Input, TF> Recognizer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Node = EmptyContextType<'input, TF>;
fn sempred(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) -> bool {
<T as Actions<'input, Self>>::sempred(_localctx, rule_index, action_index, self)
}
fn action(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) {
<T as Actions<'input, Self>>::action(_localctx, rule_index, action_index, self)
}
}
/// Default lexer mode id
pub const LEXER_DEFAULT_MODE: usize = 0;
/// Special token type to indicate that lexer should continue current token on next iteration
/// see `Lexer::more()`
pub const LEXER_MORE: isize = -2;
/// Special token type to indicate that lexer should not return current token
/// usually used to skip whitespaces and comments
/// see `Lexer::skip()`
pub const LEXER_SKIP: isize = -3;
#[doc(inline)]
pub use super::token::TOKEN_DEFAULT_CHANNEL as LEXER_DEFAULT_TOKEN_CHANNEL;
#[doc(inline)]
pub use super::token::TOKEN_HIDDEN_CHANNEL as LEXER_HIDDEN;
pub(crate) const LEXER_MIN_CHAR_VALUE: isize = 0x0000;
pub(crate) const LEXER_MAX_CHAR_VALUE: isize = 0x10FFFF;
impl<'input, T, Input, TF> BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn emit_token(&mut self, token: TF::Tok) {
self.token = Some(token);
}
fn emit(&mut self) {
<T as LexerRecog<Self>>::before_emit(self);
let stop = self.get_char_index() - 1;
let token = self.factory.create(
Some(self.input.as_mut().unwrap()),
self.token_type,
self.text.take(),
self.channel,
self.token_start_char_index,
stop,
self.token_start_line,
self.token_start_column,
);
self.emit_token(token);
}
fn emit_eof(&mut self) {
let token = self.factory.create(
None::<&mut Input>,
super::int_stream::EOF,
None,
LEXER_DEFAULT_TOKEN_CHANNEL,
self.get_char_index(),
self.get_char_index() - 1,
self.get_line(),
self.get_char_position_in_line(),
);
self.emit_token(token)
}
/// Current position in input stream
pub fn get_char_index(&self) -> isize {
self.input.as_ref().unwrap().index()
}
/// Current token text
pub fn get_text<'a>(&'a self) -> Cow<'a, TF::Data>
where
'input: 'a,
{
self.text
.as_ref()
.map(|it| Borrowed(it.borrow()))
// .unwrap_or("")
.unwrap_or_else(|| {
let text = self
.input
.as_ref()
.unwrap()
.get_text(self.token_start_char_index, self.get_char_index() - 1);
TF::get_data(text)
})
}
/// Used from lexer actions to override text of the token that will be emitted next
pub fn set_text(&mut self, _text: <TF::Data as ToOwned>::Owned) {
self.text = Some(_text);
}
// fn get_all_tokens(&mut self) -> Vec<TF::Tok> { unimplemented!() }
// fn get_char_error_display(&self, _c: char) -> String { unimplemented!() }
/// Add error listener
pub fn add_error_listener(&mut self, listener: Box<dyn ErrorListener<'input, Self>>) {
self.error_listeners.borrow_mut().push(listener);
}
/// Remove and drop all error listeners
pub fn remove_error_listeners(&mut self) {
self.error_listeners.borrow_mut().clear();
}
/// Creates new lexer instance
pub fn new_base_lexer(
input: Input,
interpreter: LexerATNSimulator,
recog: T,
factory: &'input TF,
) -> Self {
let mut lexer = Self {
interpreter: Some(Box::new(interpreter)),
input: Some(input),
recog,
factory,
error_listeners: RefCell::new(vec![Box::new(ConsoleErrorListener {})]),
token_start_char_index: 0,
token_start_line: 0,
token_start_column: 0,
current_pos: Rc::new(LexerPosition {
line: Cell::new(1),
char_position_in_line: Cell::new(0),
}),
token_type: super::token::TOKEN_INVALID_TYPE,
text: None,
token: None,
hit_eof: false,
channel: super::token::TOKEN_DEFAULT_CHANNEL,
// token_factory_source_pair: None,
mode_stack: Vec::new(),
mode: self::LEXER_DEFAULT_MODE,
};
let pos = lexer.current_pos.clone();
lexer.interpreter.as_mut().unwrap().current_pos = pos;
lexer
}
}
impl<'input, T, Input, TF> TokenAware<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
}
impl<'input, T, Input, TF> TokenSource<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
#[inline]
#[allow(unused_labels)]
fn next_token(&mut self) -> <Self::TF as TokenFactory<'input>>::Tok {
assert!(self.input.is_some());
let _marker = self.input().mark();
'outer: loop {
if self.hit_eof {
self.emit_eof();
break;
}
self.token = None;
self.channel = LEXER_DEFAULT_TOKEN_CHANNEL;
self.token_start_column = self
.interpreter
.as_ref()
.unwrap()
.get_char_position_in_line();
self.token_start_line = self.interpreter.as_ref().unwrap().get_line();
self.text = None;
let index = self.input().index();
self.token_start_char_index = index;
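// The inner loop below retries token recognition: LEXER_MORE keeps extending the
// current token (set via `more()`), LEXER_SKIP restarts recognition of a fresh token,
// and any other resulting token type falls through to `emit()` further down.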
'inner: loop {
self.token_type = TOKEN_INVALID_TYPE;
// detach from self, to allow self to be passed deeper
let mut interpreter = self.interpreter.take().unwrap();
// let mut input = self.input.take().unwrap();
let result = interpreter.match_token(self.mode, self);
self.interpreter = Some(interpreter);
let ttype = result.unwrap_or_else(|err| {
// println!("error, recovering");
notify_listeners(&mut self.error_listeners.borrow_mut(), &err, self);
self.interpreter
.as_mut()
.unwrap()
.recover(err, self.input.as_mut().unwrap());
LEXER_SKIP
});
// self.input = Some(input)
if self.input().la(1) == super::int_stream::EOF {
self.hit_eof = true;
}
if self.token_type == TOKEN_INVALID_TYPE {
self.token_type = ttype;
}
if self.token_type == LEXER_SKIP {
continue 'outer;
}
if self.token_type != LEXER_MORE {
break;
}
}
if self.token.is_none() {
self.emit();
break;
}
}
self.input().release(_marker);
self.token.take().unwrap()
}
fn get_line(&self) -> isize {
self.current_pos.line.get()
}
fn get_char_position_in_line(&self) -> isize {
self.current_pos.char_position_in_line.get()
}
fn get_input_stream(&mut self) -> Option<&mut dyn IntStream> {
match &mut self.input {
None => None,
Some(x) => Some(x as _),
}
}
fn get_source_name(&self) -> String {
self.input
.as_ref()
.map(|it| it.get_source_name())
.unwrap_or("<none>".to_string())
}
// fn set_token_factory<'c: 'b>(&mut self, f: &'c TokenFactory) {
// self.factory = f;
// }
fn get_token_factory(&self) -> &'input TF {
self.factory
}
}
#[cold]
#[inline(never)]
fn notify_listeners<'input, T, Input, TF>(
liseners: &mut Vec<Box<dyn ErrorListener<'input, BaseLexer<'input, T, Input, TF>>>>,
e: &ANTLRError,
lexer: &BaseLexer<'input, T, Input, TF>,
) where
T: LexerRecog<'input, BaseLexer<'input, T, Input, TF>> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
let inner = lexer
.input
.as_ref()
.unwrap()
.get_text(lexer.token_start_char_index, lexer.get_char_index());
let text = format!(
"token recognition error at: '{}'",
TF::get_data(inner).to_display()
);
for listener in liseners.iter_mut() {
listener.syntax_error(
lexer,
None,
lexer.token_start_line,
lexer.token_start_column,
&text,
Some(e),
)
}
}
impl<'input, T, Input, TF> Lexer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Input = Input;
fn input(&mut self) -> &mut Self::Input {
self.input.as_mut().unwrap()
}
fn set_channel(&mut self, v: isize) {
self.channel = v;
}
fn push_mode(&mut self, m: usize) {
self.mode_stack.push(self.mode);
self.mode = m;
}
fn pop_mode(&mut self) -> Option<usize> {
self.mode_stack.pop().map(|mode| {
self.mode = mode;
mode
})
}
fn set_type(&mut self, t: isize) {
self.token_type = t;
}
fn set_mode(&mut self, m: usize) {
self.mode = m;
}
fn more(&mut self) {
self.set_type(LEXER_MORE)
}
fn skip(&mut self) {
self.set_type(LEXER_SKIP)
}
fn reset(&mut self) {
unimplemented!()
}
fn get_interpreter(&self) -> Option<&LexerATNSimulator> {
self.interpreter.as_deref()
}
}
lexer.rs
//! Lexer implementation
use std::borrow::Cow::Borrowed;
use std::borrow::{Borrow, Cow};
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use crate::char_stream::{CharStream, InputData};
use crate::error_listener::{ConsoleErrorListener, ErrorListener};
use crate::errors::ANTLRError;
use crate::int_stream::IntStream;
use crate::lexer_atn_simulator::{ILexerATNSimulator, LexerATNSimulator};
use crate::parser::ParserNodeType;
use crate::recognizer::{Actions, Recognizer};
use crate::rule_context::EmptyContextType;
use crate::token::TOKEN_INVALID_TYPE;
use crate::token_factory::{CommonTokenFactory, TokenAware, TokenFactory};
use crate::token_source::TokenSource;
use std::ops::{Deref, DerefMut};
/// Lexer functionality required by `LexerATNSimulator` to work properly
pub trait Lexer<'input>:
TokenSource<'input>
+ Recognizer<'input, Node = EmptyContextType<'input, <Self as TokenAware<'input>>::TF>>
{
/// Concrete input stream used by this parser
type Input: IntStream;
/// Same as `TokenStream::get_input_stream` but returns concrete type instance
/// important for proper inlining in hot code of `LexerATNSimulator`
fn input(&mut self) -> &mut Self::Input;
/// Sets channel where current token will be pushed
///
/// By default two channels are available:
/// - `LEXER_DEFAULT_TOKEN_CHANNEL`
/// - `LEXER_HIDDEN`
fn set_channel(&mut self, v: isize);
/// Pushes current mode to internal mode stack and sets `m` as current lexer mode
/// `pop_mode` should be used to recover the previous mode
fn push_mode(&mut self, m: usize);
/// Pops mode from internal mode stack
fn pop_mode(&mut self) -> Option<usize>;
/// Sets type of the current token
/// Called from action to override token that will be emitted by lexer
fn set_type(&mut self, t: isize);
/// Sets lexer mode discarding current one
fn set_mode(&mut self, m: usize);
/// Used to inform the lexer that it should consider the next token as a continuation of the current one
fn more(&mut self);
/// Tells lexer to completely ignore and not emit current token.
fn skip(&mut self);
#[doc(hidden)]
fn reset(&mut self);
#[doc(hidden)]
fn get_interpreter(&self) -> Option<&LexerATNSimulator>;
}
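// --- Illustrative sketch (editor's addition, not part of the original runtime) ---
// Shows how an embedded lexer action generated from a grammar might drive the
// `Lexer` trait above. The function name and the `EXAMPLE_COMMENT_MODE` id are
// assumptions made up for this sketch only.
#[allow(dead_code)]
const EXAMPLE_COMMENT_MODE: usize = 1;
#[allow(dead_code)]
fn example_comment_action<'input, L: Lexer<'input>>(lexer: &mut L) {
    // Route the token currently being built to the hidden channel.
    lexer.set_channel(LEXER_HIDDEN);
    // Enter a dedicated mode for the comment body; `pop_mode` restores the previous one.
    lexer.push_mode(EXAMPLE_COMMENT_MODE);
    // Keep accumulating input into the same token on the next rule match.
    lexer.more();
}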
/// **! Usually generated by ANTLR !**
///
/// This trait combines everything that can be used to extend Lexer behavior
pub trait LexerRecog<'a, T: Recognizer<'a>>: Actions<'a, T> + Sized + 'static {
/// Callback to extend emit behavior
fn before_emit(_lexer: &mut T) {}
}
/// Default implementation of Lexer
///
/// Public fields in this struct are intended to be used by embedded actions
#[allow(missing_docs)]
pub struct BaseLexer<
'input,
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input> = CommonTokenFactory,
> {
/// `LexerATNSimulator` instance of this lexer
pub interpreter: Option<Box<LexerATNSimulator>>,
/// `CharStream` used by this lexer
pub input: Option<Input>,
recog: T,
factory: &'input TF,
error_listeners: RefCell<Vec<Box<dyn ErrorListener<'input, Self>>>>,
pub token_start_char_index: isize,
pub token_start_line: isize,
pub token_start_column: isize,
current_pos: Rc<LexerPosition>,
/// Overrides token type emitted by lexer for current token
pub token_type: isize,
/// Make it `Some` to override token that is currently being generated by lexer
pub token: Option<TF::Tok>,
hit_eof: bool,
/// Channel lexer is currently assigning tokens to
pub channel: isize,
/// Stack of modes, used by the pushMode/popMode lexer actions
pub mode_stack: Vec<usize>,
/// Mode lexer is currently in
pub mode: usize,
/// Make it `Some` to override text for token that is currently being generated by lexer
pub text: Option<<TF::Data as ToOwned>::Owned>,
}
#[derive(Debug)]
pub(crate) struct LexerPosition {
pub(crate) line: Cell<isize>,
pub(crate) char_position_in_line: Cell<isize>,
}
impl<'input, T, Input, TF> Deref for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Target = T;
fn deref(&self) -> &Self::Target {
&self.recog
}
}
impl<'input, T, Input, TF> DerefMut for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.recog
}
}
impl<'input, T, Input, TF> Recognizer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Node = EmptyContextType<'input, TF>;
fn sempred(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) -> bool {
<T as Actions<'input, Self>>::sempred(_localctx, rule_index, action_index, self)
}
fn action(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) {
<T as Actions<'input, Self>>::action(_localctx, rule_index, action_index, self)
}
}
/// Default lexer mode id
pub const LEXER_DEFAULT_MODE: usize = 0;
/// Special token type to indicate that lexer should continue current token on next iteration
/// see `Lexer::more()`
pub const LEXER_MORE: isize = -2;
/// Special token type to indicate that lexer should not return current token
/// usually used to skip whitespaces and comments
/// see `Lexer::skip()`
pub const LEXER_SKIP: isize = -3;
#[doc(inline)]
pub use super::token::TOKEN_DEFAULT_CHANNEL as LEXER_DEFAULT_TOKEN_CHANNEL;
#[doc(inline)]
pub use super::token::TOKEN_HIDDEN_CHANNEL as LEXER_HIDDEN;
pub(crate) const LEXER_MIN_CHAR_VALUE: isize = 0x0000;
pub(crate) const LEXER_MAX_CHAR_VALUE: isize = 0x10FFFF;
impl<'input, T, Input, TF> BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn emit_token(&mut self, token: TF::Tok) {
self.token = Some(token);
}
fn emit(&mut self) {
<T as LexerRecog<Self>>::before_emit(self);
let stop = self.get_char_index() - 1;
let token = self.factory.create(
Some(self.input.as_mut().unwrap()),
self.token_type,
self.text.take(),
self.channel,
self.token_start_char_index,
stop,
self.token_start_line,
self.token_start_column,
);
self.emit_token(token);
}
fn emit_eof(&mut self) {
let token = self.factory.create(
None::<&mut Input>,
super::int_stream::EOF,
None,
LEXER_DEFAULT_TOKEN_CHANNEL,
self.get_char_index(),
self.get_char_index() - 1,
self.get_line(),
self.get_char_position_in_line(),
);
self.emit_token(token)
}
/// Current position in input stream
pub fn get_char_index(&self) -> isize {
self.input.as_ref().unwrap().index()
}
/// Current token text
pub fn get_text<'a>(&'a self) -> Cow<'a, TF::Data>
where
'input: 'a,
{
self.text
.as_ref()
.map(|it| Borrowed(it.borrow()))
// .unwrap_or("")
.unwrap_or_else(|| {
let text = self
.input
.as_ref()
.unwrap()
.get_text(self.token_start_char_index, self.get_char_index() - 1);
TF::get_data(text)
})
}
/// Used from lexer actions to override text of the token that will be emitted next
pub fn set_text(&mut self, _text: <TF::Data as ToOwned>::Owned) {
self.text = Some(_text);
}
// fn get_all_tokens(&mut self) -> Vec<TF::Tok> { unimplemented!() }
// fn get_char_error_display(&self, _c: char) -> String { unimplemented!() }
/// Add error listener
pub fn add_error_listener(&mut self, listener: Box<dyn ErrorListener<'input, Self>>) {
self.error_listeners.borrow_mut().push(listener);
}
/// Remove and drop all error listeners
pub fn remove_error_listeners(&mut self) {
self.error_listeners.borrow_mut().clear();
}
/// Creates new lexer instance
pub fn new_base_lexer(
input: Input,
interpreter: LexerATNSimulator,
recog: T,
factory: &'input TF,
) -> Self {
let mut lexer = Self {
interpreter: Some(Box::new(interpreter)),
input: Some(input),
recog,
factory,
error_listeners: RefCell::new(vec![Box::new(ConsoleErrorListener {})]),
token_start_char_index: 0,
token_start_line: 0,
token_start_column: 0,
current_pos: Rc::new(LexerPosition {
line: Cell::new(1),
char_position_in_line: Cell::new(0),
}),
token_type: super::token::TOKEN_INVALID_TYPE,
text: None,
token: None,
hit_eof: false,
channel: super::token::TOKEN_DEFAULT_CHANNEL,
// token_factory_source_pair: None,
mode_stack: Vec::new(),
mode: self::LEXER_DEFAULT_MODE,
};
let pos = lexer.current_pos.clone();
lexer.interpreter.as_mut().unwrap().current_pos = pos;
lexer
}
}
impl<'input, T, Input, TF> TokenAware<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
}
impl<'input, T, Input, TF> TokenSource<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
#[inline]
#[allow(unused_labels)]
fn next_token(&mut self) -> <Self::TF as TokenFactory<'input>>::Tok {
assert!(self.input.is_some());
let _marker = self.input().mark();
'outer: loop {
if self.hit_eof {
self.emit_eof();
break;
}
self.token = None;
self.channel = LEXER_DEFAULT_TOKEN_CHANNEL;
self.token_start_column = self
.interpreter
.as_ref()
.unwrap()
.get_char_position_in_line();
self.token_start_line = self.interpreter.as_ref().unwrap().get_line();
self.text = None;
let index = self.input().index();
self.token_start_char_index = index;
'inner: loop {
self.token_type = TOKEN_INVALID_TYPE;
// detach from self, to allow self to be passed deeper
let mut interpreter = self.interpreter.take().unwrap();
// let mut input = self.input.take().unwrap();
let result = interpreter.match_token(self.mode, self);
self.interpreter = Some(interpreter);
let ttype = result.unwrap_or_else(|err| {
// println!("error, recovering");
notify_listeners(&mut self.error_listeners.borrow_mut(), &err, self);
self.interpreter
.as_mut()
.unwrap()
.recover(err, self.input.as_mut().unwrap());
LEXER_SKIP
});
// self.input = Some(input)
if self.input().la(1) == super::int_stream::EOF {
self.hit_eof = true;
}
if self.token_type == TOKEN_INVALID_TYPE {
self.token_type = ttype;
}
if self.token_type == LEXER_SKIP {
continue 'outer;
}
if self.token_type != LEXER_MORE {
break;
}
}
if self.token.is_none() {
self.emit();
break;
}
}
self.input().release(_marker);
self.token.take().unwrap()
}
fn get_line(&self) -> isize {
self.current_pos.line.get()
}
fn get_char_position_in_line(&self) -> isize {
self.current_pos.char_position_in_line.get()
}
fn get_input_stream(&mut self) -> Option<&mut dyn IntStream> {
match &mut self.input {
None => None,
Some(x) => Some(x as _),
}
}
fn get_source_name(&self) -> String {
self.input
.as_ref()
.map(|it| it.get_source_name())
.unwrap_or("<none>".to_string())
}
// fn set_token_factory<'c: 'b>(&mut self, f: &'c TokenFactory) {
// self.factory = f;
// }
fn get_token_factory(&self) -> &'input TF {
self.factory
}
}
#[cold]
#[inline(never)]
fn notify_listeners<'input, T, Input, TF>(
liseners: &mut Vec<Box<dyn ErrorListener<'input, BaseLexer<'input, T, Input, TF>>>>,
e: &ANTLRError,
lexer: &BaseLexer<'input, T, Input, TF>,
) where
T: LexerRecog<'input, BaseLexer<'input, T, Input, TF>> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
let inner = lexer
.input
.as_ref()
.unwrap()
.get_text(lexer.token_start_char_index, lexer.get_char_index());
let text = format!(
"token recognition error at: '{}'",
TF::get_data(inner).to_display()
);
for listener in liseners.iter_mut() {
listener.syntax_error(
lexer,
None,
lexer.token_start_line,
lexer.token_start_column,
&text,
Some(e),
)
}
}
impl<'input, T, Input, TF> Lexer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Input = Input;
fn input(&mut self) -> &mut Self::Input {
self.input.as_mut().unwrap()
}
fn set_channel(&mut self, v: isize) {
self.channel = v;
}
fn push_mode(&mut self, m: usize) {
self.mode_stack.push(self.mode);
self.mode = m;
}
fn pop_mode(&mut self) -> Option<usize> {
self.mode_stack.pop().map(|mode| {
self.mode = mode;
mode
})
}
fn set_type(&mut self, t: isize) {
self.token_type = t;
}
fn set_mode(&mut self, m: usize) {
self.mode = m;
}
fn more(&mut self) {
self.set_type(LEXER_MORE)
}
fn skip(&mut self) {
self.set_type(LEXER_SKIP)
}
fn reset(&mut self) {
unimplemented!()
}
fn get_interpreter(&self) -> Option<&LexerATNSimulator> {
self.interpreter.as_deref()
}
}
app_2_wl.py
import streamlit as st
# Base packages
import pandas as pd
import numpy as np
import datetime
import altair as alt
import matplotlib.pyplot as plt
# Find coordinates
from geopy.geocoders import Nominatim
geolocator = Nominatim(user_agent="myapp2")
import time
# Plot static maps
import cartopy.crs as ccrs
import cartopy.feature as cfeature
# Plot interactive maps
import geopandas as gpd
from shapely import wkt
from bokeh.io import output_notebook, show, output_file
from bokeh.plotting import figure
from bokeh.models import GeoJSONDataSource, ColumnDataSource
import json
from bokeh.models import HoverTool
import math
from scipy.optimize import curve_fit
import plotly.express as px
st.header(" Xibaar yu aju ci Jangorëy Koronaa ci Senegal 🇸🇳")
st.sidebar.markdown("*yeesal gu muj: 25/03/2020*")
st.sidebar.markdown("---")
st.sidebar.header("Ressources utiles")
st.sidebar.markdown("Numero guir woté bu jamp bu jeuk: **78 172 10 81**")
st.sidebar.markdown("Numero guir woté bu jamp ñaaréle: **76 765 97 31**")
st.sidebar.markdown("Numero guir woté bu jamp ñeetéle: **70 717 14 92**")
st.sidebar.markdown("Numero boye woté té do fayye bu ministere: **800 00 50 50**")
st.sidebar.markdown("Samu: **1515**")
st.sidebar.markdown("Besel ci sa telefone : **#2121#**")
st.sidebar.markdown("[Saytul say sa yarame ci Jangoroji ci Prevcovid19](http://www.prevcovid19.com/#/teste)")
st.sidebar.markdown("[Tweetru ministre gui eub walu wergu yaram ](https://twitter.com/MinisteredelaS1)")
st.sidebar.markdown("[Booleb xéeti mbir ak màndargaay jumtukaayu ](https://github.com/maelfabien/COVID-19-Senegal)")
st.sidebar.markdown("---")
st.sidebar.header("Jokko ak wa ministere")
st.sidebar.markdown("Ministre gui eub walu wergu yaram ak boolem boko / Fann Residence")
st.sidebar.markdown("Rue Aimé Césaire, Dakar, Senegal")
st.sidebar.markdown("+221 800 00 50 50 - [email protected]")
st.sidebar.markdown("---")
st.sidebar.markdown("Ñi ka derale moye [Maël Fabien](https://maelfabien.github.io/) ak [Dakar Institute of Technology](https://dit.sn/)")
# I. Dataframe
df = pd.read_csv("COVID_Dakar.csv", sep=";")
df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
#st.write(df)
evol_cases = df[['Date', 'Positif', 'Negatif', 'Décédé', 'Guéri']].groupby("Date").sum().cumsum()
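# The sheet lists newly reported outcomes per day; grouping by date, summing and taking
# the cumulative sum turns them into the running totals used throughout the page.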
st.subheader("Ci tënkk")
total_positif = evol_cases.tail(1)['Positif'][0]
total_negatif = evol_cases.tail(1)['Negatif'][0]
total_decede = evol_cases.tail(1)['Décédé'][0]
total_geuri = evol_cases.tail(1)['Guéri'][0]
st.markdown("Limu ñi feebar: <span style='font-size:1.5em;'>%s</span>"%(total_positif - total_geuri), unsafe_allow_html=True)
st.markdown("Limu ñi faatu: <span style='font-size:1.5em;'>%s</span>"%(total_decede), unsafe_allow_html=True)
st.markdown("Limu ñi wer: <span style='font-size:1.5em;'>%s</span>"%(total_geuri), unsafe_allow_html=True)
st.markdown("dayob ñi wer : <span style='font-size:1.5em;'>%s</span>"%(np.round(total_geuri / total_positif, 3) * 100), unsafe_allow_html=True)
st.markdown("dàyob yoqute ñi feebar bis bu ay : <span style='font-size:1.5em;'>%s</span>"%(np.round(pd.DataFrame(np.sqrt(evol_cases['Positif'].pct_change(periods=2)+1)-1).tail(1)['Positif'][0] * 100, 2)), unsafe_allow_html=True)
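# Note on the daily growth rate above: pct_change(periods=2) measures growth over a
# two-day window, so sqrt(1 + r) - 1 converts it into an equivalent per-day (geometric
# mean) rate before it is displayed as a percentage.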
st.markdown("Mboolem ñi ame Koronaa: <span style='font-size:1.5em;'>%s</span>"%(total_positif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu te ñu mùcc ci feebar bi: <span style='font-size:1.5em;'>%s</span>"%(total_negatif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(total_positif + total_negatif), unsafe_allow_html=True)
st.markdown("dayob ñi ame feebar bi ci ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(np.round(total_positif / (total_positif + total_negatif), 3) * 100), unsafe_allow_html=True)
# II. Map
st.markdown("---")
st.subheader("ñi ame feebar bi fu ñu féete")
shapefile = 'app/ne_110m_admin_0_countries.shp'
#Read shapefile using Geopandas
gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]
gdf.columns = ['country', 'country_code', 'geometry']
gdf = gdf[gdf['country']=="Senegal"]
grid_crs=gdf.crs
gdf_json = json.loads(gdf.to_json())
grid = json.dumps(gdf_json)
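# Bokeh's GeoJSONDataSource expects a GeoJSON string, hence the GeoDataFrame is
# round-tripped through json.loads/json.dumps before being handed to the plot.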
cities = pd.read_csv("city_coordinates.csv", index_col=0)
def find_lat(x):
try:
return float(cities[cities['Ville'] == x]['Latitude'])
except TypeError:
return None
def find_long(x):
try:
return float(cities[cities['Ville'] == x]['Longitude'])
except TypeError:
return None
summary = df[['Positif', 'Ville']].groupby("Ville").sum().reset_index()
summary['latitude'] = summary['Ville'].apply(lambda x: find_lat(x))
summary['longitude'] = summary['Ville'].apply(lambda x: find_long(x))
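# find_lat/find_long succeed only when a 'Ville' matches exactly one row of
# city_coordinates.csv; unmatched cities raise TypeError and get None coordinates.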
geosource = GeoJSONDataSource(geojson = grid)
pointsource = ColumnDataSource(summary)
hover = HoverTool(
tooltips = [('Ville', '@Ville'), ('Limu ñi ame Koronaa ', '@Positif')]
)
#Create figure object.
p = figure(plot_height = 550 , plot_width = 700, tools=[hover, 'pan', 'wheel_zoom'])
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.xaxis.visible = False
p.yaxis.visible = False
p.outline_line_color = None
patch = p.patches('xs','ys', source = geosource, fill_color = '#fff7bc',
line_color = 'black', line_width = 0.35, fill_alpha = 1,
hover_fill_color="#fec44f")
#Add patch renderer to figure.
patch = p.patches('xs','ys', source = geosource, fill_color = 'lightgrey',
line_color = 'black', line_width = 0.25, fill_alpha = 1)
p.circle('longitude','latitude',source=pointsource, size=15)
st.bokeh_chart(p)
# III. Map
st.markdown("---")
st.subheader(" Yoqute limu ñi ame Koronaa ci Senegal")
highlight = alt.selection(type='single', on='mouseover',
fields=['Positif'], nearest=True)
chart = alt.Chart(evol_cases.reset_index()).mark_line(point=True, strokeWidth=5).encode(
x='Date:T',
y='Positif:Q',
tooltip='Positif:Q'
).add_selection(
highlight
).properties(height=400, width=700)
st.write(chart.interactive())
st.markdown("---")
st.subheader("Mingalé rewu Pays-Bas")
st.write("Senegaal rewle bigua xamanetané limu way-dëkké dafa méggo ak rewu Pays-bas (Fukk ak jurrom benn million), ba taxna ab mégele meuna dox di diganté ñaari dëkk yoyé. Doneté yoqute Jangorëy Koronaa gui ci rewum Senegaal la geune yéxé ci sinu dioni yalla taye, luñu setlu ci ni Jangoro gui di doxé diarna bayi xel wayé itameu lathe na niou xalate ci.Fi gua xamené mome leu rewu Senegaal tolu ci Jangorëy Koronaa dafa mengo ci fukki fan ak juroom ci guinaw fi rew mi di Pays-Bas Tolone,wayé xayma gogu boye seteu juroom ñaari faney le guir rew pays-bas té Senegaal fukki fan ak juroom ñeet. Lim yi aju ci rewu Pays-Bas ñuguike jeulé ci Wikipedia: https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_the_Netherlands")
df_nl = pd.read_csv("df_nl.csv")
plt.figure(figsize=(16,10))
plt.plot(df_nl['Netherlands'], linestyle="--", linewidth=5, label="Pays-Bas")
plt.plot(df_nl['Senegal'],label="Sénégal", linewidth=5)
plt.figtext(.5,.9,'Evolution des cas au Sénégal et aux Pays-Bas', fontsize=30, ha='center')
plt.legend()
st.pyplot(plt)
# IV. Contamination
st.markdown("---")
st.subheader("Tassarok Jangorogui")
st.write("Ñugui xamé ñeneu ñu jeulé Jangoroji ci ñu jugué bimeu rew, ci niit ñu feebar yigua xamené ño waleu ñeni niit.Limu ñigua xamné ño ameu Jangoroji té jeuléko ci biir rewmi, moye waleu gui geuna ragalu ci walanté Jangoroji..")
facteur = df[['Date', 'Facteur']].dropna()
facteur['Count'] = 1
importe = facteur[facteur['Facteur'] == "Importé"].groupby("Date").sum().cumsum().reset_index()
voyage = facteur[facteur['Facteur'] == "Contact"].groupby("Date").sum().cumsum().reset_index()
communaute = facteur[facteur['Facteur'] == "Communauté"].groupby("Date").sum().cumsum().reset_index()
df_int = pd.merge(importe, voyage, left_on='Date', right_on='Date', how='outer')
df_int = pd.merge(df_int, communaute, left_on='Date', right_on='Date', how='outer')
df_int['Date'] = pd.to_datetime(df_int['Date'], dayfirst=True)
df_int = df_int.sort_values("Date").ffill().fillna(0)
df_int.columns = ["Date", "Importes", "Contact", "Communauté"]
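# The outer merges align the three cumulative series on every reported date; sorting by
# date, forward-filling (ffill) and then fillna(0) carries each running total forward
# and covers dates before a given transmission factor first appears.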
ch0 = alt.Chart(df_int).transform_fold(
['Importes', 'Contact', 'Communauté'],
).mark_line(size=5).encode(
x='Date:T',
y='value:Q',
color='key:N'
).properties(height=500, width=700)
st.altair_chart(ch0)
st.write("Ñu dieulé Jangoroji bitimeu rew, té waleu Jangoroji ñeneu ñu dëkk Senegaal, Ñugui jugué ci rew yi :")
ch3 = alt.Chart(df.dropna(subset=['Source/Voyage'])).mark_bar().encode(
x = 'Source/Voyage:N',
y=alt.Y('count()', title='Nombre de patients')
).properties(title="Provenance des malades", height=300, width=700)
st.write(ch3)
# Interactive Map
st.write("Natalu feega xamené fila jangorey koronaa bi jugué:")
df3 = px.data.gapminder().query("year == 2007")
df2 = df3[(df3['country']=="Italy") | (df3['country']=="Senegal") | (df3['country']=="United Kingdom") | (df3['country']=="France") | (df3['country']=="Spain")]
fig = px.line_geo(df2, locations="iso_alpha",
projection="orthographic")
st.plotly_chart(fig)
# V. Population
st.markdown("---")
st.subheader("Way-dëkk ñu feebar daleu.")
st.write("Les chiffres présentés ci-dessous tiennent compte des publication du Ministère de la Santé et de l'Action Sociale. Certaines données sont manquantes, et nous n'affichons que les valeurs connues à ce jour.")
st.write("1. At ñu eupe ci yi Jangoroji di diap ", np.mean(df['Age'].dropna()), " ans")
ch = alt.Chart(df).mark_bar().encode(
x = 'Age:Q',
y=alt.Y('count()', title='Nombre de patients')
).properties(title="Atu aji wop gui ", height=300, width=700)
st.write(ch)
st.write("2. Ñu eup ci aji-wop yi aye goor lañu")
st.write(pd.DataFrame(df[['Homme', 'Femme']].dropna().sum()).transpose())
st.write("3. Ñu eupe ci ñu feebar bi diapeu ndakaru lañu dëkké")
ch2 = alt.Chart(df.dropna(subset=['Ville'])).mark_bar().encode(
x = 'Ville:N',
y=alt.Y('count()', title='Nombre de patients')
).properties(title="Ville connue du patient", height=300, width=700)
st.write(ch2)
st.write("4. Ñu eupe ci niit ñu amé Jangoroji Senegaal lañu dëkk.")
st.write(df['Resident Senegal'].dropna().value_counts())
st.write("5. Ñu eupe ci niit ñu amé Jangoroji Senegaal lañu dëkk.")
st.write(df['Resident Senegal'].dropna().value_counts())
st.write("6. Faan ñigua xamné aji wop gui ci laye teud lalu opital : ", np.mean(df['Temps Hospitalisation (j)'].dropna()), " Faan")