| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
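Each row below is a fill-in-the-middle (FIM) sample: `prefix` holds the code before a masked span, `suffix` the code after it, `middle` the masked span itself, and `fim_type` the kind of span that was masked (the values visible in this section are `identifier_name`, `conditional_block`, `identifier_body`, and `random_line_split`). The sketch below shows how a row is assumed to reassemble into the original source; the literal strings are shortened stand-ins taken from the first row.

    row = {"prefix": "... def ", "middle": "getTotalNumRRI", "suffix": "(self):\n    ...", "fim_type": "identifier_name"}
    reassembled = row["prefix"] + row["middle"] + row["suffix"]  # prefix + middle + suffix restores the snippet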
neurosky_ecg.py | t1.daemon = True
t1.start()
print "Started CardioChip reader"
def check(self):
""" checks if thread currently exists """
return self.connected
def stop(self):
""" stops running thread """
self.connected = False
def setHRVUpdate(self, numRRI):
"""
set the number of RR intervals to count
between updating the HRV value
"""
self.HRV_UPDATE = numRRI
def _parseData(self, payload):
"""
given the byte payload from the serial connection, parse the first byte
as the code and return a list of dicts of all values found in the packet
dicts will be of the format: {'timestamp': t, <codename>: codeval}
Timestamps are based on the first raw_ecg data received on the host computer, and
extrapolated using a sample frequency of 512 Hz from there. This is accurate in the short term,
but should not be used for longer (>10 min) recordings.
"""
out=[]
bytesParsed = 0
while bytesParsed < len(payload):
#check for the extended Code Level, code and length
#count the number of EXCODE_BYTE
#extendedCodeLevel = sum([1 for x in data if x == EXCODE_BYTE] )
#bytesParsed += extendedCodeLevel
#identify the length of the expected bytes in the payload
code = payload[bytesParsed]
bytesParsed +=1
if code > 0x7F:
# multi-byte code, length > 1
length = payload[bytesParsed]
bytesParsed +=1
else:
length = 1
if code == SENSOR_STATUS:
# value of 0==no contact, 200==contact
#print "leadoff: %i" % payload[bytesParsed]
out.append( {'timestamp': self.curtime, 'leadoff': payload[bytesParsed] } )
bytesParsed +=1
elif code == HEART_RATE:
#print "HR: %i" % payload[bytesParsed]
out.append( {'timestamp': self.curtime, 'HR': payload[bytesParsed:] } )
bytesParsed +=1
elif code == CONFIG_BYTE:
#print "config: %i" % payload[bytesParsed]
out.append( {'timestamp': self.curtime, 'config': payload[bytesParsed:] } )
bytesParsed +=1
elif code == RAW_ECG:
# raw value is between -32768 and 32767, in two's complement form
# if the raw value is 32768 or higher, it should be wrapped around to allow for negative values
raw = payload[bytesParsed]*256 + payload[bytesParsed+1]
if raw >= 32768:
raw = raw - 65536
#print "ecg: %i" % ecg
# create the timestamp on each ECG sample, starting from the first
if self.starttime is None:
self.starttime = time.time()
self.curtime = self.starttime
else:
self.curtime = self.curtime + 1./self.Fs
out.append( {'timestamp': self.curtime, 'ecg_raw': raw } )
bytesParsed += length
elif code == DEBUG_1:
#print "debug1: " + str(payload[bytesParsed:]).strip('[]')
out.append( {'timestamp': self.curtime, 'debug1': payload[bytesParsed:] } )
bytesParsed += length
elif code == DEBUG_2:
#print "debug2: " + str(payload[bytesParsed:]).strip('[]')
out.append( {'timestamp': self.curtime, 'debug2': payload[bytesParsed:] } )
bytesParsed += length
else:
print "unknown code: %i" % code
return out
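# --- Added illustrative sketch (not part of the original file) ---
# The RAW_ECG branch above combines two payload bytes and wraps values of 2**15
# or more into the negative range (two's complement). A standalone helper with a
# hypothetical name:
def _decode_raw_ecg_sample(msb, lsb):
    raw = msb * 256 + lsb      # combine high byte and low byte
    if raw >= 32768:           # 2**15 and above encode negative values
        raw -= 65536           # wrap into [-32768, 32767]
    return raw
# e.g. _decode_raw_ecg_sample(0xFF, 0xFF) == -1, _decode_raw_ecg_sample(0x00, 0x10) == 16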
def _read_cardiochip(self):
"""
read data packets from the cardiochip starter kit, via the bluetooth serial port
"""
cur_leadstatus = 0
sample_count =0
while self.connected:
sample_count+=1
#check for sync bytes
readbyte = ord(self.ser.read(1))
#print readbyte, SYNC_BYTE
if readbyte != SYNC_BYTE:
continue
readbyte = ord(self.ser.read(1))
if readbyte != SYNC_BYTE:
continue
#parse length byte
while True:
pLength = ord(self.ser.read(1))
if pLength != SYNC_BYTE:
break
if pLength > 169:
continue
#print "L: %i" % pLength
# collect payload bytes
payload = self.ser.read(pLength)
payload = [ord(x) for x in payload] #convert to int from string
#print "payload: " + str(payload).strip('[]')
# one's complement inverse of the 8-bit payload sum
checksum = sum(payload) & 0xFF
checksum = ~checksum & 0xFF
# catch and verify checksum byte
chk = ord(self.ser.read(1))
#print "chk: " + str(checksum)
if chk != checksum:
print "checksum error, %i != %i" % (chk, checksum)
continue
output = self._parseData(payload)
lead_status = next(( d for d in output if 'leadoff' in d), None)
if lead_status is not None:
if cur_leadstatus != lead_status['leadoff']:
#we have a change
if lead_status['leadoff']==200:
print "LEAD ON"
elif lead_status['leadoff']==0:
print "LEAD OFF"
cur_leadstatus = lead_status['leadoff']
# store the output data in a queue
# first, create a tuple with the sample index and dict with the timestamp and ecg
ecgdict = next(((i,d) for i,d in enumerate(output) if 'ecg_raw' in d), None)
if ecgdict is not None and sample_count>self.Fs*2:
#let's just ignore the first 2 seconds of crappy data
ecgdict[1]['leadoff'] = cur_leadstatus
#print ecgdict[1]
self.ecg_buffer.put(ecgdict[1]) # this should save the ecg and timestamp keys
return
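# --- Added illustrative sketch (not part of the original file) ---
# The packet checksum verified above is the one's complement (bitwise inverse) of
# the 8-bit sum of the payload bytes. A hypothetical standalone check:
def _checksum_ok(payload_bytes, chk):
    expected = ~(sum(payload_bytes) & 0xFF) & 0xFF
    return chk == expected
# e.g. for payload [0x80, 0x02, 0x00, 0x20] the expected checksum byte is 0x5D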
def isBufferEmpty(self):
""" check to see if ecg buffer is empty """
return self.ecg_buffer.empty()
def popBuffer(self):
""" get first value (dict) in the ecg_buffer """
return self.ecg_buffer.get()
def _ecgInitAlgLib(self,libname='TgEcgAlg64.dll', power_frequency=60):
""" initialize the TgEcg algorithm dll """
if sys.maxsize > (2**32)/2-1: #running 64 bit
print "loading Neurosky tg_ecg library, 64 bit"
libname = 'TgEcgAlg64.dll'
else:
print "loading Neurosky tg_ecg library, 32 bit"
#libname = 'TgEcgAlg.dll'
libname = 'tg_ecg.so'
print "loading analysis library: ", libname
E = cdll.LoadLibrary(libname)
E.tg_ecg_do_hrv_sdnn(0)
E.tg_ecg_do_relaxation_level(0)
E.tg_ecg_do_respiratory_rate(0)
E.tg_ecg_do_rri_precise(0)
E.tg_ecg_set_power_line_freq(power_frequency)
E.tg_ecg_get_raw_smoothed.restype = c_double
E.tg_ecg_init() # init the library with selected options
return E
def ecgResetAlgLib(self):
""" reset ecg algorithm """
print "resetting ecg analysis library"
self.analyze.tg_ecg_init()
self.starttime = None
self.curtime = None
def | (self):
"""
return the total number of RRIs held in the algorithm buffer
"""
return self.analyze.tg_ecg_get_total_rri_count()
def ecgalgAnalyzeRaw(self, D, nHRV=30): #, dataqueue):
"""
test to see if we have values in the ecg_buffer, and if so, pass
the most recent raw_ecg value into the TgEcg analysis framework
Returns dict with timestamp, filtered ECG, HR, and HRV, if available
This function expects a dict as input, with at least an 'ecg_raw' key
"""
#D = self.popBuffer()
self.analyze.tg_ecg_update(D['ecg_raw'])
#ecg_filt = self.analyze.tg_ecg_get_raw_filtered() #delayed against raw by 211 samples
ecg_filt = self.analyze.tg_ecg_get_raw_smoothed() #delayed against raw by 450 samples, if 60Hz powerline
D['ecg_filt'] = ecg_filt
if self.analyze.tg_ecg_is_r_peak():
# | getTotalNumRRI | identifier_name |
neurosky_ecg.py | """
if sys.maxsize > (2**32)/2-1: #running 64 bit
print "loading Neurosky tg_ecg library, 64 bit"
libname = 'TgEcgAlg64.dll'
else:
print "loading Neurosky tg_ecg library, 32 bit"
#libname = 'TgEcgAlg.dll'
libname = 'tg_ecg.so'
print "loading analysis library: ", libname
E = cdll.LoadLibrary(libname)
E.tg_ecg_do_hrv_sdnn(0)
E.tg_ecg_do_relaxation_level(0)
E.tg_ecg_do_respiratory_rate(0)
E.tg_ecg_do_rri_precise(0)
E.tg_ecg_set_power_line_freq(power_frequency)
E.tg_ecg_get_raw_smoothed.restype = c_double
E.tg_ecg_init() # init the library with selected options
return E
def ecgResetAlgLib(self):
""" reset ecg algorithm """
print "resetting ecg analysis library"
self.analyze.tg_ecg_init()
self.starttime = None
self.curtime = None
def getTotalNumRRI(self):
"""
return the total number of RRIs held in the algorithm buffer
"""
return self.analyze.tg_ecg_get_total_rri_count()
def ecgalgAnalyzeRaw(self, D, nHRV=30): #, dataqueue):
"""
test to see if we have values in the ecg_buffer, and if so, pass
the most recent raw_ecg value into the TgEcg analysis framework
Returns dict with timestamp, filtered ECG, HR, and HRV, if available
This function expects a dict as input, with at least an 'ecg_raw' key
"""
#D = self.popBuffer()
self.analyze.tg_ecg_update(D['ecg_raw'])
#ecg_filt = self.analyze.tg_ecg_get_raw_filtered() #delayed against raw by 211 samples
ecg_filt = self.analyze.tg_ecg_get_raw_smoothed() #delayed against raw by 450 samples, if 60Hz powerline
D['ecg_filt'] = ecg_filt
if self.analyze.tg_ecg_is_r_peak():
#print "found peak"
num_rri = self.analyze.tg_ecg_get_total_rri_count()
rri = self.analyze.tg_ecg_get_rri()
hr = self.analyze.tg_ecg_compute_hr_now()
D['rri']= rri
D['hr'] = hr
print "%i HR: %i (rri: %i)" % (num_rri, 60000* 1/rri, rri)
if num_rri>=15 and num_rri < nHRV:
# slowly increase number of RRIs in HRV calculation until we reach nHRV
# This is equivalent to starting with a window of 15 RRIs and increasing the window length to max=nHRV
nHRV = num_rri
if num_rri >= nHRV and (num_rri+2) % self.HRV_UPDATE == 0:
#calculate every HRV_UPDATE heartbeats, starting at nHRV (window increases from 15 to 30)
hrv = self.analyze.tg_ecg_compute_hrv(nHRV)
D['hrv'] = hrv
print "hrv: " + str(hrv)
return D
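# --- Added illustrative sketch (not part of the original file) ---
# The HRV logic above starts with a 15-beat window that grows to nHRV (default 30)
# and then recomputes HRV every HRV_UPDATE beats. A hypothetical helper mirroring
# the window-growth rule (only meaningful once at least 15 RRIs have been seen):
def _hrv_window(num_rri, nHRV=30):
    return min(num_rri, nHRV)   # window grows with the beat count, capped at nHRV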
if __name__ == "__main__":
"""
All of the code below is used for visualization and testing of the ECG framework.
It is not intended as production code, but it can serve as an example of how to use
the NSK framework.
"""
import numpy as np
#from matplotlib import pyplot as plt
import pylab as plt
# hack to get interactive plot working
# https://github.com/matplotlib/matplotlib/issues/3505
sys.ps1 = 'IAMAHACK'
target_port = 'COM3' #production windows box
#target_port = 'COM8' #mike's laptop
plot_fig=True
ecgdict = []
try:
nskECG = NeuroskyECG(target_port)
except serial.serialutil.SerialException:
print "Could not open target serial port: %s" % target_port
sys.exit(1)
nskECG.start()
if plot_fig:
plt.ion()
#load the queues to plot
# t = [ x/nskECG.Fs for x in range(0,nskECG.Fs*1)]
# ecgval = [0]*nskECG.Fs*1
t=[time.time()]
ecgval =[0]
#set up the test plot
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(2,1,1) #smoothed ECG
#ecgtrace, = plt.plot(0,0)
ecgtrace, = plt.plot(t,ecgval)
ax1.set_ylim((-10000, 10000))
ax2 = fig.add_subplot(2,1,2) # HRV
hrvtrace, = ax2.plot(t,[0])
ax2.set_ylim((0, 300))
time.sleep(0.1)
##########################################
sample_count = 0
leadoff_count = 0
isreset=False
while True:
if not nskECG.isBufferEmpty():
sample_count+=1
#print "buffer len", nskECG.ecg_buffer.qsize()
D = nskECG.popBuffer()
# ignore data prior to leadoff
if D['leadoff']==0 and sample_count > nskECG.Fs*2:
leadoff_count+=1
#print "leadoff", D['leadoff']
if leadoff_count>nskECG.Fs*2: #more than 2 seconds of leadoff, drop them
#if not isreset: # we haven't reset recently, DO IT
if nskECG.analyze.tg_ecg_get_total_rri_count()!=0:
isreset = True
ecgdict = [] #reset the buffer
nskECG.ecgResetAlgLib()
print "num rri post reset", nskECG.analyze.tg_ecg_get_total_rri_count()
nskECG.ecg_buffer.task_done()
continue
else: # leadoff==200, or lead on
#print "done resetting, loading data again"
leadoff_count=0
if isreset:
print "turning things back on"
isreset = False
D = nskECG.ecgalgAnalyzeRaw(D)
### the next two lines are examples of how to pop values from the
# internal buffer and create a list of filtered ecg values.
# You can do the same thing for 'rri' and 'hrv' values
# I would also keep track of the unix timestamp, to make sure it lines up
# with the EEG timestamps
#minibuffer = [nskECG.popBuffer() for i in range(0,nskECG.Fs/4.)]
#ecgfilt = [x['ecg_filt'] for x in minibuffer]
#if 'hrv' in D:
# cur_hrv=D['hrv']
ecgdict.append(D)
#print D
#########################################
# plot the data
if plot_fig and sample_count%64==0:
#print "length ecgdict", len(ecgdict), -min([len(ecgdict),512*4])
ecgsub = ecgdict[-min([len(ecgdict),nskECG.Fs*4]):]
ecg_t = [x['timestamp'] for x in ecgsub]
ecg_filt = [x['ecg_filt'] for x in ecgsub]
ymin = float(min(ecg_filt))-100
ymax = float(max(ecg_filt))+100
plt.axes(ax1)
ax1.set_ylim((ymin,ymax))
ecgtrace.set_xdata(ecg_t)
ecgtrace.set_ydata(ecg_filt)
ax1.relim()
ax1.autoscale_view()
#################
# update hrv
hrv_t=[x['timestamp'] for x in ecgsub if 'hrv' in x]
hrv =[x['hrv'] for x in ecgsub if 'hrv' in x]
if len(hrv) != 0:
#print "length hrv", hrv
| plt.axes(ax2)
ymin = float(min(hrv))-10
ymax = float(max(hrv))+10
ax2.set_ylim((ymin,ymax))
hrvtrace.set_xdata(hrv_t)
hrvtrace.set_ydata(hrv)
ax2.relim()
ax2.autoscale_view() | conditional_block |
|
cloud4.py |
if batchnorm:
self.bn = nn.BatchNorm1d(hiddendim, track_running_stats = True)
self.fc2 = nn.Linear(hiddendim, inputdim)
def forward(self, x):
z = self.bn(self.fc1(x)) if self.batchnorm else self.fc1(x)
z = functional.relu(z)
z = self.fc2(z)
return z + x, z
class OneRepResNet(nn.Module):
def __init__(self, nblocks, inputdim, hiddendim, batchnorm, nclasses, learnclassifier, yintercept, initialize):
super(OneRepResNet, self).__init__()
self.blocks = nn.ModuleList([ResBlock(inputdim, hiddendim, batchnorm) for i in range(nblocks)])
self.fcOut = nn.Linear(inputdim, nclasses)
self.blocks.apply(initialize)
if learnclassifier == 1:
initialize(self.fcOut)
else:
with torch.no_grad():
self.fcOut.weight = torch.nn.Parameter(torch.tensor([[1., 1.], [0., 0.]]))
self.fcOut.bias = torch.nn.Parameter(torch.tensor([yintercept, 0.]))
if learnclassifier == 0:
for param in self.fcOut.parameters():
param.requires_grad = False
def forward(self, x):
rs = []
for block in self.blocks:
x, r = block(x)
rs.append(r)
z = self.fcOut(x)
return z, rs
def save_input_output_hook(name, mod, inp, out):
global inps, outs
inp0 = inp[0] if type(inp) is tuple else inp
out0 = out[0] if type(out) is tuple else out
inps[name].append(inp0.detach().numpy().copy())
outs[name].append(out0.detach().numpy().copy())
def save_outgrad_hook(name, mod, ginp, gout):
global gouts
gout0 = gout[0] if type(gout) is tuple and gout[0] is not None else gout
gouts[name].append(gout0.detach().numpy().copy())
def register_hooks(model):
for name, m in model.named_modules():
m.register_forward_hook(partial(save_input_output_hook, name))
m.register_backward_hook(partial(save_outgrad_hook, name))
def W2(X1, X2):
n = len(X1)
C = np.zeros((n, n))
for i in range(n):
for j in range(n):
C[i, j] = np.linalg.norm(X1[i] - X2[j])
optimal_plan = ot.emd([], [], C)
optimal_cost = np.sum(optimal_plan * C)
return optimal_cost
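# --- Added illustrative note (not part of the original file) ---
# W2 above builds an n x n Euclidean cost matrix and solves a discrete
# optimal-transport problem with uniform marginals (ot.emd with empty weight
# lists), returning the summed transport cost. For two clouds where every point
# shifts by exactly one unit, the result is 1.0:
#   X1 = np.array([[0., 0.], [1., 0.]])
#   X2 = np.array([[0., 1.], [1., 1.]])
#   W2(X1, X2)  # -> 1.0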
def train(model, nepochs, criterion, lambdatransport, lambdaloss0, tau, us, optimizer, trainloader, testloader, X_test, Y_test, nblocks, folder):
ntrain = len(trainloader)
losses, train_accuracy, test_accuracy = [], [], []
lambdaloss = lambdaloss0
i = 0
print('---train')
for epoch in range(1, nepochs + 1):
model.train()
loss_meter, accuracy_meter = AverageMeter(), AverageMeter()
for x, y in trainloader:
i += 1
optimizer.zero_grad()
out, rs = model(x)
if us == 0:
loss = criterion(out, y) + lambdatransport * sum([torch.norm(r, 2) for r in rs])
else:
loss = lambdaloss * criterion(out, y) + lambdatransport * sum([torch.norm(r, 2) for r in rs])
_, pred = torch.max(out.data, 1)
update_meters(y, pred, loss.item(), loss_meter, accuracy_meter)
loss.backward()
optimizer.step()
if us > 0 and i % us == 0:
out, rs = model(x)
lambdaloss += tau * criterion(out, y).item()
epochloss = loss_meter.avg
epochacc = accuracy_meter.avg
acc, F, C, W = test(model, criterion, lambdatransport, lambdaloss, testloader, X_test, Y_test, epoch, nblocks, folder)
print('[epoch %d] lambda loss: %.3f train loss: %.3f train accuracy: %.3f test accuracy: %.3f' % (epoch, lambdaloss, epochloss, epochacc, acc))
losses.append(epochloss)
train_accuracy.append(epochacc)
test_accuracy.append(acc)
if epoch > 3 and test_accuracy[-1] == test_accuracy[-2] == test_accuracy[-3] == 1 and train_accuracy[-1] == train_accuracy[-2] == train_accuracy[-3] == 1:
break
return losses, test_accuracy, epoch, F, C, W
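# --- Added illustrative sketch (not part of the original file) ---
# When us > 0 the loop above performs an Uzawa-style dual update: the transport
# penalty keeps weight 1 while the multiplier on the classification loss grows by
# tau times the current loss value every `us` optimizer steps. Hypothetical helper:
def _uzawa_step(lambdaloss, tau, ce_loss_value):
    return lambdaloss + tau * ce_loss_value   # mirrors `lambdaloss += tau * criterion(out, y).item()`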
def test(model, criterion, lambdatransport, lambdaloss, testloader, X_test, Y_test, epoch, nblocks, folder):
model.eval()
X = []
loss_meter, accuracy_meter = AverageMeter(), AverageMeter()
global inps, outs, gouts
inps, outs, gouts = collections.defaultdict(list), collections.defaultdict(list), collections.defaultdict(list)
for (x, y) in testloader:
out, rs = model(x)
loss = lambdaloss * criterion(out, y) + lambdatransport * sum([torch.norm(r, 2) for r in rs])
loss.backward()
_, pred = torch.max(out.data, 1)
update_meters(y, pred, loss.item(), loss_meter, accuracy_meter)
inps_ = {name: np.vstack(inp) for name, inp in inps.items()}
outs_ = {name: np.vstack(out) for name, out in outs.items()}
gouts_ = {name: np.vstack(gout) for name, gout in gouts.items()}
F, C, W = plotmetrics(outs_, X_test, Y_test, gouts_, epoch, model, nblocks, folder)
return accuracy_meter.avg, F, C, W
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def update_meters(y, pred, loss, loss_meter, accuracy_meter, t = None, time_meter = None):
num = len(y)
correct = (pred == y).sum().item()
accuracy = correct / num
loss_meter.update(loss, num)
accuracy_meter.update(accuracy, num)
if t is not None and time_meter is not None :
time_meter.update(t, 1) | g, h = (c - a) / (b - d), (f - e) / (b - d)
if nblocks < 10 : # point cloud
x = np.linspace(- 20, 10, 100)
y = g * x + h
fig, ax = plt.subplots(3, 3, sharex = 'all', sharey = 'all')
fig.set_size_inches(18, 18)
fig.suptitle('Transformed test data after epoch {}. Linear classifier slope {} intercept {}'.format(epoch, g, h))
for i in range(nblocks):
X = outs_['blocks.' + str(i)]
if nblocks < 10 : # point cloud
row, col = int(i / 3), int(i % 3)
ax[row, col].scatter(X[Y_test == 0, 0], X[Y_test == 0, 1], c = 'red')
ax[row, col].scatter(X[Y_test == 1, 0], X[Y_test == 1, 1], c = 'blue')
ax[row, col].plot(x, y, '-g', label = 'linear classifier')
ax[row, col].set_title('block ' + str(i + 1))
X_ = X_test if i == 0 else outs_['blocks.' + str(i - 1)]
W.append(W2(X_, X)) # W2 movement
X = outs_['blocks.' + str(i) + '.fc2']
F.append(np.mean(np.sqrt(np.sum(np.abs(X) ** 2, axis = -1)))) # forcing function
if i > 0 : # cosine loss
L = gouts_['blocks.' + str(i - 1)]
C.append(np.mean(np.sum(np.multiply(X, L), axis = -1)))
if |
def plotmetrics(outs_, X_test, Y_test, gouts_, epoch, model, nblocks, folder):
F, C, W = [], [], []
a, b, c, d = model.fcOut.weight[0, 0].item(), model.fcOut.weight[0, 1].item(), model.fcOut.weight[1, 0].item(), model.fcOut.weight[1, 1].item()
e, f = model.fcOut.bias[0].item(), model.fcOut.bias[1].item() | random_line_split |
cloud4.py |
def forward(self, x):
z = self.bn(self.fc1(x)) if self.batchnorm else self.fc1(x)
z = functional.relu(z)
z = self.fc2(z)
return z + x, z
class OneRepResNet(nn.Module):
def __init__(self, nblocks, inputdim, hiddendim, batchnorm, nclasses, learnclassifier, yintercept, initialize):
super(OneRepResNet, self).__init__()
self.blocks = nn.ModuleList([ResBlock(inputdim, hiddendim, batchnorm) for i in range(nblocks)])
self.fcOut = nn.Linear(inputdim, nclasses)
self.blocks.apply(initialize)
if learnclassifier == 1:
initialize(self.fcOut)
else:
with torch.no_grad():
self.fcOut.weight = torch.nn.Parameter(torch.tensor([[1., 1.], [0., 0.]]))
self.fcOut.bias = torch.nn.Parameter(torch.tensor([yintercept, 0.]))
if learnclassifier == 0:
for param in self.fcOut.parameters():
param.requires_grad = False
def forward(self, x):
rs = []
for block in self.blocks:
x, r = block(x)
rs.append(r)
z = self.fcOut(x)
return z, rs
def save_input_output_hook(name, mod, inp, out):
global inps, outs
inp0 = inp[0] if type(inp) is tuple else inp
out0 = out[0] if type(out) is tuple else out
inps[name].append(inp0.detach().numpy().copy())
outs[name].append(out0.detach().numpy().copy())
def save_outgrad_hook(name, mod, ginp, gout):
global gouts
gout0 = gout[0] if type(gout) is tuple and gout[0] is not None else gout
gouts[name].append(gout0.detach().numpy().copy())
def register_hooks(model):
for name, m in model.named_modules():
m.register_forward_hook(partial(save_input_output_hook, name))
m.register_backward_hook(partial(save_outgrad_hook, name))
def W2(X1, X2):
n = len(X1)
C = np.zeros((n, n))
for i in range(n):
for j in range(n):
C[i, j] = np.linalg.norm(X1[i] - X2[j])
optimal_plan = ot.emd([], [], C)
optimal_cost = np.sum(optimal_plan * C)
return optimal_cost
def train(model, nepochs, criterion, lambdatransport, lambdaloss0, tau, us, optimizer, trainloader, testloader, X_test, Y_test, nblocks, folder):
ntrain = len(trainloader)
losses, train_accuracy, test_accuracy = [], [], []
lambdaloss = lambdaloss0
i = 0
print('---train')
for epoch in range(1, nepochs + 1):
model.train()
loss_meter, accuracy_meter = AverageMeter(), AverageMeter()
for x, y in trainloader:
i += 1
optimizer.zero_grad()
out, rs = model(x)
if us == 0:
loss = criterion(out, y) + lambdatransport * sum([torch.norm(r, 2) for r in rs])
else:
loss = lambdaloss * criterion(out, y) + lambdatransport * sum([torch.norm(r, 2) for r in rs])
_, pred = torch.max(out.data, 1)
update_meters(y, pred, loss.item(), loss_meter, accuracy_meter)
loss.backward()
optimizer.step()
if us > 0 and i % us == 0:
out, rs = model(x)
lambdaloss += tau * criterion(out, y).item()
epochloss = loss_meter.avg
epochacc = accuracy_meter.avg
acc, F, C, W = test(model, criterion, lambdatransport, lambdaloss, testloader, X_test, Y_test, epoch, nblocks, folder)
print('[epoch %d] lambda loss: %.3f train loss: %.3f train accuracy: %.3f test accuracy: %.3f' % (epoch, lambdaloss, epochloss, epochacc, acc))
losses.append(epochloss)
train_accuracy.append(epochacc)
test_accuracy.append(acc)
if epoch > 3 and test_accuracy[-1] == test_accuracy[-2] == test_accuracy[-3] == 1 and train_accuracy[-1] == train_accuracy[-2] == train_accuracy[-3] == 1:
break
return losses, test_accuracy, epoch, F, C, W
def test(model, criterion, lambdatransport, lambdaloss, testloader, X_test, Y_test, epoch, nblocks, folder):
model.eval()
X = []
loss_meter, accuracy_meter = AverageMeter(), AverageMeter()
global inps, outs, gouts
inps, outs, gouts = collections.defaultdict(list), collections.defaultdict(list), collections.defaultdict(list)
for (x, y) in testloader:
out, rs = model(x)
loss = lambdaloss * criterion(out, y) + lambdatransport * sum([torch.norm(r, 2) for r in rs])
loss.backward()
_, pred = torch.max(out.data, 1)
update_meters(y, pred, loss.item(), loss_meter, accuracy_meter)
inps_ = {name: np.vstack(inp) for name, inp in inps.items()}
outs_ = {name: np.vstack(out) for name, out in outs.items()}
gouts_ = {name: np.vstack(gout) for name, gout in gouts.items()}
F, C, W = plotmetrics(outs_, X_test, Y_test, gouts_, epoch, model, nblocks, folder)
return accuracy_meter.avg, F, C, W
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def update_meters(y, pred, loss, loss_meter, accuracy_meter, t = None, time_meter = None):
num = len(y)
correct = (pred == y).sum().item()
accuracy = correct / num
loss_meter.update(loss, num)
accuracy_meter.update(accuracy, num)
if t is not None and time_meter is not None :
time_meter.update(t, 1)
def plotmetrics(outs_, X_test, Y_test, gouts_, epoch, model, nblocks, folder):
F, C, W = [], [], []
a, b, c, d = model.fcOut.weight[0, 0].item(), model.fcOut.weight[0, 1].item(), model.fcOut.weight[1, 0].item(), model.fcOut.weight[1, 1].item()
e, f = model.fcOut.bias[0].item(), model.fcOut.bias[1].item()
g, h = (c - a) / (b - d), (f - e) / (b - d)
if nblocks < 10 : # point cloud
x = np.linspace(- 20, 10, 100)
y = g * x + h
fig, ax = plt.subplots(3, 3, sharex = 'all', sharey = 'all')
fig.set_size_inches(18, 18)
fig.suptitle('Transformed test data after epoch {}. Linear classifier slope {} intercept {}'.format(epoch, g, h))
for i in range(nblocks):
X = outs_['blocks.' + str(i)]
if nblocks < 10 : # point cloud
row, col = int(i / 3), int(i % 3)
ax[row, col].scatter(X[Y_test == 0, 0], X[Y_test == 0, 1], c = 'red')
ax[row, col].scatter(X[Y_test == 1, 0], X[Y_test == 1, 1], c = 'blue')
ax[row, col].plot(x, y, '-g', label = 'linear classifier')
ax[row, col].set_title('block ' + str(i + 1))
X_ = X_test if i == 0 else outs_['blocks.' + str(i - 1)]
W.append(W2(X_, X)) # W2 movement
X = outs_['blocks.' + str(i) + '.fc2']
F.append(np.mean(np.sqrt(np.sum(np.abs(X) ** 2, axis = -1)))) # forcing function
if i > 0 : # cosine loss
L = | super(ResBlock, self).__init__()
self.batchnorm = batchnorm
self.fc1 = nn.Linear(inputdim, hiddendim)
if batchnorm:
self.bn = nn.BatchNorm1d(hiddendim, track_running_stats = True)
self.fc2 = nn.Linear(hiddendim, inputdim) | identifier_body |
|
cloud4.py | (ndata, testsize, data, noise, factor, dataseed, modelseed, nblocks, datadim, hiddendim, batchnorm, nclasses, learnclassifier,
yintercept, biginit, biginitstd, lambdatransport, lambdaloss0, tau, uzawasteps, batchsize, nepochs, learningrate, beta1, beta2):
folder0 = ('circles' if data == 1 else 'moons') + '-dd' + str(datadim) + 'nc' + str(nclasses)
folder1 = 'points{}testsize{}'.format(ndata, testsize)
folder2 = 'noise{}factor{}'.format(noise, factor)
folder3 = 'hiddendim' + str(hiddendim) + ('batchnorm' if batchnorm else '')
folder4 = 'batchsize' + str(batchsize) + 'int' + str(yintercept) + 'lc' + str(learnclassifier) + ('bi' + str(biginitstd) if biginit else '')
folder5 = 'blocks' + str(nblocks)
folder6 = 'uzawa-lambdaloss' + str(lambdaloss0) + 'tau' + str(tau) + 'us' + str(uzawasteps) if uzawasteps > 0 else 'lambdatransport' + str(lambdatransport)
folder7 = 'ne{}lr{}b1{}b2{}'.format(nepochs, learningrate, beta1, beta2)
folder8 = 'ds{}ms{}'.format(dataseed, modelseed)
folder9 = time.strftime("%Y%m%d-%H%M%S")
folder = os.path.join(os.getcwd(), 'figures3', folder0, folder1, folder2, folder3, folder4, folder5, folder6, folder7, folder8, folder9)
os.makedirs(folder)
return folder
def dataloaders(ndata, testsize, data, noise, factor, dataseed, batchsize):
X, Y = make_circles(ndata, True, noise, dataseed, factor) if data == 1 else make_moons(ndata, True, noise, dataseed)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = testsize, random_state = dataseed)
X_train_ = torch.from_numpy(X_train).type(torch.FloatTensor)
Y_train_ = torch.from_numpy(Y_train).type(torch.LongTensor)
X_test_ = torch.from_numpy(X_test).type(torch.FloatTensor)
Y_test_ = torch.from_numpy(Y_test).type(torch.LongTensor)
train = torchdata.TensorDataset(X_train_, Y_train_)
test = torchdata.TensorDataset(X_test_, Y_test_)
trainloader = torchdata.DataLoader(train, batch_size = batchsize)
testloader = torchdata.DataLoader(test, batch_size = batchsize)
return trainloader, testloader, X, Y, X_test, Y_test
def plotdata(X, Y, learnclassifier, yintercept, title, folder):
plt.figure(figsize = (15, 15))
plt.scatter(X[Y == 0, 0], X[Y == 0, 1], c = 'red')
plt.scatter(X[Y == 1, 0], X[Y == 1, 1], c = 'blue')
if not learnclassifier:
x = np.linspace(- 20, 10, 100)
y = - x - yintercept
plt.plot(x, y, '-g', label = 'linear classifier')
plt.title(title)
plt.savefig(os.path.join(folder, title + '.png'), bbox_inches = 'tight')
plt.close()
def plotscores(losses, accuracy, folder):
plt.figure(1)
plt.subplot(211)
plt.plot(losses)
plt.ylabel('train loss')
plt.subplot(212)
plt.plot(accuracy)
plt.xlabel('epoch')
plt.ylabel('test accuracy')
plt.savefig(os.path.join(folder, 'loss-acc.png'), bbox_inches = 'tight')
plt.close()
def initialize_(biginit, biginitstd, module):
if isinstance(module, nn.Conv1d) or isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode = 'fan_out', nonlinearity = 'relu')
elif isinstance(module, nn.BatchNorm1d) or isinstance(module, nn.BatchNorm2d):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
if biginit:
nn.init.normal_(module.weight, mean = 0.0, std = biginitstd)
nn.init.normal_(module.bias, mean = 0.0, std = biginitstd)
else:
nn.init.kaiming_normal_(module.weight)
nn.init.constant_(module.bias, 0.0)
class ResBlock(nn.Module):
def __init__(self, inputdim, hiddendim, batchnorm):
super(ResBlock, self).__init__()
self.batchnorm = batchnorm
self.fc1 = nn.Linear(inputdim, hiddendim)
if batchnorm:
self.bn = nn.BatchNorm1d(hiddendim, track_running_stats = True)
self.fc2 = nn.Linear(hiddendim, inputdim)
def forward(self, x):
z = self.bn(self.fc1(x)) if self.batchnorm else self.fc1(x)
z = functional.relu(z)
z = self.fc2(z)
return z + x, z
class OneRepResNet(nn.Module):
def __init__(self, nblocks, inputdim, hiddendim, batchnorm, nclasses, learnclassifier, yintercept, initialize):
super(OneRepResNet, self).__init__()
self.blocks = nn.ModuleList([ResBlock(inputdim, hiddendim, batchnorm) for i in range(nblocks)])
self.fcOut = nn.Linear(inputdim, nclasses)
self.blocks.apply(initialize)
if learnclassifier == 1:
initialize(self.fcOut)
else:
with torch.no_grad():
self.fcOut.weight = torch.nn.Parameter(torch.tensor([[1., 1.], [0., 0.]]))
self.fcOut.bias = torch.nn.Parameter(torch.tensor([yintercept, 0.]))
if learnclassifier == 0:
for param in self.fcOut.parameters():
param.requires_grad = False
def forward(self, x):
rs = []
for block in self.blocks:
x, r = block(x)
rs.append(r)
z = self.fcOut(x)
return z, rs
def save_input_output_hook(name, mod, inp, out):
global inps, outs
inp0 = inp[0] if type(inp) is tuple else inp
out0 = out[0] if type(out) is tuple else out
inps[name].append(inp0.detach().numpy().copy())
outs[name].append(out0.detach().numpy().copy())
def save_outgrad_hook(name, mod, ginp, gout):
global gouts
gout0 = gout[0] if type(gout) is tuple and gout[0] is not None else gout
gouts[name].append(gout0.detach().numpy().copy())
def register_hooks(model):
for name, m in model.named_modules():
m.register_forward_hook(partial(save_input_output_hook, name))
m.register_backward_hook(partial(save_outgrad_hook, name))
def W2(X1, X2):
n = len(X1)
C = np.zeros((n, n))
for i in range(n):
for j in range(n):
C[i, j] = np.linalg.norm(X1[i] - X2[j])
optimal_plan = ot.emd([], [], C)
optimal_cost = np.sum(optimal_plan * C)
return optimal_cost
def train(model, nepochs, criterion, lambdatransport, lambdaloss0, tau, us, optimizer, trainloader, testloader, X_test, Y_test, nblocks, folder):
ntrain = len(trainloader)
losses, train_accuracy, test_accuracy = [], [], []
lambdaloss = lambdaloss0
i = 0
print('---train')
for epoch in range(1, nepochs + 1):
model.train()
loss_meter, accuracy_meter = AverageMeter(), AverageMeter()
for x, y in trainloader:
i += 1
optimizer.zero_grad()
out, rs = model(x)
if us == 0:
loss = criterion(out, y) + lambdatransport * sum([torch.norm(r, 2) for r in rs])
else:
loss = lambdaloss * criterion(out, y) + lambdatransport * sum([torch.norm(r, 2) for r in rs])
_, pred = torch.max(out.data, 1)
update_meters(y, pred, loss.item(), loss_meter, accuracy_meter)
loss.backward()
optimizer.step()
if us > 0 and i % us == 0:
out, rs = model(x)
lambdaloss += tau * criterion(out, y).item()
epochloss = loss_meter.avg
epochacc = accuracy_meter.avg
acc, F, C, W = test | makefolder | identifier_name |
|
cloud4.py | _test, Y_test, epoch, nblocks, folder)
print('[epoch %d] lambda loss: %.3f train loss: %.3f train accuracy: %.3f test accuracy: %.3f' % (epoch, lambdaloss, epochloss, epochacc, acc))
losses.append(epochloss)
train_accuracy.append(epochacc)
test_accuracy.append(acc)
if epoch > 3 and test_accuracy[-1] == test_accuracy[-2] == test_accuracy[-3] == 1 and train_accuracy[-1] == train_accuracy[-2] == train_accuracy[-3] == 1:
break
return losses, test_accuracy, epoch, F, C, W
def test(model, criterion, lambdatransport, lambdaloss, testloader, X_test, Y_test, epoch, nblocks, folder):
model.eval()
X = []
loss_meter, accuracy_meter = AverageMeter(), AverageMeter()
global inps, outs, gouts
inps, outs, gouts = collections.defaultdict(list), collections.defaultdict(list), collections.defaultdict(list)
for (x, y) in testloader:
out, rs = model(x)
loss = lambdaloss * criterion(out, y) + lambdatransport * sum([torch.norm(r, 2) for r in rs])
loss.backward()
_, pred = torch.max(out.data, 1)
update_meters(y, pred, loss.item(), loss_meter, accuracy_meter)
inps_ = {name: np.vstack(inp) for name, inp in inps.items()}
outs_ = {name: np.vstack(out) for name, out in outs.items()}
gouts_ = {name: np.vstack(gout) for name, gout in gouts.items()}
F, C, W = plotmetrics(outs_, X_test, Y_test, gouts_, epoch, model, nblocks, folder)
return accuracy_meter.avg, F, C, W
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def update_meters(y, pred, loss, loss_meter, accuracy_meter, t = None, time_meter = None):
num = len(y)
correct = (pred == y).sum().item()
accuracy = correct / num
loss_meter.update(loss, num)
accuracy_meter.update(accuracy, num)
if t is not None and time_meter is not None :
time_meter.update(t, 1)
def plotmetrics(outs_, X_test, Y_test, gouts_, epoch, model, nblocks, folder):
F, C, W = [], [], []
a, b, c, d = model.fcOut.weight[0, 0].item(), model.fcOut.weight[0, 1].item(), model.fcOut.weight[1, 0].item(), model.fcOut.weight[1, 1].item()
e, f = model.fcOut.bias[0].item(), model.fcOut.bias[1].item()
g, h = (c - a) / (b - d), (f - e) / (b - d)
if nblocks < 10 : # point cloud
x = np.linspace(- 20, 10, 100)
y = g * x + h
fig, ax = plt.subplots(3, 3, sharex = 'all', sharey = 'all')
fig.set_size_inches(18, 18)
fig.suptitle('Transformed test data after epoch {}. Linear classifier slope {} intercept {}'.format(epoch, g, h))
for i in range(nblocks):
X = outs_['blocks.' + str(i)]
if nblocks < 10 : # point cloud
row, col = int(i / 3), int(i % 3)
ax[row, col].scatter(X[Y_test == 0, 0], X[Y_test == 0, 1], c = 'red')
ax[row, col].scatter(X[Y_test == 1, 0], X[Y_test == 1, 1], c = 'blue')
ax[row, col].plot(x, y, '-g', label = 'linear classifier')
ax[row, col].set_title('block ' + str(i + 1))
X_ = X_test if i == 0 else outs_['blocks.' + str(i - 1)]
W.append(W2(X_, X)) # W2 movement
X = outs_['blocks.' + str(i) + '.fc2']
F.append(np.mean(np.sqrt(np.sum(np.abs(X) ** 2, axis = -1)))) # forcing function
if i > 0 : # cosine loss
L = gouts_['blocks.' + str(i - 1)]
C.append(np.mean(np.sum(np.multiply(X, L), axis = -1)))
if nblocks < 10 : # plot point cloud
fig.savefig(os.path.join(folder, 'testset_epoch' + str(epoch) + '.png'), bbox_inches = 'tight')
plt.close(fig)
plot_arrays(F, C, W, nblocks, epoch, folder)
return F, C, W
def plot_arrays(F, C, W, nblocks, epoch, folder):
plt.figure(figsize = (7, 7)) # plot cosine loss
plt.plot(list(range(2, nblocks + 1)), C)
plt.title('cos(f, grad L) after epoch ' + str(epoch))
plt.xlabel('block $k$')
plt.ylabel('cos( f(h), grad_h L )')
plt.savefig(os.path.join(folder, 'cos_epoch' + str(epoch) + '.png'), bbox_inches = 'tight')
plt.close()
plt.figure(figsize = (7, 7)) # plot forcing function and W2 movement
plt.plot(list(range(1, nblocks + 1)), F, 'b', label = 'Average $|| f_k(x) ||$')
plt.plot(list(range(1, nblocks + 1)), W, 'r', label = '$W_2$ distance')
plt.title('f and wasserstein distance after epoch ' + str(epoch))
plt.xlabel('block $k$')
plt.legend(loc = 'best')
plt.savefig(os.path.join(folder, 'distance_epoch' + str(epoch) + '.png'), bbox_inches = 'tight')
plt.close()
def experiment(ndata = 1000,
testsize = 0.2,
data = 1,
noise = 0.05,
factor = 0.3,
dataseed = None,
modelseed = None,
nblocks = 9,
inputdim = 2,
hiddendim = 2,
batchnorm = False,
nclasses = 2,
learnclassifier = False,
yintercept = 20,
biginit = False,
biginitstd = 5,
lambdatransport = 1,
lambdaloss0 = 0.1,
tau = 0.1,
us = 5,
batchsize = 10,
nepochs = 100,
learningrate = 0.01,
beta1 = 0.9,
beta2 = 0.99,
experiments = False) :
t0 = time.time()
folder = makefolder(ndata, testsize, data, noise, factor, dataseed, modelseed, nblocks, inputdim, hiddendim, batchnorm, nclasses,
learnclassifier, yintercept, biginit, biginitstd, lambdatransport, lambdaloss0, tau, us, batchsize, nepochs, learningrate, beta1, beta2)
if experiments :
stdout0 = sys.stdout
sys.stdout = open(os.path.join(folder, 'output.txt'), 'wt')
frame = inspect.currentframe()
names, _, _, values = inspect.getargvalues(frame)
print('--- experiment from cloud3.py with parameters')
for name in names:
print('%s = %s' % (name, values[name]))
if us == 0 and (lambdaloss0 != 1 or tau > 0):
print('us = 0 means no uzawa. lambda loss is fixed to 1 and tau to 0')
lambdaloss0, tau = 1, 0
if us > 0 and lambdatransport != 1:
print('us > 0 means uzawa. lambda transport is fixed to 1')
lambdatransport = 1
trainloader, testloader, X, Y, X_test, Y_test = dataloaders(ndata, testsize, data, noise, factor, dataseed, batchsize)
plotdata(X, Y, learnclassifier, yintercept, 'data', folder)
plotdata(X_test, Y_test, learnclassifier, yintercept, 'testdata', folder)
if modelseed is not None:
| torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(modelseed)
np.random.seed(modelseed) | conditional_block |
|
BAAFNet.py | t.name for t in e.op.inputs])
print([t.name for t in e.op.outputs])
a = 1 / 0
print('finished')
self.sess.close()
def evaluate(self, dataset):
# Initialise iterator with validation data
self.sess.run(dataset.val_init_op)
gt_classes = [0 for _ in range(self.config.num_classes)]
positive_classes = [0 for _ in range(self.config.num_classes)]
true_positive_classes = [0 for _ in range(self.config.num_classes)]
val_total_correct = 0
val_total_seen = 0
for step_id in range(self.config.val_steps):
if step_id % 50 == 0:
print(str(step_id) + ' / ' + str(self.config.val_steps))
try:
ops = (self.prob_logits, self.labels, self.accuracy)
stacked_prob, labels, acc = self.sess.run(ops, {self.is_training: False})
pred = np.argmax(stacked_prob, 1)
if not self.config.ignored_label_inds:
pred_valid = pred
labels_valid = labels
else:
invalid_idx = np.where(labels == self.config.ignored_label_inds)[0]
labels_valid = np.delete(labels, invalid_idx)
labels_valid = labels_valid - 1
pred_valid = np.delete(pred, invalid_idx)
correct = np.sum(pred_valid == labels_valid)
val_total_correct += correct
val_total_seen += len(labels_valid)
conf_matrix = confusion_matrix(labels_valid, pred_valid, np.arange(0, self.config.num_classes, 1))
gt_classes += np.sum(conf_matrix, axis=1)
positive_classes += np.sum(conf_matrix, axis=0)
true_positive_classes += np.diagonal(conf_matrix)
except tf.errors.OutOfRangeError:
break
iou_list = []
for n in range(0, self.config.num_classes, 1):
iou = true_positive_classes[n] / float(gt_classes[n] + positive_classes[n] - true_positive_classes[n])
iou_list.append(iou)
mean_iou = sum(iou_list) / float(self.config.num_classes)
log_out('eval accuracy: {}'.format(val_total_correct / float(val_total_seen)), self.Log_file)
log_out('mean IOU:{}'.format(mean_iou), self.Log_file)
mean_iou = 100 * mean_iou
log_out('Mean IoU = {:.1f}%'.format(mean_iou), self.Log_file)
s = '{:5.2f} | '.format(mean_iou)
for IoU in iou_list:
s += '{:5.2f} '.format(100 * IoU)
log_out('-' * len(s), self.Log_file)
log_out(s, self.Log_file)
log_out('-' * len(s) + '\n', self.Log_file)
return mean_iou
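# --- Added illustrative sketch (not part of the original file) ---
# The per-class IoU above is TP / (GT + predicted - TP), accumulated from
# confusion matrices. A numpy-only helper with a hypothetical name:
def _iou_from_confusion(cm):
    import numpy as np
    tp = np.diagonal(cm).astype(float)
    return tp / (cm.sum(axis=1) + cm.sum(axis=0) - tp)
# e.g. _iou_from_confusion(np.array([[50, 2], [3, 45]])) -> array([0.909..., 0.9])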
def get_loss(self, logits, labels, pre_cal_weights):
# calculate the weighted cross entropy according to the inverse frequency
class_weights = tf.convert_to_tensor(pre_cal_weights, dtype=tf.float32)
one_hot_labels = tf.one_hot(labels, depth=self.config.num_classes)
weights = tf.reduce_sum(class_weights * one_hot_labels, axis=1)
unweighted_losses = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_labels)
weighted_losses = unweighted_losses * weights
output_loss = tf.reduce_mean(weighted_losses)
return output_loss
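# --- Added illustrative sketch (not part of the original file) ---
# get_loss above weights the per-point cross entropy by the class weight of each
# point's true label (inverse class frequency). A numpy-only equivalent with
# hypothetical names:
def _weighted_ce_numpy(logits, labels, class_weights):
    import numpy as np
    shifted = logits - logits.max(axis=1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    per_point = -np.log(probs[np.arange(len(labels)), labels] + 1e-12)
    return float(np.mean(np.asarray(class_weights)[labels] * per_point))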
def bilateral_context_block(self, feature, xyz, neigh_idx, d_out, name, is_training):
"""
Inputs:
feature: [B, N, 1, c] input features
xyz: [B, N, 3] input coordinates
neigh_idx: [B, N, k] indices of k neighbors
Output:
output_feat: [B, N, 1, 2*d_out] encoded (output) features
shifted_neigh_xyz: [B, N, k, 3] shifted neighbor coordinates, for augmentation loss
"""
batch_size = tf.shape(xyz)[0]
num_points = tf.shape(xyz)[1]
# Input Encoding
feature = helper_tf_util.conv2d(feature, d_out // 2, [1, 1], name + 'mlp1', [1, 1], 'VALID', True, is_training)
# Bilateral Augmentation
neigh_feat = self.gather_neighbour(tf.squeeze(feature, axis=2), neigh_idx) # B, N, k, d_out/2
neigh_xyz = self.gather_neighbour(xyz, neigh_idx) # B, N, k, 3
tile_feat = tf.tile(feature, [1, 1, self.config.k_n, 1]) # B, N, k, d_out/2
tile_xyz = tf.tile(tf.expand_dims(xyz, axis=2), [1, 1, self.config.k_n, 1]) # B, N, k, 3
feat_info = tf.concat([neigh_feat - tile_feat, tile_feat], axis=-1) # B, N, k, d_out
neigh_xyz_offsets = helper_tf_util.conv2d(feat_info, xyz.get_shape()[-1].value, [1, 1], name + 'mlp5', [1, 1], 'VALID', True, is_training) # B, N, k, 3
shifted_neigh_xyz = neigh_xyz + neigh_xyz_offsets # B, N, k, 3
xyz_info = tf.concat([neigh_xyz - tile_xyz, shifted_neigh_xyz, tile_xyz], axis=-1) # B, N, k, 9
neigh_feat_offsets = helper_tf_util.conv2d(xyz_info, feature.get_shape()[-1].value, [1, 1], name + 'mlp6', [1, 1], 'VALID', True, is_training) # B, N, k, d_out/2
shifted_neigh_feat = neigh_feat + neigh_feat_offsets # B, N, k, d_out/2
xyz_encoding = helper_tf_util.conv2d(xyz_info, d_out//2, [1, 1], name + 'mlp7', [1, 1], 'VALID', True, is_training) # B, N, k, d_out/2
feat_info = tf.concat([shifted_neigh_feat, feat_info], axis=-1) # B, N, k, 3/2*d_out
feat_encoding = helper_tf_util.conv2d(feat_info, d_out//2, [1, 1], name + 'mlp8', [1, 1], 'VALID', True, is_training) # B, N, k, d_out/2
# Mixed Local Aggregation
overall_info = tf.concat([xyz_encoding, feat_encoding], axis=-1) # B, N, k, d_out
k_weights = helper_tf_util.conv2d(overall_info, overall_info.get_shape()[-1].value, [1, 1], name + 'mlp9', [1, 1], 'VALID', bn=False, activation_fn=None) # B, N, k, d_out
k_weights = tf.nn.softmax(k_weights, axis=2) # B, N, k, d_out
overall_info_weighted_sum = tf.reduce_sum(overall_info * k_weights, axis=2, keepdims=True) # B, N, 1, d_out
overall_info_max = tf.reduce_max(overall_info, axis=2, keepdims=True) # B, N, 1, d_out
overall_encoding = tf.concat([overall_info_max, overall_info_weighted_sum], axis=-1) # B, N, 1, 2*d_out
# Output Encoding
overall_encoding = helper_tf_util.conv2d(overall_encoding, d_out, [1, 1], name + 'mlp10', [1, 1], 'VALID', True, is_training) # B, N, 1, d_out
output_feat = helper_tf_util.conv2d(overall_encoding, d_out * 2, [1, 1], name + 'mlp11', [1, 1], 'VALID', True, is_training, activation_fn=tf.nn.leaky_relu) # B, N, 1, 2*d_out
return output_feat, shifted_neigh_xyz
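# --- Added illustrative sketch (not part of the original file) ---
# The "Mixed Local Aggregation" above concatenates a max over the k neighbours
# with a softmax-weighted sum over the same axis. A numpy version in which the
# `scores` argument stands in for the learned 'mlp9' output:
def _mixed_local_aggregation(info, scores):
    import numpy as np
    # info, scores: [B, N, k, d]
    e = np.exp(scores - scores.max(axis=2, keepdims=True))
    w = e / e.sum(axis=2, keepdims=True)                      # softmax over the k axis
    weighted_sum = (info * w).sum(axis=2, keepdims=True)      # [B, N, 1, d]
    hard_max = info.max(axis=2, keepdims=True)                # [B, N, 1, d]
    return np.concatenate([hard_max, weighted_sum], axis=-1)  # [B, N, 1, 2*d]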
@staticmethod
def random_sample(feature, pool_idx):
"""
:param feature: [B, N, d] input features matrix
:param pool_idx: [B, N', max_num] N' < N, N' is the selected position after pooling
:return: pool_features = [B, N', d] pooled features matrix
"""
feature = tf.squeeze(feature, axis=2)
num_neigh = tf.shape(pool_idx)[-1]
d = feature.get_shape()[-1]
batch_size = tf.shape(pool_idx)[0]
pool_idx = tf.reshape(pool_idx, [batch_size, -1])
pool_features = tf.batch_gather(feature, pool_idx)
pool_features = tf.reshape(pool_features, [batch_size, -1, num_neigh, d])
pool_features = tf.reduce_max(pool_features, axis=2, keepdims=True)
return pool_features
@staticmethod
def | nearest_interpolation | identifier_name |
|
BAAFNet.py | .op)
print(e.op.name)
print([t.name for t in e.op.inputs])
print([t.name for t in e.op.outputs])
a = 1 / 0
print('finished')
self.sess.close()
def evaluate(self, dataset):
# Initialise iterator with validation data
self.sess.run(dataset.val_init_op)
gt_classes = [0 for _ in range(self.config.num_classes)]
positive_classes = [0 for _ in range(self.config.num_classes)]
true_positive_classes = [0 for _ in range(self.config.num_classes)]
val_total_correct = 0
val_total_seen = 0
for step_id in range(self.config.val_steps):
if step_id % 50 == 0:
print(str(step_id) + ' / ' + str(self.config.val_steps))
try:
ops = (self.prob_logits, self.labels, self.accuracy)
stacked_prob, labels, acc = self.sess.run(ops, {self.is_training: False})
pred = np.argmax(stacked_prob, 1)
if not self.config.ignored_label_inds:
pred_valid = pred
labels_valid = labels
else:
invalid_idx = np.where(labels == self.config.ignored_label_inds)[0]
labels_valid = np.delete(labels, invalid_idx)
labels_valid = labels_valid - 1
pred_valid = np.delete(pred, invalid_idx)
correct = np.sum(pred_valid == labels_valid)
val_total_correct += correct
val_total_seen += len(labels_valid)
conf_matrix = confusion_matrix(labels_valid, pred_valid, np.arange(0, self.config.num_classes, 1))
gt_classes += np.sum(conf_matrix, axis=1)
positive_classes += np.sum(conf_matrix, axis=0)
true_positive_classes += np.diagonal(conf_matrix)
except tf.errors.OutOfRangeError:
break
iou_list = []
for n in range(0, self.config.num_classes, 1):
iou = true_positive_classes[n] / float(gt_classes[n] + positive_classes[n] - true_positive_classes[n])
iou_list.append(iou)
mean_iou = sum(iou_list) / float(self.config.num_classes)
log_out('eval accuracy: {}'.format(val_total_correct / float(val_total_seen)), self.Log_file)
log_out('mean IOU:{}'.format(mean_iou), self.Log_file)
mean_iou = 100 * mean_iou
log_out('Mean IoU = {:.1f}%'.format(mean_iou), self.Log_file)
s = '{:5.2f} | '.format(mean_iou)
for IoU in iou_list:
s += '{:5.2f} '.format(100 * IoU)
log_out('-' * len(s), self.Log_file)
log_out(s, self.Log_file)
log_out('-' * len(s) + '\n', self.Log_file)
return mean_iou
def get_loss(self, logits, labels, pre_cal_weights):
# calculate the weighted cross entropy according to the inverse frequency
class_weights = tf.convert_to_tensor(pre_cal_weights, dtype=tf.float32)
one_hot_labels = tf.one_hot(labels, depth=self.config.num_classes)
weights = tf.reduce_sum(class_weights * one_hot_labels, axis=1)
unweighted_losses = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_labels)
weighted_losses = unweighted_losses * weights
output_loss = tf.reduce_mean(weighted_losses)
return output_loss
def bilateral_context_block(self, feature, xyz, neigh_idx, d_out, name, is_training):
"""
Inputs:
feature: [B, N, 1, c] input features
xyz: [B, N, 3] input coordinates
neigh_idx: [B, N, k] indices of k neighbors
Output:
output_feat: [B, N, 1, 2*d_out] encoded (output) features
shifted_neigh_xyz: [B, N, k, 3] shifted neighbor coordinates, for augmentation loss
"""
batch_size = tf.shape(xyz)[0]
num_points = tf.shape(xyz)[1]
# Input Encoding
feature = helper_tf_util.conv2d(feature, d_out // 2, [1, 1], name + 'mlp1', [1, 1], 'VALID', True, is_training)
# Bilateral Augmentation
neigh_feat = self.gather_neighbour(tf.squeeze(feature, axis=2), neigh_idx) # B, N, k, d_out/2
neigh_xyz = self.gather_neighbour(xyz, neigh_idx) # B, N, k, 3
tile_feat = tf.tile(feature, [1, 1, self.config.k_n, 1]) # B, N, k, d_out/2
tile_xyz = tf.tile(tf.expand_dims(xyz, axis=2), [1, 1, self.config.k_n, 1]) # B, N, k, 3
feat_info = tf.concat([neigh_feat - tile_feat, tile_feat], axis=-1) # B, N, k, d_out
neigh_xyz_offsets = helper_tf_util.conv2d(feat_info, xyz.get_shape()[-1].value, [1, 1], name + 'mlp5', [1, 1], 'VALID', True, is_training) # B, N, k, 3
shifted_neigh_xyz = neigh_xyz + neigh_xyz_offsets # B, N, k, 3
xyz_info = tf.concat([neigh_xyz - tile_xyz, shifted_neigh_xyz, tile_xyz], axis=-1) # B, N, k, 9
neigh_feat_offsets = helper_tf_util.conv2d(xyz_info, feature.get_shape()[-1].value, [1, 1], name + 'mlp6', [1, 1], 'VALID', True, is_training) # B, N, k, d_out/2
shifted_neigh_feat = neigh_feat + neigh_feat_offsets # B, N, k, d_out/2
xyz_encoding = helper_tf_util.conv2d(xyz_info, d_out//2, [1, 1], name + 'mlp7', [1, 1], 'VALID', True, is_training) # B, N, k, d_out/2
feat_info = tf.concat([shifted_neigh_feat, feat_info], axis=-1) # B, N, k, 3/2*d_out
feat_encoding = helper_tf_util.conv2d(feat_info, d_out//2, [1, 1], name + 'mlp8', [1, 1], 'VALID', True, is_training) # B, N, k, d_out/2
# Mixed Local Aggregation
overall_info = tf.concat([xyz_encoding, feat_encoding], axis=-1) # B, N, k, d_out
k_weights = helper_tf_util.conv2d(overall_info, overall_info.get_shape()[-1].value, [1, 1], name + 'mlp9', [1, 1], 'VALID', bn=False, activation_fn=None) # B, N, k, d_out
k_weights = tf.nn.softmax(k_weights, axis=2) # B, N, k, d_out
overall_info_weighted_sum = tf.reduce_sum(overall_info * k_weights, axis=2, keepdims=True) # B, N, 1, d_out
overall_info_max = tf.reduce_max(overall_info, axis=2, keepdims=True) # B, N, 1, d_out
overall_encoding = tf.concat([overall_info_max, overall_info_weighted_sum], axis=-1) # B, N, 1, 2*d_out
# Output Encoding
overall_encoding = helper_tf_util.conv2d(overall_encoding, d_out, [1, 1], name + 'mlp10', [1, 1], 'VALID', True, is_training) # B, N, 1, d_out
output_feat = helper_tf_util.conv2d(overall_encoding, d_out * 2, [1, 1], name + 'mlp11', [1, 1], 'VALID', True, is_training, activation_fn=tf.nn.leaky_relu) # B, N, 1, 2*d_out
return output_feat, shifted_neigh_xyz
@staticmethod
def random_sample(feature, pool_idx):
| """
:param feature: [B, N, d] input features matrix
:param pool_idx: [B, N', max_num] N' < N, N' is the selected position after pooling
:return: pool_features = [B, N', d] pooled features matrix
"""
feature = tf.squeeze(feature, axis=2)
num_neigh = tf.shape(pool_idx)[-1]
d = feature.get_shape()[-1]
batch_size = tf.shape(pool_idx)[0]
pool_idx = tf.reshape(pool_idx, [batch_size, -1])
pool_features = tf.batch_gather(feature, pool_idx)
pool_features = tf.reshape(pool_features, [batch_size, -1, num_neigh, d])
pool_features = tf.reduce_max(pool_features, axis=2, keepdims=True)
return pool_features | identifier_body |
|
BAAFNet.py | )
f_weights_decoders.append(curr_weight)
# regress the fusion parameters
f_weights = tf.concat(f_weights_decoders, axis=-1)
f_weights = tf.nn.softmax(f_weights, axis=-1)
# adptively fuse them by calculating a weighted sum
f_decoder_final = tf.zeros_like(f_multi_decoder[-1])
for i in range(len(f_multi_decoder)):
f_decoder_final = f_decoder_final + tf.tile(tf.expand_dims(f_weights[:,:,:,i], axis=-1), [1, 1, 1, f_multi_decoder[i].get_shape()[-1].value]) * f_multi_decoder[i]
# ###########################Decoder############################
f_layer_fc1 = helper_tf_util.conv2d(f_decoder_final, 64, [1, 1], 'fc1', [1, 1], 'VALID', True, is_training)
f_layer_fc2 = helper_tf_util.conv2d(f_layer_fc1, 32, [1, 1], 'fc2', [1, 1], 'VALID', True, is_training)
f_layer_drop = helper_tf_util.dropout(f_layer_fc2, keep_prob=0.5, is_training=is_training, scope='dp1')
f_layer_fc3 = helper_tf_util.conv2d(f_layer_drop, self.config.num_classes, [1, 1], 'fc', [1, 1], 'VALID', False,
is_training, activation_fn=None)
f_out = tf.squeeze(f_layer_fc3, [2])
return f_out, new_xyz_list, xyz_list
def train(self, dataset):
log_out('****EPOCH {}****'.format(self.training_epoch), self.Log_file)
self.sess.run(dataset.train_init_op)
while self.training_epoch < self.config.max_epoch:
t_start = time.time()
try:
ops = [self.train_op,
self.extra_update_ops,
self.merged,
self.loss,
self.logits,
self.labels,
self.accuracy]
_, _, summary, l_out, probs, labels, acc = self.sess.run(ops, {self.is_training: True})
self.train_writer.add_summary(summary, self.training_step)
t_end = time.time()
if self.training_step % 50 == 0:
message = 'Step {:08d} L_out={:5.3f} Acc={:4.2f} ''---{:8.2f} ms/batch'
log_out(message.format(self.training_step, l_out, acc, 1000 * (t_end - t_start)), self.Log_file)
self.training_step += 1
except tf.errors.OutOfRangeError:
m_iou = self.evaluate(dataset)
if m_iou > np.max(self.mIou_list):
# Save the best model
snapshot_directory = join(self.saving_path, 'snapshots')
makedirs(snapshot_directory) if not exists(snapshot_directory) else None
self.saver.save(self.sess, snapshot_directory + '/snap', global_step=self.training_step)
self.mIou_list.append(m_iou)
log_out('Best m_IoU is: {:5.3f}'.format(max(self.mIou_list)), self.Log_file)
self.training_epoch += 1
self.sess.run(dataset.train_init_op)
# Update learning rate
op = self.learning_rate.assign(tf.multiply(self.learning_rate,
self.config.lr_decays[self.training_epoch]))
self.sess.run(op)
log_out('****EPOCH {}****'.format(self.training_epoch), self.Log_file)
except tf.errors.InvalidArgumentError as e:
print('Caught a NaN error :')
print(e.error_code)
print(e.message)
print(e.op)
print(e.op.name)
print([t.name for t in e.op.inputs])
print([t.name for t in e.op.outputs])
a = 1 / 0
print('finished')
self.sess.close()
def evaluate(self, dataset):
# Initialise iterator with validation data
self.sess.run(dataset.val_init_op)
gt_classes = [0 for _ in range(self.config.num_classes)]
positive_classes = [0 for _ in range(self.config.num_classes)]
true_positive_classes = [0 for _ in range(self.config.num_classes)]
val_total_correct = 0
val_total_seen = 0
for step_id in range(self.config.val_steps):
if step_id % 50 == 0:
print(str(step_id) + ' / ' + str(self.config.val_steps))
try:
ops = (self.prob_logits, self.labels, self.accuracy)
stacked_prob, labels, acc = self.sess.run(ops, {self.is_training: False})
pred = np.argmax(stacked_prob, 1)
if not self.config.ignored_label_inds:
pred_valid = pred
labels_valid = labels
else:
invalid_idx = np.where(labels == self.config.ignored_label_inds)[0]
labels_valid = np.delete(labels, invalid_idx)
labels_valid = labels_valid - 1
pred_valid = np.delete(pred, invalid_idx)
correct = np.sum(pred_valid == labels_valid)
val_total_correct += correct
val_total_seen += len(labels_valid)
conf_matrix = confusion_matrix(labels_valid, pred_valid, np.arange(0, self.config.num_classes, 1))
gt_classes += np.sum(conf_matrix, axis=1)
positive_classes += np.sum(conf_matrix, axis=0)
true_positive_classes += np.diagonal(conf_matrix)
except tf.errors.OutOfRangeError:
break
iou_list = []
for n in range(0, self.config.num_classes, 1):
iou = true_positive_classes[n] / float(gt_classes[n] + positive_classes[n] - true_positive_classes[n])
iou_list.append(iou)
mean_iou = sum(iou_list) / float(self.config.num_classes)
log_out('eval accuracy: {}'.format(val_total_correct / float(val_total_seen)), self.Log_file)
log_out('mean IOU:{}'.format(mean_iou), self.Log_file)
mean_iou = 100 * mean_iou
log_out('Mean IoU = {:.1f}%'.format(mean_iou), self.Log_file)
s = '{:5.2f} | '.format(mean_iou)
for IoU in iou_list:
s += '{:5.2f} '.format(100 * IoU)
log_out('-' * len(s), self.Log_file)
log_out(s, self.Log_file)
log_out('-' * len(s) + '\n', self.Log_file)
return mean_iou
def get_loss(self, logits, labels, pre_cal_weights):
# calculate the weighted cross entropy according to the inverse frequency
class_weights = tf.convert_to_tensor(pre_cal_weights, dtype=tf.float32)
one_hot_labels = tf.one_hot(labels, depth=self.config.num_classes)
weights = tf.reduce_sum(class_weights * one_hot_labels, axis=1)
unweighted_losses = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_labels)
weighted_losses = unweighted_losses * weights
output_loss = tf.reduce_mean(weighted_losses)
return output_loss
def bilateral_context_block(self, feature, xyz, neigh_idx, d_out, name, is_training):
"""
Inputs:
feature: [B, N, 1, c] input features
xyz: [B, N, 3] input coordinates
neigh_idx: [B, N, k] indices of k neighbors
Output:
output_feat: [B, N, 1, 2*d_out] encoded (output) features
shifted_neigh_xyz: [B, N, k, 3] shifted neighbor coordinates, for augmentation loss
"""
batch_size = tf.shape(xyz)[0]
num_points = tf.shape(xyz)[1]
# Input Encoding
feature = helper_tf_util.conv2d(feature, d_out // 2, [1, 1], name + 'mlp1', [1, 1], 'VALID', True, is_training)
# Bilateral Augmentation
neigh_feat = self.gather_neighbour(tf.squeeze(feature, axis=2), neigh_idx) # B, N, k, d_out/2
neigh_xyz = self.gather_neighbour(xyz, neigh_idx) # B, N, k, 3
tile_feat = tf.tile(feature, [1, 1, self.config.k_n, 1]) # B, N, k, d_out/2
tile_xyz = tf.tile(tf.expand_dims(xyz, axis=2), [1, 1, self.config.k_n, 1]) # B, N, k, 3
feat_info = tf.concat([neigh_feat - tile_feat, tile_feat], axis=-1) # B, N, k, d_out | neigh_xyz_offsets = helper_tf_util.conv2d(feat_info, xyz.get_shape()[-1].value, [1, 1], name + 'mlp5', [1, 1], 'VALID', True, is_training) # B, N, k, 3
shifted_neigh_xyz = neigh_xyz + neigh_xyz_offsets # B, N, k, 3
xyz_info = tf.concat([neigh_xyz - tile_xyz, shifted_neigh_xyz, tile_xyz], axis=-1) # B, N, k, 9 | random_line_split |
|
BAAFNet.py |
else:
return tf.gather_nd(pts, idx), tf.gather_nd(feature, idx)
class Network:
def __init__(self, dataset, config):
flat_inputs = dataset.flat_inputs
self.config = config
# Path of the result folder
if self.config.saving:
if self.config.saving_path is None:
self.saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
else:
self.saving_path = self.config.saving_path
makedirs(self.saving_path) if not exists(self.saving_path) else None
with tf.variable_scope('inputs'):
self.inputs = dict()
num_layers = self.config.num_layers
self.inputs['features'] = flat_inputs[0]
self.inputs['labels'] = flat_inputs[1]
self.inputs['input_inds'] = flat_inputs[2]
self.inputs['cloud_inds'] = flat_inputs[3]
self.labels = self.inputs['labels']
self.is_training = tf.placeholder(tf.bool, shape=())
self.training_step = 1
self.training_epoch = 0
self.correct_prediction = 0
self.accuracy = 0
self.mIou_list = [0]
self.class_weights = DP.get_class_weights(dataset.name)
self.time_stamp = time.strftime('_%Y-%m-%d_%H-%M-%S', time.gmtime())
self.Log_file = open('log_train_' + dataset.name + str(dataset.val_split) + self.time_stamp + '.txt', 'a')
with tf.variable_scope('layers'):
self.logits, self.new_xyz, self.xyz = self.inference(self.inputs, self.is_training)
#####################################################################
# Ignore the invalid point (unlabeled) when calculating the loss #
#####################################################################
with tf.variable_scope('loss'):
self.logits = tf.reshape(self.logits, [-1, config.num_classes])
self.labels = tf.reshape(self.labels, [-1])
# Boolean mask of points that should be ignored
ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)
for ign_label in self.config.ignored_label_inds:
ignored_bool = tf.logical_or(ignored_bool, tf.equal(self.labels, ign_label))
# Collect logits and labels that are not ignored
valid_idx = tf.squeeze(tf.where(tf.logical_not(ignored_bool)))
valid_logits = tf.gather(self.logits, valid_idx, axis=0)
valid_labels_init = tf.gather(self.labels, valid_idx, axis=0)
# Reduce label values in the range of logit shape
reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)
inserted_value = tf.zeros((1,), dtype=tf.int32)
for ign_label in self.config.ignored_label_inds:
reducing_list = tf.concat([reducing_list[:ign_label], inserted_value, reducing_list[ign_label:]], 0)
valid_labels = tf.gather(reducing_list, valid_labels_init)
aug_loss_weights = tf.constant([0.1, 0.1, 0.3, 0.5, 0.5])
aug_loss = 0
for i in range(self.config.num_layers):
centroids = tf.reduce_mean(self.new_xyz[i], axis=2)
relative_dis = tf.sqrt(tf.reduce_sum(tf.square(centroids-self.xyz[i]), axis=-1) + 1e-12)
aug_loss = aug_loss + aug_loss_weights[i] * tf.reduce_mean(tf.reduce_mean(relative_dis, axis=-1), axis=-1)
self.loss = self.get_loss(valid_logits, valid_labels, self.class_weights) + aug_loss
with tf.variable_scope('optimizer'):
self.learning_rate = tf.Variable(config.learning_rate, trainable=False, name='learning_rate')
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.variable_scope('results'):
self.correct_prediction = tf.nn.in_top_k(valid_logits, valid_labels, 1)
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.prob_logits = tf.nn.softmax(self.logits)
tf.summary.scalar('learning_rate', self.learning_rate)
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.accuracy)
my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self.saver = tf.train.Saver(my_vars, max_to_keep=100)
c_proto = tf.ConfigProto()
c_proto.gpu_options.allow_growth = True
self.sess = tf.Session(config=c_proto)
self.merged = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(config.train_sum_dir, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
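# Illustrative aside (standalone sketch): the "reducing_list" remapping used in the loss
# scope above, in NumPy. Inserting a dummy 0 at each ignored label index shifts every raw
# label above it down by one, so the surviving labels form a contiguous 0..num_classes-1
# range (entries equal to an ignored label were already filtered out by the mask).
import numpy as np

def remap_labels(raw_labels, num_classes, ignored_label_inds):
    table = np.arange(num_classes, dtype=np.int64)
    for ign in ignored_label_inds:
        table = np.concatenate([table[:ign], [0], table[ign:]])
    return table[np.asarray(raw_labels)]
# remap_labels([0, 2, 3, 4], num_classes=4, ignored_label_inds=[1]) -> array([0, 1, 2, 3])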
def inference(self, inputs, is_training):
d_out = self.config.d_out
ratio = self.config.sub_sampling_ratio
k_n = self.config.k_n
feature = inputs['features']
og_xyz = feature[:, :, :3]
feature = tf.layers.dense(feature, 8, activation=None, name='fc0')
feature = tf.nn.leaky_relu(tf.layers.batch_normalization(feature, -1, 0.99, 1e-6, training=is_training))
feature = tf.expand_dims(feature, axis=2)
# ###########################Encoder############################
f_encoder_list = []
input_xyz = og_xyz
input_up_samples = []
new_xyz_list = []
xyz_list = []
n_pts = self.config.num_points
for i in range(self.config.num_layers):
# Farthest Point Sampling:
input_neigh_idx = tf.py_func(DP.knn_search, [input_xyz, input_xyz, k_n], tf.int32)
n_pts = n_pts // ratio[i]
sub_xyz, inputs_sub_idx = tf.cond(tf.equal(is_training, tf.constant(True)), lambda: sampling(self.config.batch_size, n_pts, input_xyz, input_neigh_idx), lambda: sampling(self.config.val_batch_size, n_pts, input_xyz, input_neigh_idx))
inputs_interp_idx = tf.py_func(DP.knn_search, [sub_xyz, input_xyz, 1], tf.int32)
input_up_samples.append(inputs_interp_idx)
# Bilateral Context Encoding
f_encoder_i, new_xyz = self.bilateral_context_block(feature, input_xyz, input_neigh_idx, d_out[i],
'Encoder_layer_' + str(i), is_training)
f_sampled_i = self.random_sample(f_encoder_i, inputs_sub_idx)
feature = f_sampled_i
if i == 0:
f_encoder_list.append(f_encoder_i)
f_encoder_list.append(f_sampled_i)
xyz_list.append(input_xyz)
new_xyz_list.append(new_xyz)
input_xyz = sub_xyz
# ###########################Encoder############################
# ###########################Decoder############################
# Adaptive Fusion Module
f_multi_decoder = [] # full-sized feature maps
f_weights_decoders = [] # point-wise adaptive fusion weights
for n in range(self.config.num_layers):
feature = f_encoder_list[-1-n]
feature = helper_tf_util.conv2d(feature, feature.get_shape()[3].value, [1, 1],
'decoder_0' + str(n),
[1, 1], 'VALID', True, is_training)
f_decoder_list = []
for j in range(self.config.num_layers-n):
f_interp_i = self.nearest_interpolation(feature, input_up_samples[-j - 1 -n])
f_decoder_i = helper_tf_util.conv2d_transpose(tf.concat([f_encoder_list[-j - 2 -n], f_interp_i], axis=3),
f_encoder_list[-j - 2 -n].get_shape()[-1].value, [1, 1],
'Decoder_layer_' + str(n) + '_' + str(j), [1, 1], 'VALID', bn=True,
is_training=is_training)
feature = f_decoder_i
f_decoder_list.append(f_decoder_i)
# collect full-sized feature maps which are upsampled from multiple resolutions
f_multi_decoder.append(f_decoder_list[-1])
# summarize point-level information
curr_weight = helper_tf_util.conv2d(f_decoder_list[-1], 1, [1, 1], 'Decoder_weight_' + str(n), [1, 1], 'VALID', bn=False, activation_fn=None)
f_weights_decoders.append(curr_weight)
# regress the fusion parameters
f_weights = tf.concat(f_weights_decoders, axis=-1)
f_weights = tf.nn.softmax(f_weights, axis=-1)
# adaptively fuse them by calculating a weighted sum
f_decoder_final = tf.zeros_like(f_multi_decoder[-1])
for i in range(len(f_multi_decoder)):
f_decoder_final = f_decoder_final + tf.tile(tf.expand_dims(f_weights[:,:,:,i], axis=-1), [1, 1, 1, f_multi_decoder[i].get_shape()[-1].value]) * f_multi_decoder[i]
# ###########################Decoder############################
f_layer_fc1 = helper_tf_util.conv2d(f_decoder_final, 64, [1, 1], 'fc1', [1, 1], 'VALID', True, is_training)
f_layer_fc2 = helper_tf_util.conv2d(f_layer_fc1, 32, | return tf.gather_nd(pts, idx) | conditional_block |
|
mod.rs | /html/rfc7230#section-2.6).
fn parse_request_version<T>(it: &mut StreamReader<T>) -> Result<(u8, u8), ParseError>
where T: Read {
let expected_it = "HTTP/".bytes();
for expected in expected_it {
match it.next() {
Some(b) if b == expected => (),
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
}
}
let major = match it.next() {
Some(n) if n >= 48 && n <= 57 => n - 48,
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
};
match it.next() {
Some(b'.') => (),
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
}
let minor = match it.next() {
Some(n) if n >= 48 && n <= 57 => n - 48,
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
};
// Should now be at the end of the Request Line
match it.next() {
Some(b'\r') => (),
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
}
match it.next() {
Some(b'\n') => (),
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
}
Ok((major, minor))
}
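// Illustrative aside (standalone sketch, not part of the real parser): the same
// "HTTP/<major>.<minor>" scan written against a plain byte slice, so the ASCII-digit
// arithmetic (b'0' is 48) is easy to check in isolation.
// e.g. parse_version_sketch(b"HTTP/1.1") == Some((1, 1)); parse_version_sketch(b"HTTX/1.1") == None.
fn parse_version_sketch(bytes: &[u8]) -> Option<(u8, u8)> {
    let mut it = bytes.iter().copied();
    for expected in b"HTTP/".iter().copied() {
        if it.next()? != expected {
            return None;
        }
    }
    let major = match it.next()? {
        n @ b'0'..=b'9' => n - b'0',
        _ => return None,
    };
    if it.next()? != b'.' {
        return None;
    }
    let minor = match it.next()? {
        n @ b'0'..=b'9' => n - b'0',
        _ => return None,
    };
    Some((major, minor))
}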
/// Parse the request headers from `it` into `builder`, as specified in
/// [RFC 7230 §3.2](https://tools.ietf.org/html/rfc7230#section-3.2)
fn parse_headers<T: Read>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError> {
// An enum to store the current state of the parser
enum ParserState {
// After a new line, ready to parse the header name
Start,
// Currently parsing the header name
Name {name: Vec<u8>},
// Currently parsing the whitespace after the : but before the value
ValueLeadingWS {name: String},
// Currently parsing the value
Value {name: String, value: Vec<u8>},
// Currently parsing the new line (CR (here) LF)
NewLine,
// Currently parsing the final new line (CR LF CR (here) LF)
FinalNewLine,
};
let mut state = ParserState::Start;
'outer: loop {
let b = match it.next() {
None => return Err(ParseError::EOF),
Some(b) => b,
};
// Wrap this in a loop so that we can cheaply transition to a different state without having consumed
// any characters
loop {
match state {
ParserState::Start => match b {
b'\r' => state = ParserState::FinalNewLine,
_ => {
// Move straight into Name without consuming this character
state = ParserState::Name {
name: Vec::new()
};
continue;
}
},
ParserState::Name {name: mut n} => match TokenType::from(b) {
TChar(c) => {
n.push(c);
state = ParserState::Name {name: n}
},
Invalid(b':') => {
// Safe to convert to UTF-8 because it was constructed from just ASCII characters
let name = String::from_utf8(n).unwrap();
state = ParserState::ValueLeadingWS {name: name};
},
Invalid(_) => return Err(ParseError::IllegalCharacter),
},
ParserState::ValueLeadingWS {name: n} => match b {
b' ' | b'\t' => state = ParserState::ValueLeadingWS {name: n},
_ => {
// Move straight into Value without consuming
state = ParserState::Value {
name: n,
value: Vec::new()
};
continue;
}
},
ParserState::Value {name: n, value: mut v} => match b {
b'\t' | b' '...b'~' => {
v.push(b);
state = ParserState::Value {name: n, value: v};
},
0x80...0xFF => {
// The specification says that headers containing these characters SHOULD be considered as
// opaque data. However, doing that means we can't treat the headers as strings, because
// this would break UTF-8 compliance, thereby vastly increasing the complexity of the rest
// of the code. The non-ASCII characters will therefore be silently discarded
state = ParserState::Value {name: n, value: v};
}
b'\r' => {
// Because we discarded the invalid characters, it's safe to convert to UTF-8
let value = String::from_utf8(v).unwrap();
// Store the header
builder.add_header(n, value);
// Transition to expect the LF
state = ParserState::NewLine;
},
_ => return Err(ParseError::IllegalCharacter),
},
ParserState::NewLine => match b {
b'\n' => state = ParserState::Start,
_ => return Err(ParseError::IllegalCharacter),
},
ParserState::FinalNewLine => match b {
b'\n' => break 'outer,
_ => return Err(ParseError::IllegalCharacter),
}
}
// Consume the next character
break;
}
}
Ok(())
}
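// Illustrative aside (standalone sketch, not part of the real parser): the essence of
// the per-line state machine above -- split a header line at the first ':' and trim the
// leading spaces/tabs from the value. The real code additionally validates token
// characters and silently drops non-ASCII value bytes.
// e.g. split_header_line("Host: example.com") == Some(("Host", "example.com"))
fn split_header_line(line: &str) -> Option<(&str, &str)> {
    let idx = line.find(':')?;
    let name = &line[..idx];
    let value = line[idx + 1..].trim_start_matches(|c| c == ' ' || c == '\t');
    Some((name, value))
}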
fn parse_body<T: Read>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError> {
println!("Starting parse body");
// TODO: can't read to end
Ok(())/*
match it.get_inner().read_to_end(builder.get_body()) {
Ok(0) => Ok(()),
Ok(_) => {
println!("Body read complete");
Ok(())
},
Err(e) => Err(ParseError::new_server_error(e)),
}*/
}
}
unsafe impl Send for Request {}
// nb. Not syncable until fully constructed (when the hashmap becomes effectively immutable)
// Public interface is completely syncable
unsafe impl Sync for Request {}
/// HTTP Methods (verbs), as defined by [RFC 7231 §4](https://tools.ietf.org/html/rfc7231#section-4)
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum Method {
Get,
Post,
Patch,
Delete,
Put,
Head,
Connect,
Options,
Trace,
Custom(Arc<Vec<u8>>),
}
impl Method {
/// Construct a `Method` from the corresponding case-sensitive name, provided as a vector of bytes.
/// Ownership of the vector is required to store the name in the event that it isn't a known method.
pub fn from(name: Vec<u8>) -> Method {
use self::Method::*;
if name.as_slice() == &b"GET"[..] { return Get };
if name.as_slice() == &b"POST"[..] { return Post };
if name.as_slice() == &b"PATCH"[..] { return Patch };
if name.as_slice() == &b"DELETE"[..] { return Delete };
if name.as_slice() == &b"PUT"[..] { return Put };
if name.as_slice() == &b"HEAD"[..] { return Head };
if name.as_slice() == &b"CONNECT"[..] { return Connect };
if name.as_slice() == &b"OPTIONS"[..] { return Options };
if name.as_slice() == &b"TRACE"[..] { return Trace };
return Custom(Arc::from(name));
}
}
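// Illustrative aside: expected Method::from behaviour -- known verbs map to their
// variants and matching is case-sensitive, so anything else is preserved as Custom bytes.
#[test]
fn method_from_sketch() {
    use std::sync::Arc;
    assert_eq!(Method::from(b"GET".to_vec()), Method::Get);
    assert_eq!(Method::from(b"get".to_vec()), Method::Custom(Arc::new(b"get".to_vec())));
}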
unsafe impl Send for Method {}
/// A struct that can be used to incrementally build up a request, so the components are optional
#[derive(Debug, Eq, PartialEq)]
struct RequestBuilder {
version: Option<(u8, u8)>,
method: Option<Method>,
target: Option<String>,
headers: HashMap<String, String>,
body: Vec<u8>,
}
impl RequestBuilder {
/// Construct a new RequestBuilder
pub fn new() -> RequestBuilder {
RequestBuilder {
version: None,
method: None,
target: None,
headers: HashMap::new(),
body: Vec::new(),
}
}
/// Set the HTTP version of this request
pub fn set_version(&mut self, major: u8, minor: u8) {
self.version = Some((major, minor));
}
/// Set the request method
pub fn set_method(&mut self, method: Method) {
self.method = Some(method);
}
/// Set the request target
pub fn set_target(&mut self, target: String) {
self.target = Some(target);
}
/// Set the body of the request
pub fn get_body(&mut self) -> &mut Vec<u8> {
&mut self.body
}
/// Add a header. This method currently stores the latest version in the event of duplicate headers.
pub fn add_header(&mut self, key: String, val: String) {
self.headers.insert(key, val);
}
pub fn get_he | aders(&self | identifier_name |
|
mod.rs | , &mut it)?;
// Sanity checks
Request::parse_body(&mut builder, &mut it)?;
Ok(builder.into_request().unwrap())
}
/// Parse the request line, which is the first line of the request
///
/// It should have the form `Method Target HTTP/Version`, as defined in
/// [RFC 7230 §3.1.1](https://tools.ietf.org/html/rfc7230#section-3.1.1).
fn parse_request_line<T>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError>
where T: Read {
// Request method
let method = Request::parse_request_method(it)?;
builder.set_method(method);
// Target
let target = Request::parse_request_target(it)?;
builder.set_target(target);
// Version
let version = Request::parse_request_version(it)?;
builder.set_version(version.0, version.1);
Ok(())
}
/// Parse the method (GET, POST, etc). It should be 1 or more visible characters, treated case-sensitively, and it
/// is followed by a single space (according to
/// [RFC 7230 §3.1.1](https://tools.ietf.org/html/rfc7230#section-3.1.1)).
fn parse_request_method<T>(it: &mut StreamReader<T>) -> Result<Method, ParseError>
where T: Read {
let mut method = Vec::new();
// Read bytes
for b in it {
match TokenType::from(b) {
TChar(c) => method.push(c),
Invalid(b' ') => return Ok(Method::from(method)),
Invalid(_) => return Err(ParseError::IllegalCharacter),
}
}
Err(ParseError::EOF)
}
/// Parse the target (requested resource). The most general form is 1 or more visible characters (followed by a
/// single space), though more restrictive parsing would be permitted as defined in
/// [RFC 7230 §5.3](https://tools.ietf.org/html/rfc7230#section-5.3).
fn parse_request_target<T>(it: &mut StreamReader<T>) -> Result<String, ParseError>
where T: Read {
let mut target = Vec::new();
// Read bytes
for b in it {
match b {
// Allowed characters in URLs per [RFC 3986](https://tools.ietf.org/html/rfc3986#appendix-A)
b'!' | b'#'...b';' | b'=' | b'?'...b'[' | b']'...b'z' | b'|' | b'~' => target.push(b),
b' ' => return Ok(String::from_utf8(target).unwrap()), // Safe to unwrap because input is sanitised
_ => return Err(ParseError::IllegalCharacter),
}
}
Err(ParseError::EOF)
}
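// Illustrative aside (standalone sketch): the byte-level "allowed in a request target"
// check used above, pulled out as a predicate so the accepted ranges are easier to read.
// e.g. is_target_byte(b'/') == true, is_target_byte(b' ') == false (space ends the target).
fn is_target_byte(b: u8) -> bool {
    matches!(b, b'!' | b'#'..=b';' | b'=' | b'?'..=b'[' | b']'..=b'z' | b'|' | b'~')
}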
/// Parse the HTTP version, which should be HTTP/maj.min, where maj and min are single digits, as defined in
/// [RFC 7230 §2.6](https://tools.ietf.org/html/rfc7230#section-2.6).
fn parse_request_version<T>(it: &mut StreamReader<T>) -> Result<(u8, u8), ParseError>
where T: Read {
let expected_it = "HTTP/".bytes();
for expected in expected_it {
match it.next() {
Some(b) if b == expected => (),
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
}
}
let major = match it.next() {
Some(n) if n >= 48 && n <= 57 => n - 48,
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
};
match it.next() {
Some(b'.') => (),
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
}
let minor = match it.next() {
Some(n) if n >= 48 && n <= 57 => n - 48,
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
};
// Should now be at the end of the Request Line
match it.next() {
Some(b'\r') => (),
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
}
match it.next() {
Some(b'\n') => (),
Some(_) => return Err(ParseError::IllegalCharacter),
None => return Err(ParseError::EOF),
}
Ok((major, minor))
}
/// Parse the request headers from `it` into `builder`, as specified in
/// [RFC 7230 §3.2](https://tools.ietf.org/html/rfc7230#section-3.2)
fn parse_headers<T: Read>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError> {
// An enum to store the current state of the parser
enum ParserState {
// After a new line, ready to parse the header name
Start,
// Currently parsing the header name
Name {name: Vec<u8>},
// Currently parsing the whitespace after the : but before the value
ValueLeadingWS {name: String},
// Currently parsing the value
Value {name: String, value: Vec<u8>},
// Currently parsing the new line (CR (here) LF)
NewLine,
// Currently parsing the final new line (CR LF CR (here) LF)
FinalNewLine,
};
let mut state = ParserState::Start;
'outer: loop {
let b = match it.next() {
None => return Err(ParseError::EOF),
Some(b) => b,
};
// Wrap this in a loop so that we can cheaply transition to a different state without having consumed
// any characters
loop {
match state {
ParserState::Start => match b {
b'\r' => state = ParserState::FinalNewLine,
_ => {
// Move straight into Name without consuming this character
state = ParserState::Name {
name: Vec::new()
};
continue;
}
},
ParserState::Name {name: mut n} => match TokenType::from(b) {
TChar(c) => {
n.push(c);
state = ParserState::Name {name: n}
},
Invalid(b':') => {
// Safe to convert to UTF-8 because it was constructed from just ASCII characters
let name = String::from_utf8(n).unwrap();
state = ParserState::ValueLeadingWS {name: name};
},
Invalid(_) => return Err(ParseError::IllegalCharacter),
},
ParserState::ValueLeadingWS {name: n} => match b {
b' ' | b'\t' => state = ParserState::ValueLeadingWS {name: n},
_ => {
// Move straight into Value without consuming
state = ParserState::Value {
name: n,
value: Vec::new()
};
continue;
}
},
ParserState::Value {name: n, value: mut v} => match b {
b'\t' | b' '...b'~' => {
v.push(b);
state = ParserState::Value {name: n, value: v};
},
0x80...0xFF => {
// The specification says that headers containing these characters SHOULD be considered as
// opaque data. However, doing that means we can't treat the headers as strings, because
// this would break UTF-8 compliance, thereby vastly increasing the complexity of the rest
// of the code. The non-ASCII characters will therefore be silently discarded
state = ParserState::Value {name: n, value: v};
}
b'\r' => {
// Because we discarded the invalid characters, it's safe to convert to UTF-8
let value = String::from_utf8(v).unwrap();
// Store the header
builder.add_header(n, value);
// Transition to expect the LF
state = ParserState::NewLine;
},
_ => return Err(ParseError::IllegalCharacter),
},
ParserState::NewLine => match b {
b'\n' => state = ParserState::Start,
_ => return Err(ParseError::IllegalCharacter),
},
ParserState::FinalNewLine => match b {
b'\n' => break 'outer,
_ => return Err(ParseError::IllegalCharacter),
}
}
// Consume the next character
break;
}
}
Ok(())
}
fn parse_body<T: Read>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError> {
println!("Starting parse body");
// TODO: can't read to end
Ok(())/* | match it.get_inner().read_to_end(builder.get_body()) {
Ok(0) => Ok(()),
Ok(_) => {
println!("Body read complete");
Ok(())
},
Err(e) => Err(ParseError::new_server | random_line_split |
|
listing.go | "`
SubPrefixInclude []string `env:"key=SUB_PREFIX_INCLUDE decode=yaml"`
// private (non-exported) vars for processing only; e.g. lookup maps
subPrefixExcludeLookup map[string]bool
subPrefixIncludeLookup map[string]bool
}
var cfg *config
// Don't take seriously, just playing around with func objects and redirection possibilities
var output OutputFunc
type OutputFunc func(p string)
func stdoutOutput(p string) {
fmt.Fprint(os.Stdout, p)
}
func stderrOutput(p string) {
fmt.Fprint(os.Stderr, p)
}
//var outputFileHandle *os.File
//func fileOutput(p string) {
// outputFileHandle.WriteString(p)
//}
type ListObjectProcessFunc func(o *ListObject)
func outputObject(o *ListObject) {
output(printObject(o))
}
var files []ListObject = make([]ListObject, 0)
func appendObject(o *ListObject) {
files = append(files, *o)
}
type ListAllObjectsRequest struct {
Region string
Bucket string
Prefix string
Process ListObjectProcessFunc
OutputFile string
}
func main() {
t := metrics.NewTimer()
metrics.Register("duration", t)
cfg = &config{}
if err := env.Process(cfg); err != nil {
log.Fatal(err)
}
// populate lookups for quick operations
cfg.subPrefixExcludeLookup = make(map[string]bool)
cfg.subPrefixIncludeLookup = make(map[string]bool)
for _, adv := range cfg.SubPrefixExclude {
cfg.subPrefixExcludeLookup[adv] = true
}
for _, adv := range cfg.SubPrefixInclude {
cfg.subPrefixIncludeLookup[adv] = true
}
t.Time(run)
log.Printf("Execution time: %v", time.Duration(t.Sum())*time.Nanosecond)
metrics.WriteJSONOnce(metrics.DefaultRegistry, os.Stderr)
}
func run() {
output = stdoutOutput
listAllObjectsQueue := make(chan ListAllObjectsRequest, 5)
var wg sync.WaitGroup
doneCallback := func() {
defer wg.Done()
log.Printf("worker done")
}
for i := 0; i < cfg.ListAllWorkersCount; i++ {
log.Printf("Starting worker%d", i+1)
worker := NewListObjectRequestWorker(i+1, listAllObjectsQueue, doneCallback)
wg.Add(1)
go func() {
// wg.Add(1)
worker.Start()
}()
}
go func() {
ListAllObjectsProducer(listAllObjectsQueue)
close(listAllObjectsQueue)
}()
wg.Wait()
}
func ListAllObjectsProducer(listAllObjectsQueue chan ListAllObjectsRequest) {
log.Println("Starting producer")
// Create services we will use, inlining the config instead of using `session`
// V4 signing requires to connect to the explicit region
s3Services := make(map[string]*s3.S3)
s3Services[cfg.Region] = s3.New(session.New(), aws.NewConfig().WithRegion(cfg.Region))
files = make([]ListObject, 0)
cfg.MaxKeys = cfg.AdvMaxKeys
cfg.MaxPages = cfg.AdvMaxPages
// Find files in the source buckets in different regions
listObjects(cfg.Bucket, cfg.Prefix, "/", s3Services[cfg.Region], appendObject)
// TODO fix this hack... these need to be in context of the request, not globals
cfg.MaxKeys = cfg.FileMaxKeys
cfg.MaxPages = cfg.FileMaxPages
log.Printf("Found %v objects", len(files))
// files = make([]ListObject, 0)
// cfg.MaxPages = 1
// cfg.MaxKeys = 5
// // Find files in the source buckets in different regions
// listFiles(cfg.Bucket, cfg.Prefix, cfg.Delimiter, s3Services[cfg.Region], appendObject)
//
// log.Printf("Found %v files", len(files))
for _, file := range files {
if file.Type != "PREFIX" {
continue
}
fileParts := strings.Split(strings.Trim(file.File, "/"), "/")
advertiserId := fileParts[len(fileParts)-1]
outputFile := fmt.Sprintf("%v/advertiser_%v", cfg.OutputLocation, advertiserId)
if !cfg.Force {
if _, err := os.Stat(outputFile); !os.IsNotExist(err) {
log.Printf("%v file already exists, skipping", outputFile)
continue
}
}
if _, ok := cfg.subPrefixExcludeLookup[advertiserId]; ok == true {
log.Printf("%v is in the sub prefix exclude list", advertiserId)
continue
}
if len(cfg.subPrefixIncludeLookup) > 0 {
if _, ok := cfg.subPrefixIncludeLookup[advertiserId]; ok == false {
log.Printf("%v is not in the sub prefix include list", advertiserId)
continue
}
}
request := ListAllObjectsRequest{Region: cfg.Region, Bucket: cfg.Bucket, Prefix: fmt.Sprintf("%v%v/", cfg.Prefix, advertiserId), Process: outputObject, OutputFile: outputFile}
listAllObjectsQueue <- request
}
log.Println("Finish producer")
}
func (w ListObjectRequestWorker) Start() {
processRequestChan := make(chan ListAllObjectsRequest)
go func() {
defer close(processRequestChan)
for request := range w.RequestQueue {
log.Printf("popping request from queue for worker%d", w.ID)
select {
case processRequestChan <- request:
log.Printf("push request to process channel for worker%d", w.ID)
case <-w.QuitChan:
log.Printf("drainer for worker%d stopping", w.ID)
return
}
}
log.Printf("drainer for worker%d finished", w.ID)
}()
go func() {
defer w.DoneCallback()
for request := range processRequestChan {
log.Printf("processing request for worker%d", w.ID)
f, err := os.OpenFile(request.OutputFile, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
}
fileOutput := func(o *ListObject) {
f.WriteString(printObject(o))
}
f.WriteString("\n")
listObjects(request.Bucket, request.Prefix, "", s3.New(session.New(), aws.NewConfig().WithRegion(request.Region)), fileOutput)
f.Close()
}
log.Printf("processor for worker%d finished", w.ID)
}()
}
// f, err := os.OpenFile(request.OutputFile, os.O_WRONLY | os.O_CREATE, 0666)
//
// if err != nil {
// log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
// }
//
// outputFileHandle = f
// output = fileOutput
// defer f.Close()
// f.WriteString("\n")
// }
// }()
// select {
// case request := <-w.RequestQueue:
// // f, err := os.OpenFile(request.OutputFile, os.O_WRONLY | os.O_CREATE, 0666)
// //
// // if err != nil {
// // log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
// // }
// //
// // outputFileHandle = f
// // output = fileOutput
//
// // defer f.Close()
//
//
// // Find files in the source buckets in different regions
// listObjects(request.Bucket, request.Prefix, "", request.Region, request.Output)
//
// // f.WriteString("\n")
// case <-w.QuitChan:
// log.Printf("worker%d stopping", w.ID)
// return
// }
// }
// }()
//}
func (w ListObjectRequestWorker) Stop() {
go func() {
w.QuitChan <- true
}()
}
type ListObjectRequestWorker struct {
ID int
RequestQueue chan ListAllObjectsRequest
QuitChan chan bool
DoneCallback func()
}
func NewListObjectRequestWorker(id int, requestQueue chan ListAllObjectsRequest, doneCallback func()) ListObjectRequestWorker {
worker := ListObjectRequestWorker{
ID: id,
RequestQueue: requestQueue,
QuitChan: make(chan bool),
DoneCallback: doneCallback,
}
return worker
}
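// Illustrative aside (standalone sketch, never called): the producer / worker-pool /
// WaitGroup shape used by run() and ListAllObjectsProducer above, reduced to a toy int
// payload so the lifecycle is easy to follow. The name workerPoolSketch is made up; it
// relies only on log and sync, which this file already imports.
func workerPoolSketch() {
    queue := make(chan int, 5)
    var wg sync.WaitGroup
    for w := 1; w <= 3; w++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            for job := range queue { // loop ends when the producer closes the channel
                log.Printf("worker%d processed job %d", id, job)
            }
        }(w)
    }
    go func() {
        for job := 0; job < 10; job++ { // producer
            queue <- job
        }
        close(queue)
    }()
    wg.Wait()
}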
type ListObject struct {
Type string
Bucket string
File string
Size int64
LastModified time.Time
svc *s3.S3
}
func listObjects(bucket string, prefix string, delimiter string, svc *s3.S3, process ListObjectProcessFunc) | {
log.Printf("Retrieving object listing for %v/%v using service: %+v", bucket, prefix, svc.ClientInfo.Endpoint)
params := &s3.ListObjectsInput{
Bucket: aws.String(bucket), // Required
MaxKeys: aws.Int64(int64(cfg.MaxKeys)),
}
if delimiter != "" {
params.Delimiter = aws.String(delimiter)
}
if prefix != "" {
params.Prefix = aws.String(prefix)
}
pageNum := 0
objectNum := 0
err := svc.ListObjectsPages(params, func(page *s3.ListObjectsOutput, lastPage bool) bool { | identifier_body |
|
listing.go | "`
OutputFormat string `env:"key=OUTPUT_FORMAT default=csv"`
OutputLocation string `env:"key=OUTPUT_LOCATION default=./target"`
ListAllWorkersCount int `env:"key=LIST_ALL_WORKERS_COUNT default=5"`
SubPrefixExclude []string `env:"key=SUB_PREFIX_EXCLUDE decode=yaml"`
SubPrefixInclude []string `env:"key=SUB_PREFIX_INCLUDE decode=yaml"`
// private (non-exported) vars for processing only; e.g. lookup maps
subPrefixExcludeLookup map[string]bool
subPrefixIncludeLookup map[string]bool
}
var cfg *config
// Don't take seriously, just playing around with func objects and redirection possibilities
var output OutputFunc
type OutputFunc func(p string)
func stdoutOutput(p string) {
fmt.Fprint(os.Stdout, p)
}
func stderrOutput(p string) {
fmt.Fprint(os.Stderr, p)
}
//var outputFileHandle *os.File
//func fileOutput(p string) {
// outputFileHandle.WriteString(p)
//}
type ListObjectProcessFunc func(o *ListObject)
func outputObject(o *ListObject) {
output(printObject(o))
}
var files []ListObject = make([]ListObject, 0)
func appendObject(o *ListObject) {
files = append(files, *o)
}
type ListAllObjectsRequest struct {
Region string
Bucket string
Prefix string
Process ListObjectProcessFunc
OutputFile string
}
func main() {
t := metrics.NewTimer()
metrics.Register("duration", t)
cfg = &config{}
if err := env.Process(cfg); err != nil {
log.Fatal(err)
}
// populate lookups for quick operations
cfg.subPrefixExcludeLookup = make(map[string]bool)
cfg.subPrefixIncludeLookup = make(map[string]bool)
for _, adv := range cfg.SubPrefixExclude {
cfg.subPrefixExcludeLookup[adv] = true
}
for _, adv := range cfg.SubPrefixInclude {
cfg.subPrefixIncludeLookup[adv] = true
}
t.Time(run)
log.Printf("Execution time: %v", time.Duration(t.Sum())*time.Nanosecond)
metrics.WriteJSONOnce(metrics.DefaultRegistry, os.Stderr)
}
func run() {
output = stdoutOutput
listAllObjectsQueue := make(chan ListAllObjectsRequest, 5)
var wg sync.WaitGroup
doneCallback := func() {
defer wg.Done()
log.Printf("worker done")
}
for i := 0; i < cfg.ListAllWorkersCount; i++ {
log.Printf("Starting worker%d", i+1)
worker := NewListObjectRequestWorker(i+1, listAllObjectsQueue, doneCallback)
wg.Add(1)
go func() {
// wg.Add(1)
worker.Start()
}()
}
go func() {
ListAllObjectsProducer(listAllObjectsQueue)
close(listAllObjectsQueue)
}()
wg.Wait()
}
func ListAllObjectsProducer(listAllObjectsQueue chan ListAllObjectsRequest) {
log.Println("Starting producer")
// Create services we will use, inlining the config instead of using `session`
// V4 signing requires to connect to the explicit region
s3Services := make(map[string]*s3.S3)
s3Services[cfg.Region] = s3.New(session.New(), aws.NewConfig().WithRegion(cfg.Region))
files = make([]ListObject, 0)
cfg.MaxKeys = cfg.AdvMaxKeys
cfg.MaxPages = cfg.AdvMaxPages
// Find files in the source buckets in different regions
listObjects(cfg.Bucket, cfg.Prefix, "/", s3Services[cfg.Region], appendObject)
// TODO fix this hack... these need to be in context of the request, not globals
cfg.MaxKeys = cfg.FileMaxKeys
cfg.MaxPages = cfg.FileMaxPages
log.Printf("Found %v objects", len(files))
// files = make([]ListObject, 0)
// cfg.MaxPages = 1
// cfg.MaxKeys = 5
// // Find files in the source buckets in different regions
// listFiles(cfg.Bucket, cfg.Prefix, cfg.Delimiter, s3Services[cfg.Region], appendObject)
//
// log.Printf("Found %v files", len(files))
for _, file := range files {
if file.Type != "PREFIX" {
continue
}
fileParts := strings.Split(strings.Trim(file.File, "/"), "/")
advertiserId := fileParts[len(fileParts)-1]
outputFile := fmt.Sprintf("%v/advertiser_%v", cfg.OutputLocation, advertiserId)
if !cfg.Force {
if _, err := os.Stat(outputFile); !os.IsNotExist(err) {
log.Printf("%v file already exists, skipping", outputFile)
continue
}
}
if _, ok := cfg.subPrefixExcludeLookup[advertiserId]; ok == true {
log.Printf("%v is in the sub prefix exclude list", advertiserId)
continue
}
if len(cfg.subPrefixIncludeLookup) > 0 {
if _, ok := cfg.subPrefixIncludeLookup[advertiserId]; ok == false {
log.Printf("%v is not in the sub prefix include list", advertiserId)
continue
}
}
request := ListAllObjectsRequest{Region: cfg.Region, Bucket: cfg.Bucket, Prefix: fmt.Sprintf("%v%v/", cfg.Prefix, advertiserId), Process: outputObject, OutputFile: outputFile}
listAllObjectsQueue <- request
}
log.Println("Finish producer")
}
func (w ListObjectRequestWorker) Start() {
processRequestChan := make(chan ListAllObjectsRequest)
go func() {
defer close(processRequestChan)
for request := range w.RequestQueue {
log.Printf("popping request from queue for worker%d", w.ID)
select {
case processRequestChan <- request:
log.Printf("push request to process channel for worker%d", w.ID)
case <-w.QuitChan:
log.Printf("drainer for worker%d stopping", w.ID)
return
}
}
log.Printf("drainer for worker%d finished", w.ID)
}()
go func() {
defer w.DoneCallback()
for request := range processRequestChan {
log.Printf("processing request for worker%d", w.ID)
f, err := os.OpenFile(request.OutputFile, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
}
fileOutput := func(o *ListObject) {
f.WriteString(printObject(o))
}
f.WriteString("\n")
listObjects(request.Bucket, request.Prefix, "", s3.New(session.New(), aws.NewConfig().WithRegion(request.Region)), fileOutput)
f.Close()
}
log.Printf("processor for worker%d finished", w.ID)
}()
}
// f, err := os.OpenFile(request.OutputFile, os.O_WRONLY | os.O_CREATE, 0666)
//
// if err != nil {
// log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
// }
//
// outputFileHandle = f
// output = fileOutput
// defer f.Close()
// f.WriteString("\n")
// }
// }()
// select {
// case request := <-w.RequestQueue:
// // f, err := os.OpenFile(request.OutputFile, os.O_WRONLY | os.O_CREATE, 0666)
// //
// // if err != nil {
// // log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
// // }
// //
// // outputFileHandle = f
// // output = fileOutput
//
// // defer f.Close()
//
//
// // Find files in the source buckets in different regions
// listObjects(request.Bucket, request.Prefix, "", request.Region, request.Output)
//
// // f.WriteString("\n")
// case <-w.QuitChan:
// log.Printf("worker%d stopping", w.ID)
// return
// }
// }
// }()
//}
func (w ListObjectRequestWorker) Stop() {
go func() {
w.QuitChan <- true
}()
}
type ListObjectRequestWorker struct {
ID int
RequestQueue chan ListAllObjectsRequest
QuitChan chan bool
DoneCallback func()
}
func NewListObjectRequestWorker(id int, requestQueue chan ListAllObjectsRequest, doneCallback func()) ListObjectRequestWorker {
worker := ListObjectRequestWorker{
ID: id,
RequestQueue: requestQueue,
QuitChan: make(chan bool),
DoneCallback: doneCallback,
}
return worker
}
type ListObject struct {
Type string
Bucket string
File string
Size int64
LastModified time.Time
svc *s3.S3
}
func listObjects(bucket string, prefix string, delimiter string, svc *s3.S3, process ListObjectProcessFunc) {
log.Printf("Retrieving object listing for %v/%v using service: %+v", bucket, prefix, svc.ClientInfo.Endpoint)
| params := &s3.ListObjectsInput{
Bucket: aws.String(bucket), // Required
MaxKeys: aws.Int64(int64(cfg.MaxKeys)), | random_line_split |
|
listing.go | Keys int `env:"key=FILE_MAX_KEYS default=1000"`
Force bool `env:"key=FORCE default=false"`
Region string `env:"key=AWS_REGION required=true"`
Bucket string `env:"key=AWS_BUCKET required=true"`
Prefix string `env:"key=AWS_PREFIX required=true"`
OutputFormat string `env:"key=OUTPUT_FORMAT default=csv"`
OutputLocation string `env:"key=OUTPUT_LOCATION default=./target"`
ListAllWorkersCount int `env:"key=LIST_ALL_WORKERS_COUNT default=5"`
SubPrefixExclude []string `env:"key=SUB_PREFIX_EXCLUDE decode=yaml"`
SubPrefixInclude []string `env:"key=SUB_PREFIX_INCLUDE decode=yaml"`
// private (non-exported) vars for processing only; e.g. lookup maps
subPrefixExcludeLookup map[string]bool
subPrefixIncludeLookup map[string]bool
}
var cfg *config
// Don't take seriously, just playing around with func objects and redirection possibilities
var output OutputFunc
type OutputFunc func(p string)
func stdoutOutput(p string) {
fmt.Fprint(os.Stdout, p)
}
func stderrOutput(p string) {
fmt.Fprint(os.Stderr, p)
}
//var outputFileHandle *os.File
//func fileOutput(p string) {
// outputFileHandle.WriteString(p)
//}
type ListObjectProcessFunc func(o *ListObject)
func outputObject(o *ListObject) {
output(printObject(o))
}
var files []ListObject = make([]ListObject, 0)
func appendObject(o *ListObject) {
files = append(files, *o)
}
type ListAllObjectsRequest struct {
Region string
Bucket string
Prefix string
Process ListObjectProcessFunc
OutputFile string
}
func main() {
t := metrics.NewTimer()
metrics.Register("duration", t)
cfg = &config{}
if err := env.Process(cfg); err != nil {
log.Fatal(err)
}
// populate lookups for quick operations
cfg.subPrefixExcludeLookup = make(map[string]bool)
cfg.subPrefixIncludeLookup = make(map[string]bool)
for _, adv := range cfg.SubPrefixExclude {
cfg.subPrefixExcludeLookup[adv] = true
}
for _, adv := range cfg.SubPrefixInclude {
cfg.subPrefixIncludeLookup[adv] = true
}
t.Time(run)
log.Printf("Execution time: %v", time.Duration(t.Sum())*time.Nanosecond)
metrics.WriteJSONOnce(metrics.DefaultRegistry, os.Stderr)
}
func run() {
output = stdoutOutput
listAllObjectsQueue := make(chan ListAllObjectsRequest, 5)
var wg sync.WaitGroup
doneCallback := func() {
defer wg.Done()
log.Printf("worker done")
}
for i := 0; i < cfg.ListAllWorkersCount; i++ |
go func() {
ListAllObjectsProducer(listAllObjectsQueue)
close(listAllObjectsQueue)
}()
wg.Wait()
}
func ListAllObjectsProducer(listAllObjectsQueue chan ListAllObjectsRequest) {
log.Println("Starting producer")
// Create services we will use, inlining the config instead of using `session`
// V4 signing requires to connect to the explicit region
s3Services := make(map[string]*s3.S3)
s3Services[cfg.Region] = s3.New(session.New(), aws.NewConfig().WithRegion(cfg.Region))
files = make([]ListObject, 0)
cfg.MaxKeys = cfg.AdvMaxKeys
cfg.MaxPages = cfg.AdvMaxPages
// Find files in the source buckets in different regions
listObjects(cfg.Bucket, cfg.Prefix, "/", s3Services[cfg.Region], appendObject)
// TODO fix this hack... these need to be in context of the request, not globals
cfg.MaxKeys = cfg.FileMaxKeys
cfg.MaxPages = cfg.FileMaxPages
log.Printf("Found %v objects", len(files))
// files = make([]ListObject, 0)
// cfg.MaxPages = 1
// cfg.MaxKeys = 5
// // Find files in the source buckets in different regions
// listFiles(cfg.Bucket, cfg.Prefix, cfg.Delimiter, s3Services[cfg.Region], appendObject)
//
// log.Printf("Found %v files", len(files))
for _, file := range files {
if file.Type != "PREFIX" {
continue
}
fileParts := strings.Split(strings.Trim(file.File, "/"), "/")
advertiserId := fileParts[len(fileParts)-1]
outputFile := fmt.Sprintf("%v/advertiser_%v", cfg.OutputLocation, advertiserId)
if !cfg.Force {
if _, err := os.Stat(outputFile); !os.IsNotExist(err) {
log.Printf("%v file already exists, skipping", outputFile)
continue
}
}
if _, ok := cfg.subPrefixExcludeLookup[advertiserId]; ok == true {
log.Printf("%v is in the sub prefix exclude list", advertiserId)
continue
}
if len(cfg.subPrefixIncludeLookup) > 0 {
if _, ok := cfg.subPrefixIncludeLookup[advertiserId]; ok == false {
log.Printf("%v is not in the sub prefix include list", advertiserId)
continue
}
}
request := ListAllObjectsRequest{Region: cfg.Region, Bucket: cfg.Bucket, Prefix: fmt.Sprintf("%v%v/", cfg.Prefix, advertiserId), Process: outputObject, OutputFile: outputFile}
listAllObjectsQueue <- request
}
log.Println("Finish producer")
}
func (w ListObjectRequestWorker) Start() {
processRequestChan := make(chan ListAllObjectsRequest)
go func() {
defer close(processRequestChan)
for request := range w.RequestQueue {
log.Printf("popping request from queue for worker%d", w.ID)
select {
case processRequestChan <- request:
log.Printf("push request to process channel for worker%d", w.ID)
case <-w.QuitChan:
log.Printf("drainer for worker%d stopping", w.ID)
return
}
}
log.Printf("drainer for worker%d finished", w.ID)
}()
go func() {
defer w.DoneCallback()
for request := range processRequestChan {
log.Printf("processing request for worker%d", w.ID)
f, err := os.OpenFile(request.OutputFile, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
}
fileOutput := func(o *ListObject) {
f.WriteString(printObject(o))
}
f.WriteString("\n")
listObjects(request.Bucket, request.Prefix, "", s3.New(session.New(), aws.NewConfig().WithRegion(request.Region)), fileOutput)
f.Close()
}
log.Printf("processor for worker%d finished", w.ID)
}()
}
// f, err := os.OpenFile(request.OutputFile, os.O_WRONLY | os.O_CREATE, 0666)
//
// if err != nil {
// log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
// }
//
// outputFileHandle = f
// output = fileOutput
// defer f.Close()
// f.WriteString("\n")
// }
// }()
// select {
// case request := <-w.RequestQueue:
// // f, err := os.OpenFile(request.OutputFile, os.O_WRONLY | os.O_CREATE, 0666)
// //
// // if err != nil {
// // log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
// // }
// //
// // outputFileHandle = f
// // output = fileOutput
//
// // defer f.Close()
//
//
// // Find files in the source buckets in different regions
// listObjects(request.Bucket, request.Prefix, "", request.Region, request.Output)
//
// // f.WriteString("\n")
// case <-w.QuitChan:
// log.Printf("worker%d stopping", w.ID)
// return
// }
// }
// }()
//}
func (w ListObjectRequestWorker) Stop() {
go func() {
w.QuitChan <- true
}()
}
type ListObjectRequestWorker struct {
ID int
RequestQueue chan ListAllObjectsRequest
QuitChan chan bool
DoneCallback func()
}
func NewListObjectRequestWorker(id int, requestQueue chan ListAllObjectsRequest, doneCallback func()) ListObjectRequestWorker {
worker := ListObjectRequestWorker{
ID: id,
RequestQueue: requestQueue,
QuitChan: make(chan bool),
DoneCallback: doneCallback,
}
return worker
}
type ListObject struct {
Type string
Bucket string
File string
Size int64
LastModified time.Time
svc *s3.S3
}
func listObjects(bucket string, prefix string, delimiter string | {
log.Printf("Starting worker%d", i+1)
worker := NewListObjectRequestWorker(i+1, listAllObjectsQueue, doneCallback)
wg.Add(1)
go func() {
// wg.Add(1)
worker.Start()
}()
} | conditional_block |
listing.go | Keys int `env:"key=FILE_MAX_KEYS default=1000"`
Force bool `env:"key=FORCE default=false"`
Region string `env:"key=AWS_REGION required=true"`
Bucket string `env:"key=AWS_BUCKET required=true"`
Prefix string `env:"key=AWS_PREFIX required=true"`
OutputFormat string `env:"key=OUTPUT_FORMAT default=csv"`
OutputLocation string `env:"key=OUTPUT_LOCATION default=./target"`
ListAllWorkersCount int `env:"key=LIST_ALL_WORKERS_COUNT default=5"`
SubPrefixExclude []string `env:"key=SUB_PREFIX_EXCLUDE decode=yaml"`
SubPrefixInclude []string `env:"key=SUB_PREFIX_INCLUDE decode=yaml"`
// private (non-exported) vars for processing only; e.g. lookup maps
subPrefixExcludeLookup map[string]bool
subPrefixIncludeLookup map[string]bool
}
var cfg *config
// Don't take seriously, just playing around with func objects and redirection possibilities
var output OutputFunc
type OutputFunc func(p string)
func stdoutOutput(p string) {
fmt.Fprint(os.Stdout, p)
}
func stderrOutput(p string) {
fmt.Fprint(os.Stderr, p)
}
//var outputFileHandle *os.File
//func fileOutput(p string) {
// outputFileHandle.WriteString(p)
//}
type ListObjectProcessFunc func(o *ListObject)
func outputObject(o *ListObject) {
output(printObject(o))
}
var files []ListObject = make([]ListObject, 0)
func appendObject(o *ListObject) {
files = append(files, *o)
}
type ListAllObjectsRequest struct {
Region string
Bucket string
Prefix string
Process ListObjectProcessFunc
OutputFile string
}
func main() {
t := metrics.NewTimer()
metrics.Register("duration", t)
cfg = &config{}
if err := env.Process(cfg); err != nil {
log.Fatal(err)
}
// populate lookups for quick operations
cfg.subPrefixExcludeLookup = make(map[string]bool)
cfg.subPrefixIncludeLookup = make(map[string]bool)
for _, adv := range cfg.SubPrefixExclude {
cfg.subPrefixExcludeLookup[adv] = true
}
for _, adv := range cfg.SubPrefixInclude {
cfg.subPrefixIncludeLookup[adv] = true
}
t.Time(run)
log.Printf("Execution time: %v", time.Duration(t.Sum())*time.Nanosecond)
metrics.WriteJSONOnce(metrics.DefaultRegistry, os.Stderr)
}
func | () {
output = stdoutOutput
listAllObjectsQueue := make(chan ListAllObjectsRequest, 5)
var wg sync.WaitGroup
doneCallback := func() {
defer wg.Done()
log.Printf("worker done")
}
for i := 0; i < cfg.ListAllWorkersCount; i++ {
log.Printf("Starting worker%d", i+1)
worker := NewListObjectRequestWorker(i+1, listAllObjectsQueue, doneCallback)
wg.Add(1)
go func() {
// wg.Add(1)
worker.Start()
}()
}
go func() {
ListAllObjectsProducer(listAllObjectsQueue)
close(listAllObjectsQueue)
}()
wg.Wait()
}
func ListAllObjectsProducer(listAllObjectsQueue chan ListAllObjectsRequest) {
log.Println("Starting producer")
// Create services we will use, inlining the config instead of using `session`
// V4 signing requires to connect to the explicit region
s3Services := make(map[string]*s3.S3)
s3Services[cfg.Region] = s3.New(session.New(), aws.NewConfig().WithRegion(cfg.Region))
files = make([]ListObject, 0)
cfg.MaxKeys = cfg.AdvMaxKeys
cfg.MaxPages = cfg.AdvMaxPages
// Find files in the source buckets in different regions
listObjects(cfg.Bucket, cfg.Prefix, "/", s3Services[cfg.Region], appendObject)
// TODO fix this hack... these need to be in context of the request, not globals
cfg.MaxKeys = cfg.FileMaxKeys
cfg.MaxPages = cfg.FileMaxPages
log.Printf("Found %v objects", len(files))
// files = make([]ListObject, 0)
// cfg.MaxPages = 1
// cfg.MaxKeys = 5
// // Find files in the source buckets in different regions
// listFiles(cfg.Bucket, cfg.Prefix, cfg.Delimiter, s3Services[cfg.Region], appendObject)
//
// log.Printf("Found %v files", len(files))
for _, file := range files {
if file.Type != "PREFIX" {
continue
}
fileParts := strings.Split(strings.Trim(file.File, "/"), "/")
advertiserId := fileParts[len(fileParts)-1]
outputFile := fmt.Sprintf("%v/advertiser_%v", cfg.OutputLocation, advertiserId)
if !cfg.Force {
if _, err := os.Stat(outputFile); !os.IsNotExist(err) {
log.Printf("%v file already exists, skipping", outputFile)
continue
}
}
if _, ok := cfg.subPrefixExcludeLookup[advertiserId]; ok == true {
log.Printf("%v is in the sub prefix exclude list", advertiserId)
continue
}
if len(cfg.subPrefixIncludeLookup) > 0 {
if _, ok := cfg.subPrefixIncludeLookup[advertiserId]; ok == false {
log.Printf("%v is not in the sub prefix include list", advertiserId)
continue
}
}
request := ListAllObjectsRequest{Region: cfg.Region, Bucket: cfg.Bucket, Prefix: fmt.Sprintf("%v%v/", cfg.Prefix, advertiserId), Process: outputObject, OutputFile: outputFile}
listAllObjectsQueue <- request
}
log.Println("Finish producer")
}
func (w ListObjectRequestWorker) Start() {
processRequestChan := make(chan ListAllObjectsRequest)
go func() {
defer close(processRequestChan)
for request := range w.RequestQueue {
log.Printf("popping request from queue for worker%d", w.ID)
select {
case processRequestChan <- request:
log.Printf("push request to process channel for worker%d", w.ID)
case <-w.QuitChan:
log.Printf("drainer for worker%d stopping", w.ID)
return
}
}
log.Printf("drainer for worker%d finished", w.ID)
}()
go func() {
defer w.DoneCallback()
for request := range processRequestChan {
log.Printf("processing request for worker%d", w.ID)
f, err := os.OpenFile(request.OutputFile, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
}
fileOutput := func(o *ListObject) {
f.WriteString(printObject(o))
}
f.WriteString("\n")
listObjects(request.Bucket, request.Prefix, "", s3.New(session.New(), aws.NewConfig().WithRegion(request.Region)), fileOutput)
f.Close()
}
log.Printf("processor for worker%d finished", w.ID)
}()
}
// f, err := os.OpenFile(request.OutputFile, os.O_WRONLY | os.O_CREATE, 0666)
//
// if err != nil {
// log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
// }
//
// outputFileHandle = f
// output = fileOutput
// defer f.Close()
// f.WriteString("\n")
// }
// }()
// select {
// case request := <-w.RequestQueue:
// // f, err := os.OpenFile(request.OutputFile, os.O_WRONLY | os.O_CREATE, 0666)
// //
// // if err != nil {
// // log.Fatalf("Cannot create file: %v | %v", request.OutputFile, err)
// // }
// //
// // outputFileHandle = f
// // output = fileOutput
//
// // defer f.Close()
//
//
// // Find files in the source buckets in different regions
// listObjects(request.Bucket, request.Prefix, "", request.Region, request.Output)
//
// // f.WriteString("\n")
// case <-w.QuitChan:
// log.Printf("worker%d stopping", w.ID)
// return
// }
// }
// }()
//}
func (w ListObjectRequestWorker) Stop() {
go func() {
w.QuitChan <- true
}()
}
type ListObjectRequestWorker struct {
ID int
RequestQueue chan ListAllObjectsRequest
QuitChan chan bool
DoneCallback func()
}
func NewListObjectRequestWorker(id int, requestQueue chan ListAllObjectsRequest, doneCallback func()) ListObjectRequestWorker {
worker := ListObjectRequestWorker{
ID: id,
RequestQueue: requestQueue,
QuitChan: make(chan bool),
DoneCallback: doneCallback,
}
return worker
}
type ListObject struct {
Type string
Bucket string
File string
Size int64
LastModified time.Time
svc *s3.S3
}
func listObjects(bucket string, prefix string, delimiter string, | run | identifier_name |
spacing.rs | Field ( .. ) |
ast::ExprKind::MethodCall(.. )=>rewrite_chain(self, context, width, offset)
};
let f: fn ( & _ , _ ) -> _ = unimplemented ! () ;
let f = unimplemented ! {} ;
{
foo ( ) ;
}
for & (sample, radiance) in samples.iter() {}
map(|& s| moo());
match x {
S{foo}=>92
}
}
enum Message {
Quit ,
ChangeColor ( i32 , i32 , i32 ) ,
Move { x : i32 , y : i32 } ,
Write ( String ) ,
}
enum Foo{
Bar = 123 ,
Baz=0
}
pub struct Vec < T > {
buf : RawVec < T> ,
len :usize ,
}
impl <T >Vec < T > {
pub fn new ( ) -> Vec <T> {
Vec {
buf : RawVec :: new ( ) ,
len :0,
}
}
pub fn with_capacity (capacity :usize)->Vec <T>{
Vec {
buf:RawVec::with_capacity ( capacity ),
len:0,
}
}
pub unsafe fn from_raw_parts(ptr:* mut T, length :usize, capacity: usize) -> Vec <T> |
pub fn capacity( & self ) -> usize {
self . buf . cap ( )
}
pub fn reserve(& mut self, additional: usize) {
self. buf .reserve(self. len ,additional) ;
}
pub fn into_boxed_slice( mut self ) -> Box < [ T ] > {
unsafe{
self . shrink_to_fit ( ) ;
let buf = ptr::read( & self . buf );
mem :: forget ( self ) ;
buf.into_box()
}
}
pub fn truncate(&mut self ,len: usize) {
unsafe {
while len < self . len {
self . len -= 1 ;
let len = self . len ;
ptr::drop_in_place(self.get_unchecked_mut(len));
}
}
}
pub fn as_slice(& self) -> & [T] {
self
}
pub fn as_mut_slice(&mut self) -> & mut[T] {
& mut self [ .. ]
}
pub unsafe fn set_len(& mut self, len: usize) {
self . len = len;
}
pub fn remove(&mut self, index: usize) -> T {
let len = self.len();
assert!(index < len);
unsafe {
let ret;
{
let ptr = self.as_mut_ptr().offset(index as isize);
ret = ptr::read(ptr);
ptr::copy(ptr.offset (1), ptr, len-index-1);
}
self.set_len(len - 1);
ret
}
}
pub fn retain < F > ( & mut self , mut f : F ) where F : FnMut ( & T ) -> bool
{
let len = self.len();
let mut del = 0;
{
let v = & mut * * self ;
for i in 0 .. len {
if ! f ( & v [ i ] ) {
del += 1 ;
} else if del > 0 {
v.swap(i - del, i);
}
}
}
if del>0{
self.truncate(len-del);
}
}
pub fn drain<R>(&mut self,range:R)->Drain<T>where R:RangeArgument <usize>{
let len = self.len();
let start = * range.start() .unwrap_or( & 0 ) ;
let end = * range. end().unwrap_or( & len ) ;
assert!(start <= end);
assert!(end <= len);
}
}
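// Illustrative aside: the observable behaviour the retain() above reproduces from the
// standard library -- keep elements the predicate accepts, in order, drop the rest
// (shown here with std::vec::Vec rather than the local Vec type).
#[test]
fn retain_sketch() {
    let mut v = std::vec::Vec::from([1, 2, 3, 4, 5, 6]);
    v.retain(|x| x % 2 == 0);
    assert_eq!(v, [2, 4, 6]);
}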
impl<T:Clone>Vec<T>{
pub fn extend_from_slice(&mut self, other: & [ T ] ){
self.reserve(other.len());
for i in 0..other.len(){
let len = self.len();
unsafe {
ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone());
self.set_len(len + 1);
}
}
}
}
impl < T : PartialEq > Vec < T > {
pub fn dedup ( & mut self ) {
unsafe{
let ln = self.len();
if ln <= 1 {
return ;
}
let p = self.as_mut_ptr();
let mut r:usize =1 ;
let mut w:usize=1;
while r < ln {
let p_r = p.offset( r as isize );
let p_wm1 = p.offset ( ( w - 1 )as isize );
if * p_r !=* p_wm1 {
if r!=w{
let p_w = p_wm1.offset(1);
mem::swap( & mut * p_r , & mut * p_w );
}
w += 1;
}
r+=1 ;
}
self.truncate(w);
}
}
}
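// Illustrative aside: the observable behaviour the dedup() above reproduces from the
// standard library -- consecutive equal elements collapse to one (shown here with
// std::vec::Vec rather than the local Vec type).
#[test]
fn dedup_sketch() {
    let mut v = std::vec::Vec::from([1, 1, 2, 3, 3, 3, 4]);
    v.dedup();
    assert_eq!(v, [1, 2, 3, 4]);
}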
pub fn from_elem < T : Clone > ( elem :T ,n:usize) -> Vec <T>{
}
impl < T :Clone >Clone for Vec <T>{
fn clone(&self) -> Vec<T> {
< [ T ] > :: to_vec ( & * * self )
}
fn clone(&self) -> Vec<T> {
:: slice ::to_vec( &** self)
}
fn clone_from(&mut self, other:& Vec <T>) {
self.truncate(other . len());
let len=self. len();
self.clone_from_slice(& other [ .. len ]);
self.extend_from_slice(& other[ len .. ]);
}
}
impl< T:Hash>Hash for Vec<T> {
fn hash<H :hash :: Hasher >( & self, state: &mut H) {
Hash::hash(& **self, state)
}
}
impl<T> Index < usize > for Vec < T > {
type Output = T ;
fn index(&self, index: usize) ->& T {
& ( * * self ) [ index ]
}
}
impl < T > IndexMut < usize > for Vec < T > {
fn index_mut(&mut self, index: usize) -> &mut T {
& mut ( * * self ) [ index ]
}
}
impl<T> FromIterator<T> for Vec<T> {
fn from_iter < I : IntoIterator < Item = T > > ( iter : I ) -> Vec < T > {
let mut iterator = iter.into_iter();
let mut vector = match iterator . next () {
None=>return Vec::new() ,
Some ( element ) => {
let( lower , _ ) = iterator.size_hint();
// ...
}
};
// ...
}
}
impl<T> IntoIterator for Vec<T> {
type Item = T;
type IntoIter = IntoIter<T>;
fn into_iter( mut self ) -> IntoIter<T> {
unsafe{
let ptr = self.as_mut_ptr();
assume(!ptr.is_null());
let begin = ptr as * const T ;
let end = if mem :: size_of :: < T > ( ) == 0 {
arith_offset ( ptr as * const i8 , self.len() as isize )as*const T
} else {
ptr . offset (self.len()as isize | {
Vec{
buf :RawVec::from_raw_parts(ptr, capacity) ,
len :length ,
}
} | identifier_body |
spacing.rs | Field ( .. ) |
ast::ExprKind::MethodCall(.. )=>rewrite_chain(self, context, width, offset)
};
let f: fn ( & _ , _ ) -> _ = unimplemented ! () ;
let f = unimplemented ! {} ;
{
foo ( ) ;
}
for & (sample, radiance) in samples.iter() {}
map(|& s| moo());
match x {
S{foo}=>92
}
}
enum Message {
Quit ,
ChangeColor ( i32 , i32 , i32 ) ,
Move { x : i32 , y : i32 } ,
Write ( String ) ,
}
enum Foo{
Bar = 123 ,
Baz=0
}
pub struct Vec < T > {
buf : RawVec < T> ,
len :usize ,
}
impl <T >Vec < T > {
pub fn new ( ) -> Vec <T> {
Vec {
buf : RawVec :: new ( ) ,
len :0,
}
}
pub fn with_capacity (capacity :usize)->Vec <T>{
Vec {
buf:RawVec::with_capacity ( capacity ),
len:0,
}
}
pub unsafe fn from_raw_parts(ptr:* mut T, length :usize, capacity: usize) -> Vec <T>{
Vec{
buf :RawVec::from_raw_parts(ptr, capacity) ,
len :length ,
}
}
pub fn capacity( & self ) -> usize {
self . buf . cap ( )
}
pub fn reserve(& mut self, additional: usize) {
self. buf .reserve(self. len ,additional) ;
}
pub fn into_boxed_slice( mut self ) -> Box < [ T ] > {
unsafe{
self . shrink_to_fit ( ) ;
let buf = ptr::read( & self . buf );
mem :: forget ( self ) ;
buf.into_box()
}
}
pub fn truncate(&mut self ,len: usize) {
unsafe {
while len < self . len {
self . len -= 1 ;
let len = self . len ;
ptr::drop_in_place(self.get_unchecked_mut(len));
}
}
}
pub fn as_slice(& self) -> & [T] {
self
}
pub fn as_mut_slice(&mut self) -> & mut[T] {
& mut self [ .. ]
}
pub unsafe fn set_len(& mut self, len: usize) {
self . len = len;
}
pub fn remove(&mut self, index: usize) -> T {
let len = self.len();
assert!(index < len);
unsafe {
let ret;
{
let ptr = self.as_mut_ptr().offset(index as isize);
ret = ptr::read(ptr);
ptr::copy(ptr.offset (1), ptr, len-index-1);
}
self.set_len(len - 1);
ret
}
}
pub fn retain < F > ( & mut self , mut f : F ) where F : FnMut ( & T ) -> bool
{
let len = self.len();
let mut del = 0;
{
let v = & mut * * self ;
for i in 0 .. len {
if ! f ( & v [ i ] ) {
del += 1 ;
} else if del > 0 {
v.swap(i - del, i);
}
}
}
if del>0{
self.truncate(len-del);
}
}
pub fn | <R>(&mut self,range:R)->Drain<T>where R:RangeArgument <usize>{
let len = self.len();
let start = * range.start() .unwrap_or( & 0 ) ;
let end = * range. end().unwrap_or( & len ) ;
assert!(start <= end);
assert!(end <= len);
}
}
impl<T:Clone>Vec<T>{
pub fn extend_from_slice(&mut self, other: & [ T ] ){
self.reserve(other.len());
for i in 0..other.len(){
let len = self.len();
unsafe {
ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone());
self.set_len(len + 1);
}
}
}
}
impl < T : PartialEq > Vec < T > {
pub fn dedup ( & mut self ) {
unsafe{
let ln = self.len();
if ln <= 1 {
return ;
}
let p = self.as_mut_ptr();
let mut r:usize =1 ;
let mut w:usize=1;
while r < ln {
let p_r = p.offset( r as isize );
let p_wm1 = p.offset ( ( w - 1 )as isize );
if * p_r !=* p_wm1 {
if r!=w{
let p_w = p_wm1.offset(1);
mem::swap( & mut * p_r , & mut * p_w );
}
w += 1;
}
r+=1 ;
}
self.truncate(w);
}
}
}
pub fn from_elem < T : Clone > ( elem :T ,n:usize) -> Vec <T>{
}
impl < T :Clone >Clone for Vec <T>{
fn clone(&self) -> Vec<T> {
< [ T ] > :: to_vec ( & * * self )
}
fn clone(&self) -> Vec<T> {
:: slice ::to_vec( &** self)
}
fn clone_from(&mut self, other:& Vec <T>) {
self.truncate(other . len());
let len=self. len();
self.clone_from_slice(& other [ .. len ]);
self.extend_from_slice(& other[ len .. ]);
}
}
impl< T:Hash>Hash for Vec<T> {
fn hash<H :hash :: Hasher >( & self, state: &mut H) {
Hash::hash(& **self, state)
}
}
impl<T> Index < usize > for Vec < T > {
type Output = T ;
fn index(&self, index: usize) ->& T {
& ( * * self ) [ index ]
}
}
impl < T > IndexMut < usize > for Vec < T > {
fn index_mut(&mut self, index: usize) -> &mut T {
& mut ( * * self ) [ index ]
}
}
impl<T> FromIterator<T> for Vec<T> {
fn from_iter < I : IntoIterator < Item = T > > ( iter : I ) -> Vec < T > {
let mut iterator = iter.into_iter();
let mut vector = match iterator . next () {
None=>return Vec::new() ,
Some ( element ) => {
let( lower , _ ) = iterator.size_hint();
// ...
}
};
// ...
}
}
impl<T> IntoIterator for Vec<T> {
type Item = T;
type IntoIter = IntoIter<T>;
fn into_iter( mut self ) -> IntoIter<T> {
unsafe{
let ptr = self.as_mut_ptr();
assume(!ptr.is_null());
let begin = ptr as * const T ;
let end = if mem :: size_of :: < T > ( ) == 0 {
arith_offset ( ptr as * const i8 , self.len() as isize )as*const T
} else {
ptr . offset (self.len()as isize | drain | identifier_name |
spacing.rs | :: Field ( .. ) |
ast::ExprKind::MethodCall(.. )=>rewrite_chain(self, context, width, offset)
};
let f: fn ( & _ , _ ) -> _ = unimplemented ! () ;
let f = unimplemented ! {} ;
{
foo ( ) ;
}
for & (sample, radiance) in samples.iter() {}
map(|& s| moo());
match x {
S{foo}=>92
}
}
enum Message {
Quit ,
ChangeColor ( i32 , i32 , i32 ) ,
Move { x : i32 , y : i32 } ,
Write ( String ) ,
}
enum Foo{
Bar = 123 ,
Baz=0
}
pub struct Vec < T > {
buf : RawVec < T> ,
len :usize ,
}
impl <T >Vec < T > {
pub fn new ( ) -> Vec <T> {
Vec {
buf : RawVec :: new ( ) ,
len :0,
}
}
pub fn with_capacity (capacity :usize)->Vec <T>{
Vec {
buf:RawVec::with_capacity ( capacity ),
len:0,
}
}
pub unsafe fn from_raw_parts(ptr:* mut T, length :usize, capacity: usize) -> Vec <T>{
Vec{
buf :RawVec::from_raw_parts(ptr, capacity) ,
len :length , | }
}
pub fn capacity( & self ) -> usize {
self . buf . cap ( )
}
pub fn reserve(& mut self, additional: usize) {
self. buf .reserve(self. len ,additional) ;
}
pub fn into_boxed_slice( mut self ) -> Box < [ T ] > {
unsafe{
self . shrink_to_fit ( ) ;
let buf = ptr::read( & self . buf );
mem :: forget ( self ) ;
buf.into_box()
}
}
pub fn truncate(&mut self ,len: usize) {
unsafe {
while len < self . len {
self . len -= 1 ;
let len = self . len ;
ptr::drop_in_place(self.get_unchecked_mut(len));
}
}
}
pub fn as_slice(& self) -> & [T] {
self
}
pub fn as_mut_slice(&mut self) -> & mut[T] {
& mut self [ .. ]
}
pub unsafe fn set_len(& mut self, len: usize) {
self . len = len;
}
pub fn remove(&mut self, index: usize) -> T {
let len = self.len();
assert!(index < len);
unsafe {
let ret;
{
let ptr = self.as_mut_ptr().offset(index as isize);
ret = ptr::read(ptr);
ptr::copy(ptr.offset (1), ptr, len-index-1);
}
self.set_len(len - 1);
ret
}
}
pub fn retain < F > ( & mut self , mut f : F ) where F : FnMut ( & T ) -> bool
{
let len = self.len();
let mut del = 0;
{
let v = & mut * * self ;
for i in 0 .. len {
if ! f ( & v [ i ] ) {
del += 1 ;
} else if del > 0 {
v.swap(i - del, i);
}
}
}
if del>0{
self.truncate(len-del);
}
}
pub fn drain<R>(&mut self,range:R)->Drain<T>where R:RangeArgument <usize>{
let len = self.len();
let start = * range.start() .unwrap_or( & 0 ) ;
let end = * range. end().unwrap_or( & len ) ;
assert!(start <= end);
assert!(end <= len);
}
}
impl<T:Clone>Vec<T>{
pub fn extend_from_slice(&mut self, other: & [ T ] ){
self.reserve(other.len());
for i in 0..other.len(){
let len = self.len();
unsafe {
ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone());
self.set_len(len + 1);
}
}
}
}
impl < T : PartialEq > Vec < T > {
pub fn dedup ( & mut self ) {
unsafe{
let ln = self.len();
if ln <= 1 {
return ;
}
let p = self.as_mut_ptr();
let mut r:usize =1 ;
let mut w:usize=1;
while r < ln {
let p_r = p.offset( r as isize );
let p_wm1 = p.offset ( ( w - 1 )as isize );
if * p_r !=* p_wm1 {
if r!=w{
let p_w = p_wm1.offset(1);
mem::swap( & mut * p_r , & mut * p_w );
}
w += 1;
}
r+=1 ;
}
self.truncate(w);
}
}
}
pub fn from_elem < T : Clone > ( elem :T ,n:usize) -> Vec <T>{
}
impl < T :Clone >Clone for Vec <T>{
fn clone(&self) -> Vec<T> {
< [ T ] > :: to_vec ( & * * self )
}
fn clone(&self) -> Vec<T> {
:: slice ::to_vec( &** self)
}
fn clone_from(&mut self, other:& Vec <T>) {
self.truncate(other . len());
let len=self. len();
self.clone_from_slice(& other [ .. len ]);
self.extend_from_slice(& other[ len .. ]);
}
}
impl< T:Hash>Hash for Vec<T> {
fn hash<H :hash :: Hasher >( & self, state: &mut H) {
Hash::hash(& **self, state)
}
}
impl<T> Index < usize > for Vec < T > {
type Output = T ;
fn index(&self, index: usize) ->& T {
& ( * * self ) [ index ]
}
}
impl < T > IndexMut < usize > for Vec < T > {
fn index_mut(&mut self, index: usize) -> &mut T {
& mut ( * * self ) [ index ]
}
}
impl<T> FromIterator<T> for Vec<T> {
fn from_iter < I : IntoIterator < Item = T > > ( iter : I ) -> Vec < T > {
let mut iterator = iter.into_iter();
let mut vector = match iterator . next () {
None=>return Vec::new() ,
Some ( element ) => {
let( lower , _ ) = iterator.size_hint();
// ...
}
};
// ...
}
}
impl<T> IntoIterator for Vec<T> {
type Item = T;
type IntoIter = IntoIter<T>;
fn into_iter( mut self ) -> IntoIter<T> {
unsafe{
let ptr = self.as_mut_ptr();
assume(!ptr.is_null());
let begin = ptr as * const T ;
let end = if mem :: size_of :: < T > ( ) == 0 {
arith_offset ( ptr as * const i8 , self.len() as isize )as*const T
} else {
ptr . offset (self.len()as isize) | random_line_split |
|
spacing.rs | :: Field ( .. ) |
ast::ExprKind::MethodCall(.. )=>rewrite_chain(self, context, width, offset)
};
let f: fn ( & _ , _ ) -> _ = unimplemented ! () ;
let f = unimplemented ! {} ;
{
foo ( ) ;
}
for & (sample, radiance) in samples.iter() {}
map(|& s| moo());
match x {
S{foo}=>92
}
}
enum Message {
Quit ,
ChangeColor ( i32 , i32 , i32 ) ,
Move { x : i32 , y : i32 } ,
Write ( String ) ,
}
enum Foo{
Bar = 123 ,
Baz=0
}
pub struct Vec < T > {
buf : RawVec < T> ,
len :usize ,
}
impl <T >Vec < T > {
pub fn new ( ) -> Vec <T> {
Vec {
buf : RawVec :: new ( ) ,
len :0,
}
}
pub fn with_capacity (capacity :usize)->Vec <T>{
Vec {
buf:RawVec::with_capacity ( capacity ),
len:0,
}
}
pub unsafe fn from_raw_parts(ptr:* mut T, length :usize, capacity: usize) -> Vec <T>{
Vec{
buf :RawVec::from_raw_parts(ptr, capacity) ,
len :length ,
}
}
pub fn capacity( & self ) -> usize {
self . buf . cap ( )
}
pub fn reserve(& mut self, additional: usize) {
self. buf .reserve(self. len ,additional) ;
}
pub fn into_boxed_slice( mut self ) -> Box < [ T ] > {
unsafe{
self . shrink_to_fit ( ) ;
let buf = ptr::read( & self . buf );
mem :: forget ( self ) ;
buf.into_box()
}
}
pub fn truncate(&mut self ,len: usize) {
unsafe {
while len < self . len {
self . len -= 1 ;
let len = self . len ;
ptr::drop_in_place(self.get_unchecked_mut(len));
}
}
}
pub fn as_slice(& self) -> & [T] {
self
}
pub fn as_mut_slice(&mut self) -> & mut[T] {
& mut self [ .. ]
}
pub unsafe fn set_len(& mut self, len: usize) {
self . len = len;
}
pub fn remove(&mut self, index: usize) -> T {
let len = self.len();
assert!(index < len);
unsafe {
let ret;
{
let ptr = self.as_mut_ptr().offset(index as isize);
ret = ptr::read(ptr);
ptr::copy(ptr.offset (1), ptr, len-index-1);
}
self.set_len(len - 1);
ret
}
}
pub fn retain < F > ( & mut self , mut f : F ) where F : FnMut ( & T ) -> bool
{
let len = self.len();
let mut del = 0;
{
let v = & mut * * self ;
for i in 0 .. len {
if ! f ( & v [ i ] ) {
del += 1 ;
} else if del > 0 {
v.swap(i - del, i);
}
}
}
if del>0{
self.truncate(len-del);
}
}
pub fn drain<R>(&mut self,range:R)->Drain<T>where R:RangeArgument <usize>{
let len = self.len();
let start = * range.start() .unwrap_or( & 0 ) ;
let end = * range. end().unwrap_or( & len ) ;
assert!(start <= end);
assert!(end <= len);
}
}
impl<T:Clone>Vec<T>{
pub fn extend_from_slice(&mut self, other: & [ T ] ){
self.reserve(other.len());
for i in 0..other.len(){
let len = self.len();
unsafe {
ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone());
self.set_len(len + 1);
}
}
}
}
impl < T : PartialEq > Vec < T > {
pub fn dedup ( & mut self ) {
unsafe{
let ln = self.len();
if ln <= 1 {
return ;
}
let p = self.as_mut_ptr();
let mut r:usize =1 ;
let mut w:usize=1;
while r < ln {
let p_r = p.offset( r as isize );
let p_wm1 = p.offset ( ( w - 1 )as isize );
if * p_r !=* p_wm1 |
r+=1 ;
}
self.truncate(w);
}
}
}
pub fn from_elem < T : Clone > ( elem :T ,n:usize) -> Vec <T>{
}
impl < T :Clone >Clone for Vec <T>{
fn clone(&self) -> Vec<T> {
< [ T ] > :: to_vec ( & * * self )
}
fn clone(&self) -> Vec<T> {
:: slice ::to_vec( &** self)
}
fn clone_from(&mut self, other:& Vec <T>) {
self.truncate(other . len());
let len=self. len();
self.clone_from_slice(& other [ .. len ]);
self.extend_from_slice(& other[ len .. ]);
}
}
impl< T:Hash>Hash for Vec<T> {
fn hash<H :hash :: Hasher >( & self, state: &mut H) {
Hash::hash(& **self, state)
}
}
impl<T> Index < usize > for Vec < T > {
type Output = T ;
fn index(&self, index: usize) ->& T {
& ( * * self ) [ index ]
}
}
impl < T > IndexMut < usize > for Vec < T > {
fn index_mut(&mut self, index: usize) -> &mut T {
& mut ( * * self ) [ index ]
}
}
impl<T> FromIterator<T> for Vec<T> {
fn from_iter < I : IntoIterator < Item = T > > ( iter : I ) -> Vec < T > {
let mut iterator = iter.into_iter();
let mut vector = match iterator . next () {
None=>return Vec::new() ,
Some ( element ) => {
let( lower , _ ) = iterator.size_hint();
// ...
}
};
// ...
}
}
impl<T> IntoIterator for Vec<T> {
type Item = T;
type IntoIter = IntoIter<T>;
fn into_iter( mut self ) -> IntoIter<T> {
unsafe{
let ptr = self.as_mut_ptr();
assume(!ptr.is_null());
let begin = ptr as * const T ;
let end = if mem :: size_of :: < T > ( ) == 0 {
arith_offset ( ptr as * const i8 , self.len() as isize )as*const T
} else {
ptr . offset (self.len()as isize | {
if r!=w{
let p_w = p_wm1.offset(1);
mem::swap( & mut * p_r , & mut * p_w );
}
w += 1;
} | conditional_block |
SFRF_backend.py | )
return True
except:
inter.pop_up("ERROR", "Consulta Inválida")
return False
def retorna_data_sem_hora(data_str):
split1 = data_str.split(" ")
return split1[0]
def retorna_dia(data_str):
split1 = data_str.split(" ")
split2 = split1[0]
data = split2.split("/")
return data[2]
def retorna_hora(data_str):
split1 = data_str.split(" ")
return split1[1]
class McListBox(object):
def __init__(self, container_o, header, lista):
self.tree = None
self._setup_widgets(container_o, header)
self._build_tree(header, lista)
def _setup_widgets(self,container_o, header):
self.container = ttk.Frame(container_o)
self.container.pack(fill='both', expand=True)
self.tree = ttk.Treeview(columns=header, show="headings")
vsb = ttk.Scrollbar(orient="vertical", command=self.tree.yview)
hsb = ttk.Scrollbar(orient="horizontal", command=self.tree.xview)
self.tree.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)
self.tree.grid(column=0, row=0, sticky='nsew', in_=self.container)
vsb.grid(column=1, row=0, sticky='ns', in_=self.container)
hsb.grid(column=0, row=1, sticky='ew', in_=self.container)
self.container.grid_columnconfigure(0, weight=1)
self.container.grid_rowconfigure(0, weight=1)
def _build_tree(self, header, lista):
for col in header:
self.tree.heading(col, text=col.title(), command=lambda c=col: sortby(self.tree, c, 0))
self.tree.column(col, width=tkFont.Font().measure(col.title()))
for item in lista:
self.tree.insert('', 'end', values=item)
for ix, val in enumerate(item):
col_w = tkFont.Font().measure(val)
if self.tree.column(header[ix],width=None)<col_w:
self.tree.column(header[ix], width=col_w)
def gerar_relatorio_resumido(tuplas, setor, mes, ano):
cursor = con.cursor
cursor.execute("SELECT Sigla FROM Setor WHERE Nome=(?)",(setor,))
sigla = cursor.fetchone()[0]
file_name = "Relatório resumido-"+sigla+"-"+mes+"-"+ano
file_path = asksaveasfilename(title = "Select file", initialfile=file_name, filetypes = (("Arquivos Excel","*.xlsx"),))
file_path = file_path+".xlsx"
alignment_left = Alignment(horizontal='left')
alignment_right = Alignment(horizontal='right')
alignment_center = Alignment(horizontal='center')
font_normal = Font(name='Arial')
font_bold = Font(name='Arial',bold=True)
# Creating the workbook
wb = Workbook()
ws_resumido = wb.active
ws_resumido.title = "Histórico resumido"
# Formatting cells for the report issue date
ws_resumido.merge_cells('F2:G2')
ws_resumido.merge_cells('H2:I2')
cell_emissao = ws_resumido['F2']
cell_data = ws_resumido['H2']
cell_emissao.alignment, cell_emissao.font = alignment_right, font_normal
cell_data.alignment, cell_data.font = alignment_left, font_normal
now = datetime.now()
if now.month < 10:
mes_emissao = "0"+str(now.month)
else:
mes_emissao = str(now.month)
ws_resumido['F2'] = "Data de emissão:"
ws_resumido['H2'] = str(now.day)+"/"+mes_emissao+"/"+str(now.year)
# Formatting cells for the laboratory name
ws_resumido.merge_cells('A7:I7')
cell_lab = ws_resumido['A7']
cell_lab.font, cell_lab.alignment = font_normal, alignment_center
ws_resumido['A7'] = setor
# Formatting the month and year cells
cell_mes_str = ws_resumido['A9']
cell_mes = ws_resumido['B9']
cell_ano_str = ws_resumido['H9']
cell_ano = ws_resumido['I9']
cell_mes_str.font, cell_mes.font, cell_ano_str.font, cell_ano.font = font_normal, font_normal, font_normal, font_normal
cell_mes_str.alignment, cell_ano_str.alignment = alignment_right, alignment_right
cell_mes.alignment, cell_ano.alignment = alignment_left, alignment_left
ws_resumido['A9'] = "MÊS:"
ws_resumido['B9'] = mes
ws_resumido['H9'] = "ANO:"
ws_resumido['I9'] = ano  # Pull from the database
# Name and hours count
ws_resumido.merge_cells('G11:I11')
cell_nome = ws_resumido['A11']
cell_qtd = ws_resumido['G11']
cell_nome.font, cell_qtd.font = font_bold, font_bold
cell_nome.alignment, cell_qtd.alignment = alignment_left, alignment_center
ws_resumido['A11'] = "NOME"
ws_resumido['G11'] = "QTD HORAS POR MÊS"
row_base = 12
for colaborador in tuplas:
row_name = 'A' + str(row_base)
row_time = 'H' + str(row_base)
ws_resumido[row_name] = colaborador[0]
ws_resumido[row_time] = colaborador[1]
ws_resumido[row_time].alignment = alignment_center
row_base+=1
wb.template = False
wb.save(file_path)
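# Note added for clarity (assumption, not part of the original source): the two report
# generators in this module rely on imports defined elsewhere in the file, roughly:
#   from openpyxl import Workbook
#   from openpyxl.styles import Alignment, Font
#   from tkinter.filedialog import asksaveasfilename
#   from datetime import datetime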
def gerar_relatorio_detalhado(setor, mes, ano):
dict_mes = {'Janeiro': '01','Fevereiro': '02','Março': '03','Abril': '04','Maio': '05','Junho': '06',
'Julho': '07','Agosto': '08','Setembro': '09','Outubro': '10','Novembro': '11','Dezembro': '12'}
cursor = con.cursor
cursor.execute("SELECT Sigla FROM Setor WHERE Nome=(?)",(setor,))
sigla = cursor.fetchone()[0]
file_name = "Relatório detalhado-"+sigla+"-"+mes+"-"+ano
file_path = asksaveasfilename(title = "Select file", initialfile=file_name, filetypes = (("Arquivos Excel","*.xlsx"),))
file_path = file_path+".xlsx"
alignment_left = Alignment(horizontal='left')
alignment_right = Alignment(horizontal='right')
alignment_center = Alignment(horizontal='center')
font_normal = Font(name='Arial')
font_bold = Font(name='Arial',bold=True)
lista_colab = retorna_lista_colab(setor)
# Creating the workbook
wb = Workbook()
ws_array = []
ws_first = wb.active
ws_first.title = lista_colab[0]
ws_dict = {}
ws_dict[lista_colab[0]] = ws_first
for i in range(1,len(lista_colab)):
ws_temp = wb.create_sheet(lista_colab[i])
ws_dict[lista_colab[i]] = ws_temp
for colaborador in lista_colab:
# Inserting the logo
colab = retorna_colab(colaborador, setor)
current_sheet = ws_dict[colaborador]
# Formatting cells for the report issue date
current_sheet.merge_cells('F2:G2')
current_sheet.merge_cells('H2:I2')
cell_emissao = current_sheet['F2']
cell_data = current_sheet['H2']
cell_emissao.alignment, cell_emissao.font = alignment_right, font_normal
cell_data.alignment, cell_data.font = alignment_left, font_normal
now = datetime.now()
if now.month < 10:
mes_emissao = "0"+str(now.month)
else:
mes_emissao = str(now.month)
current_sheet['F2'] = "Data de emissão:"
current_sheet['H2'] = str(now.day)+"/"+mes_emissao+"/"+str(now.year)
name_cell = current_sheet['A8']
name_cell.font = font_normal
current_sheet['A8'] = "NOME: "+colaborador
lab_cell = current_sheet['A9']
lab_cell.font = font_normal
current_sheet['A9'] = "LABORATÓRIO: "+ setor
funcao_cell = current_sheet['A10']
funcao_cell.font = font_normal
current_sheet['A10'] = "FUNÇÃO: "+colab.funcao
cell_mes_str = current_sheet['A12']
cell_ano_str = current_sheet['H12']
cell_ano = current_sheet['I12'] |
cell_mes_str.font, cell_ano_str.font, cell_ano.font = font_normal, font_normal, font_normal
cell_mes_str.alignment, cell_ano_str.alignment = alignment_left, alignment_right | random_line_split |
|
SFRF_backend.py | "Cadastro Inválido")
else:
inter.pop_up("ERROR", "Cadastro Inválido")
return False
def retorna_lista_colab(setor):
cursor = con.cursor
if setor == 'Não Ativos':
cursor.execute("SELECT Nome FROM Colaborador WHERE Status='Não Ativo'")
else:
cursor.execute("SELECT Nome FROM Colaborador WHERE Setor = '%s' and Status='Ativo' or Status='Afastado' ORDER BY Nome"%setor)
lista = []
for nome in cursor.fetchall():
lista.append(str(nome[0]))
return lista
def retorna_lista_setor():
cursor = con.cursor
cursor.execute('''SELECT Nome
FROM Setor''')
lista = []
for nome in cursor.fetchall():
lista.append(str(nome[0]))
return lista
def retorna_dados_setor(setor):
cursor = con.cursor
try:
cursor.execute("SELECT Nome,Sigla FROM Setor WHERE Nome='%s'"%(setor))
return cursor.fetchall()[0]
except:
pop_up("ERROR", "Setor não encontrado!")
def retorna_colab(nome, setor):
cursor = con.cursor
if type(nome) != str:
if setor == 'Não Ativos':
cursor.execute("SELECT * FROM Colaborador WHERE Nome = '%s' and Status = 'Não Ativo'"%nome.get())
else:
cursor.execute("SELECT * FROM Colaborador WHERE Nome = '%s' and Setor='%s'"%(nome.get(), setor))
else:
if setor == 'Não Ativos':
cursor.execute("SELECT * FROM Colaborador WHERE Nome = '%s' and Status = 'Não Ativo'"%nome)
else:
cursor.execute("SELECT * FROM Colaborador WHERE Nome = '%s' and Setor='%s'"%(nome, setor))
lista = cursor.fetchall()
tupla = lista[0]
colab = Colaborador(tupla[0], tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], tupla[7], tupla[8], tupla[11])
return colab
def retorna_user(cpf):
cursor = con.cursor
cursor.execute("SELECT * FROM Colaborador WHERE cpf = '%s'"%cpf)
lista = cursor.fetchall()
tupla = lista[0]
colab = Colaborador(tupla[0], tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], tupla[7], tupla[8], tupla[11])
return colab
def retorna_objeto_date(data):
data_hora = data.split(" ")
data = data_hora[0].split("/")
hora = data_hora[1].split(":")
date = datetime(int(data[0]),int(data[1]),int(data[2]), int(hora[0]),int(hora[1]),int(hora[2]))
return date
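# Illustrative note (not from the original source): retorna_objeto_date expects the
# "YYYY/MM/DD HH:MM:SS" layout stored in the Frequencia table, e.g.
#   retorna_objeto_date("2020/03/15 08:30:00") == datetime(2020, 3, 15, 8, 30, 0)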
class ImageMethods():
# Opens a dialog box for the user to provide the image path
@staticmethod
def get_path(line_path):
tk_dialog = Tk()
tk_dialog.withdraw()
file_types=[('PNG file',"*.png"), ('JPG file', "*.jpg")]
file_name = askopenfilename(filetypes=file_types, title="Selecione o logo")
line_path.set(file_name)
tk_dialog.destroy()
# Returns an image represented as binary data
@staticmethod
def get_binary(image_path):
binary = ''
with open(image_path, 'rb') as image:
binary = image.read()
return binary
@staticmethod
def get_file_type(image_path):
file_type = ""
for i in range(-3,0):
file_type = file_type + image_path[i]
return (file_type)
def retorna_data():
now = datetime.now()
dia = str(now.day)
if len(dia)<2:
dia = '0'+dia
mes = str(now.month)
if len(mes)<2:
mes = '0'+mes
ano = str(now.year)
data = ano+'/'+mes+'/'+dia
return data
# Standard algorithm for validating a CPF
def validar_cpf(cpf):
try:
if cpf.isnumeric() and (len(cpf) == 11):
igual = True
for i in range(1,11):
if cpf[i] != cpf[i-1]:
igual = False
if (igual):
return False
cpf_soma = 0
for i in range(0, 9):
cpf_soma = cpf_soma + int(cpf[i])*(10-i)
if ((cpf_soma*10)%11 == int(cpf[9])) or ((cpf_soma*10)%11 == 10 and int(cpf[9]) == 0):
cpf_soma = 0
for i in range(0, 10):
cpf_soma = cpf_soma + int(cpf[i])*(11-i)
if ((cpf_soma*10)%11 == int(cpf[10])) or ((cpf_soma*10)%11 == 10 and int(cpf[10]) == 0):
return True
else:
return False
return False
except:
return False
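# Illustrative helper (added for clarity, not part of the original module): it shows the
# check-digit arithmetic validar_cpf implements, using a commonly cited test CPF.
def _exemplo_validar_cpf():
    # "52998224725": first check digit from weights 10..2 -> (295 * 10) % 11 == 2,
    # second from weights 11..2 over the first ten digits -> (347 * 10) % 11 == 5.
    # Repeated-digit inputs such as "11111111111" are rejected by the all-equal check.
    return validar_cpf("52998224725"), validar_cpf("11111111111")  # expected: (True, False)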
def validar_data(data):
try:
datetime.strptime(data, '%d/%m/%Y')
return True
except Exception as e:
return False
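# Example (illustrative): validar_data("31/12/2024") -> True, while "2024-12-31" -> False,
# since only the "%d/%m/%Y" layout is accepted.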
# Returns a list of tuples with the collaborator's history
def validar_chamada_historico(setor_, mes, ano, tipo):
setor, mes, ano = setor_.get(), mes.get(), ano.get()
if setor != "*Selecione o setor*":
try:
cursor = con.cursor
dict_mes = {'Janeiro': '01','Fevereiro': '02','Março': '03','Abril': '04','Maio': '05','Junho': '06',
'Julho': '07','Agosto': '08','Setembro': '09','Outubro': '10','Novembro': '11','Dezembro': '12'}
date = ano+"/"+dict_mes[mes]
if setor == "Não Ativos":
cursor.execute("SELECT cpf,Nome From Colaborador WHERE Status = 'Não Ativo'")
else:
cursor.execute("SELECT cpf,Nome From Colaborador WHERE Setor = '%s' ORDER BY Nome"%setor)
lista_tuplas = []
cpf_nome_colab = cursor.fetchall()
if len(cpf_nome_colab) == 0:
inter.pop_up("Atenção", "Não há membros neste setor")
return False
# Summarized history
if tipo=='r':
for colab in cpf_nome_colab:
co | ipo=='d':
dias = ["01","02","03","04","05","06","07","08","09","10","11","12","13","14","15","16","17","18","19","20"
,"21","22","23","24","25","26","27","28","29","30","31"]
for dia in dias:
data_com_dia = date+"/"+ dia
linha = dia+" de "+mes+" de "+ano
if setor != "Não Ativos":
cursor.execute('''SELECT Nome, entrada, saida
FROM Colaborador as C, Frequencia as F
WHERE C.Setor = ? and C.cpf == F.cpf and C.Status != "Não Ativo" and F.entrada LIKE ? and F.saida IS NOT NULL
ORDER BY Nome''',(setor, data_com_dia+"%"))
else:
cursor.execute('''SELECT Nome, entrada, saida
FROM Colaborador as C, Frequencia as F
WHERE C.cpf == F.cpf and C.Status = "Não Ativo" and F.entrada LIKE ? and F.saida IS NOT NULL
ORDER BY Nome''',(data_com_dia+"%",))
frequencias = cursor.fetchall()
if frequencias:
lista_tuplas.append(("__________________",linha,"__________________"))
for frequencia in frequencias:
tupla = (frequencia[0], frequencia[1],frequencia[2])
lista_tuplas.append(tupla)
return lista_tuplas
except:
return False
else:
inter.pop_up("ERROR", | unter = timedelta()
cursor.execute("SELECT entrada, saida FROM Frequencia WHERE entrada LIKE ? and cpf = ? and saida IS NOT NULL",((date+'%', colab[0])))
for frequencia in cursor.fetchall():
entrada = retorna_objeto_date(frequencia[0])
saida = retorna_objeto_date(frequencia[1])
counter += (saida-entrada)
tupla = (colab[1], str(counter))
lista_tuplas.append(tupla)
# Detailed history
elif t | conditional_block |
SFRF_backend.py | [0].split("/")
hora = data_hora[1].split(":")
date = datetime(int(data[0]),int(data[1]),int(data[2]), int(hora[0]),int(hora[1]),int(hora[2]))
return date
class ImageMethods():
# Opens a dialog box for the user to provide the image path
@staticmethod
def get_path(line_path):
tk_dialog = Tk()
tk_dialog.withdraw()
file_types=[('PNG file',"*.png"), ('JPG file', "*.jpg")]
file_name = askopenfilename(filetypes=file_types, title="Selecione o logo")
line_path.set(file_name)
tk_dialog.destroy()
# Returns an image represented as binary data
@staticmethod
def get_binary(image_path):
binary = ''
with open(image_path, 'rb') as image:
binary = image.read()
return binary
@staticmethod
def get_file_type(image_path):
file_type = ""
for i in range(-3,0):
file_type = file_type + image_path[i]
return (file_type)
def retorna_data():
now = datetime.now()
dia = str(now.day)
if len(dia)<2:
dia = '0'+dia
mes = str(now.month)
if len(mes)<2:
mes = '0'+mes
ano = str(now.year)
data = ano+'/'+mes+'/'+dia
return data
# Standard algorithm for validating a CPF
def validar_cpf(cpf):
try:
if cpf.isnumeric() and (len(cpf) == 11):
igual = True
for i in range(1,11):
if cpf[i] != cpf[i-1]:
igual = False
if (igual):
return False
cpf_soma = 0
for i in range(0, 9):
cpf_soma = cpf_soma + int(cpf[i])*(10-i)
if ((cpf_soma*10)%11 == int(cpf[9])) or ((cpf_soma*10)%11 == 10 and int(cpf[9]) == 0):
cpf_soma = 0
for i in range(0, 10):
cpf_soma = cpf_soma + int(cpf[i])*(11-i)
if ((cpf_soma*10)%11 == int(cpf[10])) or ((cpf_soma*10)%11 == 10 and int(cpf[10]) == 0):
return True
else:
return False
return False
except:
return False
def validar_data(data):
try:
datetime.strptime(data, '%d/%m/%Y')
return True
except Exception as e:
return False
# Returns a list of tuples with the collaborator's history
def validar_chamada_historico(setor_, mes, ano, tipo):
setor, mes, ano = setor_.get(), mes.get(), ano.get()
if setor != "*Selecione o setor*":
try:
cursor = con.cursor
dict_mes = {'Janeiro': '01','Fevereiro': '02','Março': '03','Abril': '04','Maio': '05','Junho': '06',
'Julho': '07','Agosto': '08','Setembro': '09','Outubro': '10','Novembro': '11','Dezembro': '12'}
date = ano+"/"+dict_mes[mes]
if setor == "Não Ativos":
cursor.execute("SELECT cpf,Nome From Colaborador WHERE Status = 'Não Ativo'")
else:
cursor.execute("SELECT cpf,Nome From Colaborador WHERE Setor = '%s' ORDER BY Nome"%setor)
lista_tuplas = []
cpf_nome_colab = cursor.fetchall()
if len(cpf_nome_colab) == 0:
inter.pop_up("Atenção", "Não há membros neste setor")
return False
# Summarized history
if tipo=='r':
for colab in cpf_nome_colab:
counter = timedelta()
cursor.execute("SELECT entrada, saida FROM Frequencia WHERE entrada LIKE ? and cpf = ? and saida IS NOT NULL",((date+'%', colab[0])))
for frequencia in cursor.fetchall():
entrada = retorna_objeto_date(frequencia[0])
saida = retorna_objeto_date(frequencia[1])
counter += (saida-entrada)
tupla = (colab[1], str(counter))
lista_tuplas.append(tupla)
# Detailed history
elif tipo=='d':
dias = ["01","02","03","04","05","06","07","08","09","10","11","12","13","14","15","16","17","18","19","20"
,"21","22","23","24","25","26","27","28","29","30","31"]
for dia in dias:
data_com_dia = date+"/"+ dia
linha = dia+" de "+mes+" de "+ano
if setor != "Não Ativos":
cursor.execute('''SELECT Nome, entrada, saida
FROM Colaborador as C, Frequencia as F
WHERE C.Setor = ? and C.cpf == F.cpf and C.Status != "Não Ativo" and F.entrada LIKE ? and F.saida IS NOT NULL
ORDER BY Nome''',(setor, data_com_dia+"%"))
else:
cursor.execute('''SELECT Nome, entrada, saida
FROM Colaborador as C, Frequencia as F
WHERE C.cpf == F.cpf and C.Status = "Não Ativo" and F.entrada LIKE ? and F.saida IS NOT NULL
ORDER BY Nome''',(data_com_dia+"%",))
frequencias = cursor.fetchall()
if frequencias:
lista_tuplas.append(("__________________",linha,"__________________"))
for frequencia in frequencias:
tupla = (frequencia[0], frequencia[1],frequencia[2])
lista_tuplas.append(tupla)
return lista_tuplas
except:
return False
else:
inter.pop_up("ERROR", "Setor inválido!")
return False
def validar_login(login, senha, event=None):
if (login.get() != "") and (senha.get() != ""):
try:
colab = retorna_user(login.get())
# Login is only allowed when the collaborator's status is "Ativo"
if (colab.senha == senha.get()) and (colab.status == "Ativo") and (colab.funcao != "Funcionario"):
return colab
else:
# A collaborator with the "Funcionario" role cannot log into the system
if(colab.funcao == "Funcionario"):
inter.pop_up("ERROR", "Usuário sem permissão de acesso!")
return None
inter.pop_up("ERROR", "Login ou Senha Inválida")
return None
except Exception as e:
inter.pop_up("ERROR", "Login ou Senha Inválida")
return None
else:
inter.pop_up("ERROR", "Login ou Senha Inválida")
return None
def excluir_colaborador(cpf, setor):
try:
cursor = con.cursor
# Deactivates the collaborator
cursor.execute("UPDATE Colaborador SET Status = 'Não Ativo' WHERE cpf = (?)", (cpf,))
con.conexao.commit()
return True
except Exception as e:
inter.pop_up("ERROR", "Não foi possível remover o colaborador.")
return False
def excluir_setor(nome, sigla):
# Returns the list of collaborators in the sector
lista_colab = retorna_lista_colab(nome)
# The sector can only be deleted once it has no collaborators left
if len(lista_colab) == 0:
try:
cursor = con.cursor
cursor.execute("DELETE FROM Setor WHERE Nome = '%s'"%nome)
con.conexao.commit()
return True
except:
inter.pop_up("ERROR", "Não foi possível remover o setor")
else:
inter.pop_up("Error", "O setor deve estar vazio para ser removido")
return False
def validar_consulta(setor):
if setor.get() != "" and setor.get() != "*Selecione o setor*":
return True
else:
inter.pop_up("ERROR", "Consulta Inválida")
return False
def validar_consulta_2(nome_colab, setor):
try:
colab = retorna_colab(nome_colab, setor)
return True
| except:
inter.pop_up("ERROR", "Consulta Inválida")
return False
def retorna_data_sem_hora(data_str):
split1 = data_str.split | identifier_body |
|
SFRF_backend.py | ogo):
cursor = con.cursor
conexao = con.conexao
if nome.get() != "" and sigla.get()!="":
try:
if logo.get() != "":
file_type = ImageMethods.get_file_type(logo.get())
file_binary = ImageMethods.get_binary(logo.get())
cursor.execute("INSERT INTO Setor VALUES (?, ?, ?, ?)", (nome.get(), sigla.get(), file_binary, file_type))
else:
cursor.execute("INSERT INTO Setor VALUES (?, ?, ?, ?)", (nome.get(), sigla.get(), None, None))
conexao.commit()
return True
except:
inter.pop_up("ERROR", "Cadastro Inválido")
else:
inter.pop_up("ERROR", "Cadastro Inválido")
return False
def retorna_lista_colab(setor):
cursor = con.cursor
if setor == 'Não Ativos':
cursor.execute("SELECT Nome FROM Colaborador WHERE Status='Não Ativo'")
else:
cursor.execute("SELECT Nome FROM Colaborador WHERE Setor = '%s' and Status='Ativo' or Status='Afastado' ORDER BY Nome"%setor)
lista = []
for nome in cursor.fetchall():
lista.append(str(nome[0]))
return lista
def retorna_lista_setor():
cursor = con.cursor
cursor.execute('''SELECT Nome
FROM Setor''')
lista = []
for nome in cursor.fetchall():
lista.append(str(nome[0]))
return lista
def retorna_dados_setor(setor):
cursor = con.cursor
try:
cursor.execute("SELECT Nome,Sigla FROM Setor WHERE Nome='%s'"%(setor))
return cursor.fetchall()[0]
except:
pop_up("ERROR", "Setor não encontrado!")
def retorna_colab(nome, setor):
cursor = con.cursor
if type(nome) != str:
if setor == 'Não Ativos':
cursor.execute("SELECT * FROM Colaborador WHERE Nome = '%s' and Status = 'Não Ativo'"%nome.get())
else:
cursor.execute("SELECT * FROM Colaborador WHERE Nome = '%s' and Setor='%s'"%(nome.get(), setor))
else:
if setor == 'Não Ativos':
cursor.execute("SELECT * FROM Colaborador WHERE Nome = '%s' and Status = 'Não Ativo'"%nome)
else:
cursor.execute("SELECT * FROM Colaborador WHERE Nome = '%s' and Setor='%s'"%(nome, setor))
lista = cursor.fetchall()
tupla = lista[0]
colab = Colaborador(tupla[0], tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], tupla[7], tupla[8], tupla[11])
return colab
def retorna_user(cpf):
cursor = con.cursor
cursor.execute("SELECT * FROM Colaborador WHERE cpf = '%s'"%cpf)
lista = cursor.fetchall()
tupla = lista[0]
colab = Colaborador(tupla[0], tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], tupla[7], tupla[8], tupla[11])
return colab
def retorna_objeto_date(data):
data_hora = data.split(" ")
data = data_hora[0].split("/")
hora = data_hora[1].split(":")
date = datetime(int(data[0]),int(data[1]),int(data[2]), int(hora[0]),int(hora[1]),int(hora[2]))
return date
class ImageMethods():
# Opens a dialog box for the user to provide the image path
@staticmethod
def get_path(line_path):
tk_dialog = Tk()
tk_dialog.withdraw()
file_types=[('PNG file',"*.png"), ('JPG file', "*.jpg")]
file_name = askopenfilename(filetypes=file_types, title="Selecione o logo")
line_path.set(file_name)
tk_dialog.destroy()
# Returns an image represented as binary data
@staticmethod
def get_binary(image_path):
binary = ''
with open(image_path, 'rb') as image:
binary = image.read()
return binary
@staticmethod
def get_file_type(image_path):
file_type = ""
for i in range(-3,0):
file_type = file_type + image_path[i]
return (file_type)
def retorna_data():
now = datetime.now()
dia = str(now.day)
if len(dia)<2:
dia = '0'+dia
mes = str(now.month)
if len(mes)<2:
mes = '0'+mes
ano = str(now.year)
data = ano+'/'+mes+'/'+dia
return data
# Standard algorithm for validating a CPF
def validar_cpf(cpf):
try:
if cpf.isnumeric() and (len(cpf) == 11):
igual = True
for i in range(1,11):
if cpf[i] != cpf[i-1]:
igual = False
if (igual):
return False
cpf_soma = 0
for i in range(0, 9):
cpf_soma = cpf_soma + int(cpf[i])*(10-i)
if ((cpf_soma*10)%11 == int(cpf[9])) or ((cpf_soma*10)%11 == 10 and int(cpf[9]) == 0):
cpf_soma = 0
for i in range(0, 10):
cpf_soma = cpf_soma + int(cpf[i])*(11-i)
if ((cpf_soma*10)%11 == int(cpf[10])) or ((cpf_soma*10)%11 == 10 and int(cpf[10]) == 0):
return True
else:
return False
return False
except:
return False
def validar_data(data):
try:
datetime.strptime(data, '%d/%m/%Y')
return True
except Exception as e:
return False
# Returns a list of tuples with the collaborator's history
def validar_chamada_historico(setor_, mes, ano, tipo):
setor, mes, ano = setor_.get(), mes.get(), ano.get()
if setor != "*Selecione o setor*":
try:
cursor = con.cursor
dict_mes = {'Janeiro': '01','Fevereiro': '02','Março': '03','Abril': '04','Maio': '05','Junho': '06',
'Julho': '07','Agosto': '08','Setembro': '09','Outubro': '10','Novembro': '11','Dezembro': '12'}
date = ano+"/"+dict_mes[mes]
if setor == "Não Ativos":
cursor.execute("SELECT cpf,Nome From Colaborador WHERE Status = 'Não Ativo'")
else:
cursor.execute("SELECT cpf,Nome From Colaborador WHERE Setor = '%s' ORDER BY Nome"%setor)
lista_tuplas = []
cpf_nome_colab = cursor.fetchall()
if len(cpf_nome_colab) == 0:
inter.pop_up("Atenção", "Não há membros neste setor")
return False
# Summarized history
if tipo=='r':
for colab in cpf_nome_colab:
counter = timedelta()
cursor.execute("SELECT entrada, saida FROM Frequencia WHERE entrada LIKE ? and cpf = ? and saida IS NOT NULL",((date+'%', colab[0])))
for frequencia in cursor.fetchall():
entrada = retorna_objeto_date(frequencia[0])
saida = retorna_objeto_date(frequencia[1])
counter += (saida-entrada)
tupla = (colab[1], str(counter))
lista_tuplas.append(tupla)
# Detailed history
elif tipo=='d':
dias = ["01","02","03","04","05","06","07","08","09","10","11","12","13","14","15","16","17","18","19","20"
,"21","22","23","24","25","26","27","28","29","30","31"]
for dia in dias:
data_com_dia = date+"/"+ dia
linha = dia+" de "+mes+" de "+ano
if setor != "Não Ativos":
cursor.execute('''SELECT Nome, entrada, saida
FROM Colaborador as C, Frequencia as F
WHERE C.Setor = ? and C.cpf == F.cpf and C.Status != "Não Ativo" and F.entrada LIKE ? and F.saida IS NOT NULL
ORDER BY Nome''',(setor, data_com_dia+"%"))
else:
cursor.execute('''SELECT Nome, entrada, saida
FROM Colaborador as C, Frequencia as F
| (nome, sigla, l | identifier_name |
|
xcode.rs | fn from_project_info(pi: &XcodeProjectInfo) -> Result<Option<InfoPlist>> {
if_chain! {
if let Some(config) = pi.get_configuration("release")
.or_else(|| pi.get_configuration("debug"));
if let Some(target) = pi.get_first_target();
then {
let vars = pi.get_build_vars(target, config)?;
if let Some(path) = vars.get("INFOPLIST_FILE") {
let base = vars.get("PROJECT_DIR").map(Path::new)
.unwrap_or_else(|| pi.base_path());
let path = base.join(path);
return Ok(Some(InfoPlist::load_and_process(path, &vars)?))
}
}
}
Ok(None)
}
/// Loads an info plist file from a path and processes it with the given vars
pub fn load_and_process<P: AsRef<Path>>(
path: P,
vars: &HashMap<String, String>,
) -> Result<InfoPlist> {
// do we want to preprocess the plist file?
let plist = if vars.get("INFOPLIST_PREPROCESS").map(String::as_str) == Some("YES") {
let mut c = process::Command::new("cc");
c.arg("-xc").arg("-P").arg("-E");
if let Some(defs) = vars.get("INFOPLIST_OTHER_PREPROCESSOR_FLAGS") {
for token in defs.split_whitespace() {
c.arg(token);
}
}
if let Some(defs) = vars.get("INFOPLIST_PREPROCESSOR_DEFINITIONS") {
for token in defs.split_whitespace() {
c.arg(format!("-D{token}"));
}
}
c.arg(path.as_ref());
let p = c.output()?;
InfoPlist::from_reader(Cursor::new(&p.stdout[..]))
} else {
InfoPlist::from_path(path).or_else(|err| {
/*
This is sort of an edge-case, as XCode is not producing an `Info.plist` file
by default anymore. However, it still does so for some templates.
For example iOS Storyboard template will produce a partial `Info.plist` file,
with a content only related to the Storyboard itself, but not the project as a whole. eg.
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>UIApplicationSceneManifest</key>
<dict>
<key>UISceneConfigurations</key>
<dict>
<key>UIWindowSceneSessionRoleApplication</key>
<array>
<dict>
<key>UISceneStoryboardFile</key>
<string>Main</string>
</dict>
</array>
</dict>
</dict>
</dict>
</plist>
This causes a sort of false-positive, as `INFOPLIST_FILE` is present, yet it contains
no data required by the CLI to correctly produce a `InfoPlist` struct.
In the case like that, we try to fallback to env variables collected either by `xcodebuild` binary,
or directly through `env` if we were called from within XCode itself.
*/
InfoPlist::from_env_vars(vars).map_err(|e| e.context(err))
})
};
plist.map(|raw| InfoPlist {
name: expand_xcodevars(&raw.name, vars),
bundle_id: expand_xcodevars(&raw.bundle_id, vars),
version: expand_xcodevars(&raw.version, vars),
build: expand_xcodevars(&raw.build, vars),
})
}
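// Illustrative note (not part of the original source): when INFOPLIST_PREPROCESS=YES,
// the branch above is roughly equivalent to running
//   cc -xc -P -E <INFOPLIST_OTHER_PREPROCESSOR_FLAGS> -D<each preprocessor definition> <Info.plist path>
// and parsing the preprocessed plist from stdout; otherwise the file is read as-is,
// with `from_env_vars` as the fallback when the plist is only partial.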
/// Loads an info plist from provided environment variables list
pub fn from_env_vars(vars: &HashMap<String, String>) -> Result<InfoPlist> {
let name = vars
.get("PRODUCT_NAME")
.map(String::to_owned)
.ok_or_else(|| format_err!("PRODUCT_NAME is missing"))?;
let bundle_id = vars
.get("PRODUCT_BUNDLE_IDENTIFIER")
.map(String::to_owned)
.ok_or_else(|| format_err!("PRODUCT_BUNDLE_IDENTIFIER is missing"))?;
let version = vars
.get("MARKETING_VERSION")
.map(String::to_owned)
.ok_or_else(|| format_err!("MARKETING_VERSION is missing"))?;
let build = vars
.get("CURRENT_PROJECT_VERSION")
.map(String::to_owned)
.ok_or_else(|| format_err!("CURRENT_PROJECT_VERSION is missing"))?;
Ok(InfoPlist {
name,
bundle_id,
version,
build,
})
}
/// Loads an info plist file from a path and does not process it.
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<InfoPlist> {
let mut f = fs::File::open(path.as_ref()).context("Could not open Info.plist file")?;
InfoPlist::from_reader(&mut f)
}
/// Loads an info plist file from a reader.
pub fn from_reader<R: SeekRead>(rdr: R) -> Result<InfoPlist> {
let rdr = BufReader::new(rdr);
plist::from_reader(rdr).context("Could not parse Info.plist file")
}
pub fn get_release_name(&self) -> String {
format!("{}@{}", self.bundle_id(), self.version())
}
pub fn version(&self) -> &str {
&self.version
}
pub fn build(&self) -> &str {
&self.build
}
pub fn name(&self) -> &str {
&self.name
}
pub fn bundle_id(&self) -> &str {
&self.bundle_id
}
}
/// Helper struct that allows the current execution to detach from
/// the xcode console and continue in the background. This becomes
/// a dummy shim for non xcode runs or platforms.
pub struct MayDetach<'a> {
output_file: Option<TempFile>,
#[allow(dead_code)]
task_name: &'a str,
}
impl<'a> MayDetach<'a> {
fn new(task_name: &'a str) -> MayDetach<'a> {
MayDetach {
output_file: None,
task_name,
}
}
/// Returns true if we are detached from xcode
pub fn is_detached(&self) -> bool {
self.output_file.is_some()
}
/// If we are launched from xcode this detaches us from the xcode console
/// and continues execution in the background. From this moment on output
/// is captured and the user is notified with notifications.
#[cfg(target_os = "macos")]
pub fn may_detach(&mut self) -> Result<bool> {
if !launched_from_xcode() {
return Ok(false);
}
println!("Continuing in background.");
show_notification("Sentry", &format!("{} starting", self.task_name))?;
let output_file = TempFile::create()?;
daemonize_redirect(
Some(output_file.path()),
Some(output_file.path()),
ChdirMode::NoChdir,
)
.unwrap();
self.output_file = Some(output_file);
Ok(true)
}
/// For non mac platforms this just never detaches.
#[cfg(not(target_os = "macos"))]
pub fn may_detach(&mut self) -> Result<bool> {
Ok(false)
}
/// Wraps the execution of a code block. Does not detach until someone
/// calls into `may_detach`.
#[cfg(target_os = "macos")]
pub fn wrap<T, F: FnOnce(&mut MayDetach<'_>) -> Result<T>>(
task_name: &'a str,
f: F,
) -> Result<T> {
use std::time::Duration;
let mut md = MayDetach::new(task_name);
match f(&mut md) {
Ok(x) => {
md.show_done()?;
Ok(x)
}
Err(err) => {
if let Some(ref output_file) = md.output_file {
crate::utils::system::print_error(&err);
if md.show_critical_info()? {
open::that(output_file.path())?;
std::thread::sleep(Duration::from_millis(5000));
}
}
Err(err)
}
}
}
/// Dummy wrap call that never detaches for non mac platforms.
#[cfg(not(target_os = "macos"))]
pub fn wrap<T, F: FnOnce(&mut MayDetach) -> Result<T>>(task_name: &'a str, f: F) -> Result<T> {
f(&mut MayDetach::new(task_name))
}
#[cfg(target_os = "macos")]
fn show_critical_info(&self) -> Result<bool> {
show_critical_info(
&format!("{} failed", self.task_name),
"The Sentry build step failed while running in the background. \
You can ignore this error or view details to attempt to resolve \
it. Ignoring it might cause your crashes not to be handled \
properly.",
)
}
#[cfg(target_os = "macos")]
fn show_done(&self) -> Result<()> {
if self.is_detached() | {
show_notification("Sentry", &format!("{} finished", self.task_name))?;
} | conditional_block |
|
xcode.rs | ),
})
}
/// Loads an info plist from provided environment variables list
pub fn from_env_vars(vars: &HashMap<String, String>) -> Result<InfoPlist> {
let name = vars
.get("PRODUCT_NAME")
.map(String::to_owned)
.ok_or_else(|| format_err!("PRODUCT_NAME is missing"))?;
let bundle_id = vars
.get("PRODUCT_BUNDLE_IDENTIFIER")
.map(String::to_owned)
.ok_or_else(|| format_err!("PRODUCT_BUNDLE_IDENTIFIER is missing"))?;
let version = vars
.get("MARKETING_VERSION")
.map(String::to_owned)
.ok_or_else(|| format_err!("MARKETING_VERSION is missing"))?;
let build = vars
.get("CURRENT_PROJECT_VERSION")
.map(String::to_owned)
.ok_or_else(|| format_err!("CURRENT_PROJECT_VERSION is missing"))?;
Ok(InfoPlist {
name,
bundle_id,
version,
build,
})
}
/// Loads an info plist file from a path and does not process it.
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<InfoPlist> {
let mut f = fs::File::open(path.as_ref()).context("Could not open Info.plist file")?;
InfoPlist::from_reader(&mut f)
}
/// Loads an info plist file from a reader.
pub fn from_reader<R: SeekRead>(rdr: R) -> Result<InfoPlist> {
let rdr = BufReader::new(rdr);
plist::from_reader(rdr).context("Could not parse Info.plist file")
}
pub fn get_release_name(&self) -> String {
format!("{}@{}", self.bundle_id(), self.version())
}
pub fn version(&self) -> &str {
&self.version
}
pub fn build(&self) -> &str {
&self.build
}
pub fn name(&self) -> &str {
&self.name
}
pub fn bundle_id(&self) -> &str {
&self.bundle_id
}
}
/// Helper struct that allows the current execution to detach from
/// the xcode console and continue in the background. This becomes
/// a dummy shim for non xcode runs or platforms.
pub struct MayDetach<'a> {
output_file: Option<TempFile>,
#[allow(dead_code)]
task_name: &'a str,
}
impl<'a> MayDetach<'a> {
fn new(task_name: &'a str) -> MayDetach<'a> {
MayDetach {
output_file: None,
task_name,
}
}
/// Returns true if we are detached from xcode
pub fn is_detached(&self) -> bool {
self.output_file.is_some()
}
/// If we are launched from xcode this detaches us from the xcode console
/// and continues execution in the background. From this moment on output
/// is captured and the user is notified with notifications.
#[cfg(target_os = "macos")]
pub fn may_detach(&mut self) -> Result<bool> {
if !launched_from_xcode() {
return Ok(false);
}
println!("Continuing in background.");
show_notification("Sentry", &format!("{} starting", self.task_name))?;
let output_file = TempFile::create()?;
daemonize_redirect(
Some(output_file.path()),
Some(output_file.path()),
ChdirMode::NoChdir,
)
.unwrap();
self.output_file = Some(output_file);
Ok(true)
}
/// For non mac platforms this just never detaches.
#[cfg(not(target_os = "macos"))]
pub fn may_detach(&mut self) -> Result<bool> {
Ok(false)
}
/// Wraps the execution of a code block. Does not detach until someone
/// calls into `may_detach`.
#[cfg(target_os = "macos")]
pub fn wrap<T, F: FnOnce(&mut MayDetach<'_>) -> Result<T>>(
task_name: &'a str,
f: F,
) -> Result<T> {
use std::time::Duration;
let mut md = MayDetach::new(task_name);
match f(&mut md) {
Ok(x) => {
md.show_done()?;
Ok(x)
}
Err(err) => {
if let Some(ref output_file) = md.output_file {
crate::utils::system::print_error(&err);
if md.show_critical_info()? {
open::that(output_file.path())?;
std::thread::sleep(Duration::from_millis(5000));
}
}
Err(err)
}
}
}
/// Dummy wrap call that never detaches for non mac platforms.
#[cfg(not(target_os = "macos"))]
pub fn wrap<T, F: FnOnce(&mut MayDetach) -> Result<T>>(task_name: &'a str, f: F) -> Result<T> {
f(&mut MayDetach::new(task_name))
}
#[cfg(target_os = "macos")]
fn show_critical_info(&self) -> Result<bool> {
show_critical_info(
&format!("{} failed", self.task_name),
"The Sentry build step failed while running in the background. \
You can ignore this error or view details to attempt to resolve \
it. Ignoring it might cause your crashes not to be handled \
properly.",
)
}
#[cfg(target_os = "macos")]
fn show_done(&self) -> Result<()> {
if self.is_detached() {
show_notification("Sentry", &format!("{} finished", self.task_name))?;
}
Ok(())
}
}
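// Usage sketch (assumption, not from the original file): build-phase commands typically
// wrap their work so output can be captured when running under Xcode, e.g.
//   MayDetach::wrap("Debug file upload", |md| {
//       md.may_detach()?;
//       // ... long-running work ...
//       Ok(())
//   })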
/// Returns true if we were invoked from xcode
#[cfg(target_os = "macos")]
pub fn launched_from_xcode() -> bool {
if env::var("XCODE_VERSION_ACTUAL").is_err() {
return false;
}
let mut pid = unsafe { getpid() as u32 };
while let Some(parent) = mac_process_info::get_parent_pid(pid) {
if parent == 1 {
break;
}
if let Ok(name) = mac_process_info::get_process_name(parent) {
if name == "Xcode" {
return true;
}
}
pid = parent;
}
false
}
/// Returns true if we were invoked from xcode
#[cfg(not(target_os = "macos"))]
pub fn launched_from_xcode() -> bool {
false
}
/// Shows a dialog in xcode and blocks. The dialog will have a title and a
/// message as well as the buttons "Show details" and "Ignore". Returns
/// `true` if the `show details` button has been pressed.
#[cfg(target_os = "macos")]
pub fn show_critical_info(title: &str, message: &str) -> Result<bool> {
use serde::Serialize;
lazy_static! {
static ref SCRIPT: osascript::JavaScript = osascript::JavaScript::new(
"
var App = Application('XCode');
App.includeStandardAdditions = true;
return App.displayAlert($params.title, {
message: $params.message,
as: \"critical\",
buttons: [\"Show details\", \"Ignore\"]
});
"
);
}
#[derive(Serialize)]
struct AlertParams<'a> {
title: &'a str,
message: &'a str,
}
#[derive(Debug, Deserialize)]
struct AlertResult {
#[serde(rename = "buttonReturned")]
button: String,
}
let rv: AlertResult = SCRIPT
.execute_with_params(AlertParams { title, message })
.context("Failed to display Xcode dialog")?;
Ok(&rv.button != "Ignore")
}
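// Note (illustrative): a `true` return value means "Show details" was pressed; callers
// such as `MayDetach::wrap` use it to decide whether to open the captured log file.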
/// Shows a notification in xcode
#[cfg(target_os = "macos")]
pub fn show_notification(title: &str, message: &str) -> Result<()> {
use crate::config::Config;
use serde::Serialize;
lazy_static! {
static ref SCRIPT: osascript::JavaScript = osascript::JavaScript::new(
"
var App = Application.currentApplication();
App.includeStandardAdditions = true;
App.displayNotification($params.message, {
withTitle: $params.title
});
"
);
}
let config = Config::current();
if !config.show_notifications()? {
return Ok(());
}
#[derive(Serialize)]
struct NotificationParams<'a> {
title: &'a str,
message: &'a str,
}
SCRIPT
.execute_with_params(NotificationParams { title, message })
.context("Failed to display Xcode notification")?;
Ok(())
}
#[test]
fn test_expansion() | {
let mut vars = HashMap::new();
vars.insert("FOO_BAR".to_string(), "foo bar baz / blah".to_string());
assert_eq!(
expand_xcodevars("A$(FOO_BAR:rfc1034identifier)B", &vars),
"Afoo-bar-baz-blahB"
);
assert_eq!(
expand_xcodevars("A$(FOO_BAR:identifier)B", &vars),
"Afoo_bar_baz_blahB"
);
assert_eq!(
expand_xcodevars("A${FOO_BAR:identifier}B", &vars),
"Afoo_bar_baz_blahB"
);
} | identifier_body |
|
xcode.rs | => {
rv.project.path = path.as_ref().canonicalize()?;
Ok(rv.project)
}
Err(e) => {
warn!("Your .xcodeproj might be malformed. Command `xcodebuild -list -json -project {}` failed to produce a valid JSON output.", path.as_ref().display());
Err(e.into())
}
}
}
pub fn base_path(&self) -> &Path {
self.path.parent().unwrap()
}
pub fn get_build_vars(
&self,
target: &str,
configuration: &str,
) -> Result<HashMap<String, String>> {
let mut rv = HashMap::new();
let p = process::Command::new("xcodebuild")
.arg("-showBuildSettings")
.arg("-project")
.arg(&self.path)
.arg("-target")
.arg(target)
.arg("-configuration")
.arg(configuration)
.output()?;
for line_rv in p.stdout.lines() {
let line = line_rv?;
if let Some(suffix) = line.strip_prefix(" ") {
let mut sep = suffix.splitn(2, " = ");
if_chain! {
if let Some(key) = sep.next();
if let Some(value) = sep.next();
then {
rv.insert(key.to_owned(), value.to_owned());
}
}
}
}
Ok(rv)
}
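// Illustrative note: `xcodebuild -showBuildSettings` prints lines of the form
//     INFOPLIST_FILE = MyApp/Info.plist
// (four leading spaces, " = " separator), which is what the loop above splits into
// the returned key/value map. "MyApp/Info.plist" is just a made-up example value.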
/// Return the first target
pub fn get_first_target(&self) -> Option<&str> {
if !self.targets.is_empty() {
Some(&self.targets[0])
} else {
None
}
}
/// Returns the config with a certain name
pub fn get_configuration(&self, name: &str) -> Option<&str> {
let name = name.to_lowercase();
self.configurations
.iter()
.find(|&cfg| cfg.to_lowercase() == name)
.map(|v| v.as_ref())
}
}
impl InfoPlist {
/// Loads a processed plist file.
pub fn discover_from_env() -> Result<Option<InfoPlist>> {
// if we are loaded directly from xcode we can trust the os environment
// and pass those variables to the processor.
if env::var("XCODE_VERSION_ACTUAL").is_ok() {
let vars: HashMap<_, _> = env::vars().collect();
if let Some(filename) = vars.get("INFOPLIST_FILE") {
let base = vars.get("PROJECT_DIR").map(String::as_str).unwrap_or(".");
let path = env::current_dir().unwrap().join(base).join(filename);
Ok(Some(InfoPlist::load_and_process(path, &vars)?))
} else if let Ok(default_plist) = InfoPlist::from_env_vars(&vars) {
Ok(Some(default_plist))
} else {
Ok(None)
}
// otherwise, we discover the project info from the current path and
// invoke xcodebuild to give us the project settings for the first
// target.
} else {
if_chain! {
if let Ok(here) = env::current_dir();
if let Some(pi) = get_xcode_project_info(&here)?;
then {
InfoPlist::from_project_info(&pi)
} else {
Ok(None)
}
}
}
}
/// Loads an info plist from a given project info
pub fn from_project_info(pi: &XcodeProjectInfo) -> Result<Option<InfoPlist>> {
if_chain! {
if let Some(config) = pi.get_configuration("release")
.or_else(|| pi.get_configuration("debug"));
if let Some(target) = pi.get_first_target();
then {
let vars = pi.get_build_vars(target, config)?;
if let Some(path) = vars.get("INFOPLIST_FILE") {
let base = vars.get("PROJECT_DIR").map(Path::new)
.unwrap_or_else(|| pi.base_path());
let path = base.join(path);
return Ok(Some(InfoPlist::load_and_process(path, &vars)?))
}
}
}
Ok(None)
}
/// Loads an info plist file from a path and processes it with the given vars
pub fn load_and_process<P: AsRef<Path>>(
path: P,
vars: &HashMap<String, String>,
) -> Result<InfoPlist> {
// do we want to preprocess the plist file?
let plist = if vars.get("INFOPLIST_PREPROCESS").map(String::as_str) == Some("YES") {
let mut c = process::Command::new("cc");
c.arg("-xc").arg("-P").arg("-E");
if let Some(defs) = vars.get("INFOPLIST_OTHER_PREPROCESSOR_FLAGS") {
for token in defs.split_whitespace() {
c.arg(token);
}
}
if let Some(defs) = vars.get("INFOPLIST_PREPROCESSOR_DEFINITIONS") {
for token in defs.split_whitespace() {
c.arg(format!("-D{token}"));
}
}
c.arg(path.as_ref());
let p = c.output()?;
InfoPlist::from_reader(Cursor::new(&p.stdout[..]))
} else {
InfoPlist::from_path(path).or_else(|err| {
/*
This is sort of an edge-case, as XCode is not producing an `Info.plist` file
by default anymore. However, it still does so for some templates.
For example iOS Storyboard template will produce a partial `Info.plist` file,
with a content only related to the Storyboard itself, but not the project as a whole. eg.
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>UIApplicationSceneManifest</key>
<dict>
<key>UISceneConfigurations</key>
<dict>
<key>UIWindowSceneSessionRoleApplication</key>
<array>
<dict>
<key>UISceneStoryboardFile</key>
<string>Main</string>
</dict>
</array>
</dict>
</dict>
</dict>
</plist>
This causes a sort of false-positive, as `INFOPLIST_FILE` is present, yet it contains
no data required by the CLI to correctly produce a `InfoPlist` struct.
In the case like that, we try to fallback to env variables collected either by `xcodebuild` binary,
or directly through `env` if we were called from within XCode itself.
*/
InfoPlist::from_env_vars(vars).map_err(|e| e.context(err))
})
};
plist.map(|raw| InfoPlist {
name: expand_xcodevars(&raw.name, vars),
bundle_id: expand_xcodevars(&raw.bundle_id, vars),
version: expand_xcodevars(&raw.version, vars),
build: expand_xcodevars(&raw.build, vars),
})
}
/// Loads an info plist from provided environment variables list
pub fn from_env_vars(vars: &HashMap<String, String>) -> Result<InfoPlist> {
let name = vars
.get("PRODUCT_NAME")
.map(String::to_owned)
.ok_or_else(|| format_err!("PRODUCT_NAME is missing"))?;
let bundle_id = vars
.get("PRODUCT_BUNDLE_IDENTIFIER")
.map(String::to_owned)
.ok_or_else(|| format_err!("PRODUCT_BUNDLE_IDENTIFIER is missing"))?;
let version = vars
.get("MARKETING_VERSION")
.map(String::to_owned)
.ok_or_else(|| format_err!("MARKETING_VERSION is missing"))?;
let build = vars
.get("CURRENT_PROJECT_VERSION")
.map(String::to_owned)
.ok_or_else(|| format_err!("CURRENT_PROJECT_VERSION is missing"))?;
Ok(InfoPlist {
name,
bundle_id,
version,
build,
})
}
/// Loads an info plist file from a path and does not process it.
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<InfoPlist> {
let mut f = fs::File::open(path.as_ref()).context("Could not open Info.plist file")?;
InfoPlist::from_reader(&mut f)
}
/// Loads an info plist file from a reader.
pub fn from_reader<R: SeekRead>(rdr: R) -> Result<InfoPlist> {
let rdr = BufReader::new(rdr);
plist::from_reader(rdr).context("Could not parse Info.plist file")
}
pub fn get_release_name(&self) -> String {
format!("{}@{}", self.bundle_id(), self.version())
}
pub fn version(&self) -> &str {
&self.version
}
pub fn build(&self) -> &str {
&self.build
}
pub fn name(&self) -> &str {
&self.name
}
pub fn bundle_id(&self) -> &str {
&self.bundle_id
}
}
/// Helper struct that allows the current execution to detach from
/// the xcode console and continue in the background. This becomes
/// a dummy shim for non xcode runs or platforms.
pub struct | MayDetach | identifier_name |
|
xcode.rs | : AsRef<Path>>(
path: P,
vars: &HashMap<String, String>,
) -> Result<InfoPlist> {
// do we want to preprocess the plist file?
let plist = if vars.get("INFOPLIST_PREPROCESS").map(String::as_str) == Some("YES") {
let mut c = process::Command::new("cc");
c.arg("-xc").arg("-P").arg("-E");
if let Some(defs) = vars.get("INFOPLIST_OTHER_PREPROCESSOR_FLAGS") {
for token in defs.split_whitespace() {
c.arg(token);
}
}
if let Some(defs) = vars.get("INFOPLIST_PREPROCESSOR_DEFINITIONS") {
for token in defs.split_whitespace() {
c.arg(format!("-D{token}"));
}
}
c.arg(path.as_ref());
let p = c.output()?;
InfoPlist::from_reader(Cursor::new(&p.stdout[..]))
} else {
InfoPlist::from_path(path).or_else(|err| {
/*
This is sort of an edge-case, as XCode is not producing an `Info.plist` file
by default anymore. However, it still does so for some templates.
For example iOS Storyboard template will produce a partial `Info.plist` file,
with a content only related to the Storyboard itself, but not the project as a whole. eg.
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>UIApplicationSceneManifest</key>
<dict>
<key>UISceneConfigurations</key>
<dict>
<key>UIWindowSceneSessionRoleApplication</key>
<array>
<dict>
<key>UISceneStoryboardFile</key>
<string>Main</string>
</dict>
</array>
</dict>
</dict>
</dict>
</plist>
This causes a sort of false-positive, as `INFOPLIST_FILE` is present, yet it contains
no data required by the CLI to correctly produce a `InfoPlist` struct.
In the case like that, we try to fallback to env variables collected either by `xcodebuild` binary,
or directly through `env` if we were called from within XCode itself.
*/
InfoPlist::from_env_vars(vars).map_err(|e| e.context(err))
})
};
plist.map(|raw| InfoPlist {
name: expand_xcodevars(&raw.name, vars),
bundle_id: expand_xcodevars(&raw.bundle_id, vars),
version: expand_xcodevars(&raw.version, vars),
build: expand_xcodevars(&raw.build, vars),
})
}
/// Loads an info plist from provided environment variables list
pub fn from_env_vars(vars: &HashMap<String, String>) -> Result<InfoPlist> {
let name = vars
.get("PRODUCT_NAME")
.map(String::to_owned)
.ok_or_else(|| format_err!("PRODUCT_NAME is missing"))?;
let bundle_id = vars
.get("PRODUCT_BUNDLE_IDENTIFIER")
.map(String::to_owned)
.ok_or_else(|| format_err!("PRODUCT_BUNDLE_IDENTIFIER is missing"))?;
let version = vars
.get("MARKETING_VERSION")
.map(String::to_owned)
.ok_or_else(|| format_err!("MARKETING_VERSION is missing"))?;
let build = vars
.get("CURRENT_PROJECT_VERSION")
.map(String::to_owned)
.ok_or_else(|| format_err!("CURRENT_PROJECT_VERSION is missing"))?;
Ok(InfoPlist {
name,
bundle_id,
version,
build,
})
}
/// Loads an info plist file from a path and does not process it.
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<InfoPlist> {
let mut f = fs::File::open(path.as_ref()).context("Could not open Info.plist file")?;
InfoPlist::from_reader(&mut f)
}
/// Loads an info plist file from a reader.
pub fn from_reader<R: SeekRead>(rdr: R) -> Result<InfoPlist> {
let rdr = BufReader::new(rdr);
plist::from_reader(rdr).context("Could not parse Info.plist file")
}
pub fn get_release_name(&self) -> String {
format!("{}@{}", self.bundle_id(), self.version())
}
pub fn version(&self) -> &str {
&self.version
}
pub fn build(&self) -> &str {
&self.build
}
pub fn name(&self) -> &str {
&self.name
}
pub fn bundle_id(&self) -> &str {
&self.bundle_id
}
}
/// Helper struct that allows the current execution to detach from
/// the xcode console and continue in the background. This becomes
/// a dummy shim for non-Xcode runs or platforms.
pub struct MayDetach<'a> {
output_file: Option<TempFile>,
#[allow(dead_code)]
task_name: &'a str,
}
impl<'a> MayDetach<'a> {
fn new(task_name: &'a str) -> MayDetach<'a> {
MayDetach {
output_file: None,
task_name,
}
}
/// Returns true if we are detached from Xcode
pub fn is_detached(&self) -> bool {
self.output_file.is_some()
}
/// If we are launched from Xcode, this detaches us from the Xcode console
/// and continues execution in the background. From this moment on, output
/// is captured and the user is kept informed via notifications.
#[cfg(target_os = "macos")]
pub fn may_detach(&mut self) -> Result<bool> {
if !launched_from_xcode() {
return Ok(false);
}
println!("Continuing in background.");
show_notification("Sentry", &format!("{} starting", self.task_name))?;
let output_file = TempFile::create()?;
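// Detach into the background and redirect stdout/stderr into the temp file so the
// captured output can be opened later if the task fails.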
daemonize_redirect(
Some(output_file.path()),
Some(output_file.path()),
ChdirMode::NoChdir,
)
.unwrap();
self.output_file = Some(output_file);
Ok(true)
}
/// For non-macOS platforms this never detaches.
#[cfg(not(target_os = "macos"))]
pub fn may_detach(&mut self) -> Result<bool> {
Ok(false)
}
/// Wraps the execution of a code block. Does not detach until someone
/// calls into `may_detach`.
#[cfg(target_os = "macos")]
pub fn wrap<T, F: FnOnce(&mut MayDetach<'_>) -> Result<T>>(
task_name: &'a str,
f: F,
) -> Result<T> {
use std::time::Duration;
let mut md = MayDetach::new(task_name);
match f(&mut md) {
Ok(x) => {
md.show_done()?;
Ok(x)
}
Err(err) => {
if let Some(ref output_file) = md.output_file {
crate::utils::system::print_error(&err);
if md.show_critical_info()? {
open::that(output_file.path())?;
std::thread::sleep(Duration::from_millis(5000));
}
}
Err(err)
}
}
}
/// Dummy wrap call that never detaches on non-macOS platforms.
#[cfg(not(target_os = "macos"))]
pub fn wrap<T, F: FnOnce(&mut MayDetach) -> Result<T>>(task_name: &'a str, f: F) -> Result<T> {
f(&mut MayDetach::new(task_name))
}
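// A typical call site (hypothetical helper names) wraps the whole task and opts into
// detaching once no further interactive input is needed:
//
//     MayDetach::wrap("Debug symbol upload", |md| {
//         md.may_detach()?;
//         upload_symbols() // stands in for the actual work returning Result<T>
//     })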
#[cfg(target_os = "macos")]
fn show_critical_info(&self) -> Result<bool> {
show_critical_info(
&format!("{} failed", self.task_name),
"The Sentry build step failed while running in the background. \
You can ignore this error or view details to attempt to resolve \
it. Ignoring it might cause your crashes not to be handled \
properly.",
)
}
#[cfg(target_os = "macos")]
fn show_done(&self) -> Result<()> {
if self.is_detached() {
show_notification("Sentry", &format!("{} finished", self.task_name))?;
}
Ok(())
}
}
/// Returns true if we were invoked from xcode
#[cfg(target_os = "macos")]
pub fn launched_from_xcode() -> bool {
if env::var("XCODE_VERSION_ACTUAL").is_err() {
return false;
}
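// Walk up the parent process chain (stopping at launchd, pid 1) and look for an
// ancestor process named "Xcode".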
let mut pid = unsafe { getpid() as u32 };
while let Some(parent) = mac_process_info::get_parent_pid(pid) {
if parent == 1 {
break;
}
if let Ok(name) = mac_process_info::get_process_name(parent) {
if name == "Xcode" {
return true;
}
}
pid = parent;
}
false
}
/// Returns true if we were invoked from xcode
#[cfg(not(target_os = "macos"))]
pub fn launched_from_xcode() -> bool {
false
} | random_line_split |
||
elf.rs | , format_err, Error, Fail, ResultExt};
use goblin::elf::{
header::{EM_BPF, ET_REL},
section_header::{SectionHeader, SHT_PROGBITS, SHT_REL},
sym::{Sym, STB_GLOBAL},
};
use ebpf_core::{
ffi, prog, Attach, Insn, Map, Object, Opcode, Program, Type, BPF_LICENSE_SEC, BPF_MAPS_SEC,
BPF_VERSION_SEC, BTF_ELF_SEC, BTF_EXT_ELF_SEC,
};
use crate::parser::Parser;
use crate::prog::prog_type_by_name;
impl<'a> Parser<goblin::elf::Elf<'a>> {
pub fn parse(&self, buf: &[u8]) -> Result<Object, Error> | buf.get(sec.file_range()).ok_or_else(|| {
format_err!(
"`{}` section data {:?} out of bound",
name,
sec.file_range()
)
})
};
match name {
BPF_LICENSE_SEC if sec.sh_type == SHT_PROGBITS => {
license = Some(
CStr::from_bytes_with_nul(section_data()?)?
.to_str()?
.to_owned(),
);
debug!("kernel license: {}", license.as_ref().unwrap());
}
BPF_VERSION_SEC if sec.sh_type == SHT_PROGBITS => {
version = Some(u32::from_ne_bytes(section_data()?.try_into()?));
debug!("kernel version: {:x}", version.as_ref().unwrap());
}
BPF_MAPS_SEC => {
debug!("`{}` section", name);
maps_section = Some((idx, sec));
}
BTF_ELF_SEC => {
// TODO btf__new
debug!("`{}` section", name);
}
BTF_EXT_ELF_SEC => {
// TODO btf_ext_data
debug!("`{}` section", name);
}
_ if sec.sh_type == SHT_PROGBITS && sec.is_executable() && sec.sh_size > 0 => {
if name == ".text" {
text_section = Some(idx);
}
// If type is not specified, try to guess it based on section name.
let (ty, attach) = match self.prog_type {
Some(ty) if ty != Type::Unspec => (ty, self.expected_attach_type),
_ => prog_type_by_name(name)
.ok_or_else(|| format_err!("unexpected section name: {}", name))?,
};
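// Reinterpret the executable section's bytes as a slice of eBPF instructions.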
let insns = unsafe {
let data = buf.as_ptr().add(sec.sh_offset as usize);
let len = sec.sh_size as usize / mem::size_of::<Insn>();
slice::from_raw_parts(data as *const _, len)
};
debug!(
"{:?} kernel program #{} @ section `{}` with {} insns",
ty,
idx,
name,
insns.len()
);
programs.push((name, ty, attach, idx, insns.to_vec()));
}
_ if sec.sh_type == SHT_REL => {}
_ => {
trace!("ignore `{}` section", name);
}
}
}
let maps = if let Some((idx, sec)) = maps_section {
self.init_maps(buf, idx, sec)?
} else {
Vec::new()
};
let mut programs = self
.resolve_program_names(programs, text_section)
.context("resolve program names")?;
self.relocate_programs(
&mut programs,
&maps,
maps_section.map(|(idx, _)| idx),
text_section,
)?;
Ok(Object {
license,
version,
programs,
maps,
})
}
fn init_maps(&self, buf: &[u8], idx: usize, sec: &SectionHeader) -> Result<Vec<Map>, Error> {
let mut maps = Vec::new();
let data = buf.get(sec.file_range()).ok_or_else(|| {
format_err!("`maps` section data {:?} out of bound", sec.file_range())
})?;
let nr_maps = self
.obj
.syms
.iter()
.filter(|sym| sym.st_shndx == idx)
.count();
let map_def_sz = data.len() / nr_maps;
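// Every map symbol in this section is assumed to describe an equally sized map definition.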
for sym in self.obj.syms.iter().filter(|sym| sym.st_shndx == idx) {
let name = self
.obj
.strtab
.get(sym.st_name)
.transpose()?
.ok_or_else(|| format_err!("resolve map name failed, idx={:x}", sym.st_name))?;
let mut map_def: ffi::bpf_map_def = unsafe { mem::zeroed() };
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr() as *const u8,
&mut map_def as *mut _ as *mut u8,
mem::size_of::<ffi::bpf_map_def>().min(map_def_sz),
)
}
if map_def_sz > mem::size_of::<ffi::bpf_map_def>()
&& data[mem::size_of::<ffi::bpf_map_def>()..]
.iter()
.any(|&b| b != 0)
{
bail!("maps section has unrecognized, non-zero options");
}
let map = Map::with_def(name, sym.st_value as usize, self.ifindex, &map_def)?;
debug!(
"#{} map `{}` @ section `{}`: {:?}",
maps.len(),
name,
self.resolve_name(sec.sh_name)?,
map
);
maps.push(map)
}
maps.sort_by_cached_key(|map| map.offset);
Ok(maps)
}
fn resolve_program_names(
&self,
programs: impl IntoIterator<Item = (&'a str, Type, Option<Attach>, usize, Vec<Insn>)>,
text_section: Option<usize>,
) -> Result<Vec<Program>, Error> {
programs
.into_iter()
.map(|(title, ty, attach, idx, insns)| {
let name = self
.resolve_symbol(|sym| sym.st_shndx == idx && sym.st_bind() == STB_GLOBAL)
.and_then(|sym| self.resolve_name(sym.st_name))
.or_else(|_| {
if text_section == Some(idx) {
Ok(".text")
} else {
Err(format_err!("program `{}` symbol not found", title))
}
})?;
debug!(
"#{} `{:?}` program `{}` @ section `{}` with {} insns",
idx,
ty,
name,
title,
insns.len()
);
Ok(Program::new(name, ty, attach, title, idx, insns))
})
.collect::<Result<Vec<_>, _>>()
}
fn resolve_symbol<P: FnMut(&Sym) -> bool>(&self, predicate: P) -> Result<Sym, Error> {
self.obj
.syms
.iter()
.find(predicate)
.ok_or_else(|| format_err!("symbol not found"))
}
fn resolve_name(&self, idx: usize) -> Result<&str, Error> {
self.obj
.strtab
.get(idx)
.ok_or_else(|| format_err!("index out of bound"))?
.map_err(|err| err.context("read string").into())
}
fn relocate_programs(
&self,
programs: &mut [Program],
maps: &[Map],
maps_idx: Option<usize>,
text_idx: Option<usize>,
) -> Result<(), Error> {
for (idx, sec) in &self.obj.shdr_relocs {
if let Some(prog) = programs.iter_mut().find(|prog| prog.idx == *idx) {
trace!("relocate program #{} `{}`", prog.idx, prog.name);
for reloc in sec.iter() {
let sym = self.resolve_symbol(|sym| sym.st_shndx == reloc.r_sym)?;
trace!(
"reloc for #{}, value = {}, name = {}",
reloc.r_sym,
sym.st_value,
sym.st_name
);
if Some(sym.st_shndx) != maps_idx && Some(sym.st_shndx) != text_idx {
bail!("program '{}' contains non-map related relo data pointing to section #{}", prog.name, sym.st_shndx);
}
let insn_idx = reloc.r_offset as usize / mem::size_of::<Insn>();
trace!("reloc insn #{}", insn_idx);
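// Map relocations may only patch a 64-bit load-immediate instruction (LD | IMM | DW).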
if Opcode::from_bits_truncate(prog.insns[insn_idx].code)
!= Opcode::LD | Opcode::IMM | Opcode::DW
{
bail!(
"invalid relocate for insns[{}].code = {:?}",
insn_idx,
prog.insns[insn_idx | {
if self.obj.header.e_type != ET_REL || self.obj.header.e_machine != EM_BPF {
bail!("not an eBPF object file");
}
if self.obj.header.endianness()? != scroll::NATIVE {
bail!("endianness mismatch.")
}
let mut license = None;
let mut version = None;
let mut programs = vec![];
let mut maps_section = None;
let mut text_section = None;
for (idx, sec) in self.obj.section_headers.iter().enumerate() {
let name = self.resolve_name(sec.sh_name)?;
trace!("parse `{}` section: {:?}", name, sec);
let section_data = || { | identifier_body |
elf.rs | , format_err, Error, Fail, ResultExt};
use goblin::elf::{
header::{EM_BPF, ET_REL},
section_header::{SectionHeader, SHT_PROGBITS, SHT_REL},
sym::{Sym, STB_GLOBAL},
};
use ebpf_core::{
ffi, prog, Attach, Insn, Map, Object, Opcode, Program, Type, BPF_LICENSE_SEC, BPF_MAPS_SEC,
BPF_VERSION_SEC, BTF_ELF_SEC, BTF_EXT_ELF_SEC,
};
use crate::parser::Parser;
use crate::prog::prog_type_by_name;
impl<'a> Parser<goblin::elf::Elf<'a>> {
pub fn parse(&self, buf: &[u8]) -> Result<Object, Error> {
if self.obj.header.e_type != ET_REL || self.obj.header.e_machine != EM_BPF {
bail!("not an eBPF object file");
}
if self.obj.header.endianness()? != scroll::NATIVE {
bail!("endianness mismatch.")
}
let mut license = None;
let mut version = None;
let mut programs = vec![];
let mut maps_section = None;
let mut text_section = None;
for (idx, sec) in self.obj.section_headers.iter().enumerate() {
let name = self.resolve_name(sec.sh_name)?;
trace!("parse `{}` section: {:?}", name, sec);
let section_data = || {
buf.get(sec.file_range()).ok_or_else(|| {
format_err!(
"`{}` section data {:?} out of bound",
name,
sec.file_range()
)
})
};
match name {
BPF_LICENSE_SEC if sec.sh_type == SHT_PROGBITS => {
license = Some(
CStr::from_bytes_with_nul(section_data()?)?
.to_str()?
.to_owned(),
);
debug!("kernel license: {}", license.as_ref().unwrap());
}
BPF_VERSION_SEC if sec.sh_type == SHT_PROGBITS => {
version = Some(u32::from_ne_bytes(section_data()?.try_into()?));
debug!("kernel version: {:x}", version.as_ref().unwrap());
}
BPF_MAPS_SEC => {
debug!("`{}` section", name);
maps_section = Some((idx, sec));
}
BTF_ELF_SEC => {
// TODO btf__new
debug!("`{}` section", name);
}
BTF_EXT_ELF_SEC => {
// TODO btf_ext_data
debug!("`{}` section", name);
}
_ if sec.sh_type == SHT_PROGBITS && sec.is_executable() && sec.sh_size > 0 => {
if name == ".text" {
text_section = Some(idx);
}
// If type is not specified, try to guess it based on section name.
let (ty, attach) = match self.prog_type {
Some(ty) if ty != Type::Unspec => (ty, self.expected_attach_type),
_ => prog_type_by_name(name)
.ok_or_else(|| format_err!("unexpected section name: {}", name))?,
};
let insns = unsafe {
let data = buf.as_ptr().add(sec.sh_offset as usize);
let len = sec.sh_size as usize / mem::size_of::<Insn>();
slice::from_raw_parts(data as *const _, len)
};
debug!(
"{:?} kernel program #{} @ section `{}` with {} insns",
ty,
idx,
name,
insns.len()
);
programs.push((name, ty, attach, idx, insns.to_vec()));
}
_ if sec.sh_type == SHT_REL => {}
_ => {
trace!("ignore `{}` section", name);
}
}
}
let maps = if let Some((idx, sec)) = maps_section {
self.init_maps(buf, idx, sec)?
} else {
Vec::new()
};
let mut programs = self
.resolve_program_names(programs, text_section)
.context("resolve program names")?;
self.relocate_programs(
&mut programs,
&maps,
maps_section.map(|(idx, _)| idx),
text_section,
)?;
Ok(Object {
license,
version,
programs,
maps,
})
}
fn init_maps(&self, buf: &[u8], idx: usize, sec: &SectionHeader) -> Result<Vec<Map>, Error> {
let mut maps = Vec::new();
let data = buf.get(sec.file_range()).ok_or_else(|| {
format_err!("`maps` section data {:?} out of bound", sec.file_range())
})?;
let nr_maps = self
.obj
.syms
.iter()
.filter(|sym| sym.st_shndx == idx)
.count();
let map_def_sz = data.len() / nr_maps;
for sym in self.obj.syms.iter().filter(|sym| sym.st_shndx == idx) {
let name = self
.obj
.strtab
.get(sym.st_name)
.transpose()?
.ok_or_else(|| format_err!("resolve map name failed, idx={:x}", sym.st_name))?;
let mut map_def: ffi::bpf_map_def = unsafe { mem::zeroed() };
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr() as *const u8,
&mut map_def as *mut _ as *mut u8,
mem::size_of::<ffi::bpf_map_def>().min(map_def_sz),
)
}
if map_def_sz > mem::size_of::<ffi::bpf_map_def>()
&& data[mem::size_of::<ffi::bpf_map_def>()..]
.iter()
.any(|&b| b != 0)
{
bail!("maps section has unrecognized, non-zero options");
}
let map = Map::with_def(name, sym.st_value as usize, self.ifindex, &map_def)?;
debug!(
"#{} map `{}` @ section `{}`: {:?}",
maps.len(),
name,
self.resolve_name(sec.sh_name)?,
map
);
maps.push(map)
}
maps.sort_by_cached_key(|map| map.offset);
Ok(maps)
}
fn resolve_program_names(
&self,
programs: impl IntoIterator<Item = (&'a str, Type, Option<Attach>, usize, Vec<Insn>)>,
text_section: Option<usize>,
) -> Result<Vec<Program>, Error> {
programs
.into_iter()
.map(|(title, ty, attach, idx, insns)| {
let name = self
.resolve_symbol(|sym| sym.st_shndx == idx && sym.st_bind() == STB_GLOBAL)
.and_then(|sym| self.resolve_name(sym.st_name))
.or_else(|_| {
if text_section == Some(idx) {
Ok(".text")
} else {
Err(format_err!("program `{}` symbol not found", title))
}
})?;
debug!(
"#{} `{:?}` program `{}` @ section `{}` with {} insns",
idx,
ty,
name,
title,
insns.len()
);
Ok(Program::new(name, ty, attach, title, idx, insns))
})
.collect::<Result<Vec<_>, _>>()
}
fn resolve_symbol<P: FnMut(&Sym) -> bool>(&self, predicate: P) -> Result<Sym, Error> {
self.obj
.syms
.iter()
.find(predicate)
.ok_or_else(|| format_err!("symbol not found"))
}
fn resolve_name(&self, idx: usize) -> Result<&str, Error> {
self.obj
.strtab
.get(idx)
.ok_or_else(|| format_err!("index out of bound"))?
.map_err(|err| err.context("read string").into())
}
fn | (
&self,
programs: &mut [Program],
maps: &[Map],
maps_idx: Option<usize>,
text_idx: Option<usize>,
) -> Result<(), Error> {
for (idx, sec) in &self.obj.shdr_relocs {
if let Some(prog) = programs.iter_mut().find(|prog| prog.idx == *idx) {
trace!("relocate program #{} `{}`", prog.idx, prog.name);
for reloc in sec.iter() {
let sym = self.resolve_symbol(|sym| sym.st_shndx == reloc.r_sym)?;
trace!(
"reloc for #{}, value = {}, name = {}",
reloc.r_sym,
sym.st_value,
sym.st_name
);
if Some(sym.st_shndx) != maps_idx && Some(sym.st_shndx) != text_idx {
bail!("program '{}' contains non-map related relo data pointing to section #{}", prog.name, sym.st_shndx);
}
let insn_idx = reloc.r_offset as usize / mem::size_of::<Insn>();
trace!("reloc insn #{}", insn_idx);
if Opcode::from_bits_truncate(prog.insns[insn_idx].code)
!= Opcode::LD | Opcode::IMM | Opcode::DW
{
bail!(
"invalid relocate for insns[{}].code = {:?}",
insn_idx,
prog.insns[insn | relocate_programs | identifier_name |
elf.rs | , format_err, Error, Fail, ResultExt};
use goblin::elf::{
header::{EM_BPF, ET_REL},
section_header::{SectionHeader, SHT_PROGBITS, SHT_REL},
sym::{Sym, STB_GLOBAL},
};
use ebpf_core::{
ffi, prog, Attach, Insn, Map, Object, Opcode, Program, Type, BPF_LICENSE_SEC, BPF_MAPS_SEC,
BPF_VERSION_SEC, BTF_ELF_SEC, BTF_EXT_ELF_SEC,
};
use crate::parser::Parser;
use crate::prog::prog_type_by_name;
impl<'a> Parser<goblin::elf::Elf<'a>> {
pub fn parse(&self, buf: &[u8]) -> Result<Object, Error> {
if self.obj.header.e_type != ET_REL || self.obj.header.e_machine != EM_BPF {
bail!("not an eBPF object file");
}
if self.obj.header.endianness()? != scroll::NATIVE {
bail!("endianness mismatch.")
}
let mut license = None;
let mut version = None;
let mut programs = vec![];
let mut maps_section = None;
let mut text_section = None;
for (idx, sec) in self.obj.section_headers.iter().enumerate() {
let name = self.resolve_name(sec.sh_name)?;
trace!("parse `{}` section: {:?}", name, sec);
let section_data = || {
buf.get(sec.file_range()).ok_or_else(|| {
format_err!(
"`{}` section data {:?} out of bound",
name,
sec.file_range()
)
})
};
match name {
BPF_LICENSE_SEC if sec.sh_type == SHT_PROGBITS => {
license = Some(
CStr::from_bytes_with_nul(section_data()?)?
.to_str()?
.to_owned(),
);
debug!("kernel license: {}", license.as_ref().unwrap());
}
BPF_VERSION_SEC if sec.sh_type == SHT_PROGBITS => {
version = Some(u32::from_ne_bytes(section_data()?.try_into()?));
debug!("kernel version: {:x}", version.as_ref().unwrap());
}
BPF_MAPS_SEC => {
debug!("`{}` section", name);
maps_section = Some((idx, sec));
}
BTF_ELF_SEC => {
// TODO btf__new
debug!("`{}` section", name);
}
BTF_EXT_ELF_SEC => {
// TODO btf_ext_data
debug!("`{}` section", name);
}
_ if sec.sh_type == SHT_PROGBITS && sec.is_executable() && sec.sh_size > 0 => {
if name == ".text" {
text_section = Some(idx);
}
// If type is not specified, try to guess it based on section name.
let (ty, attach) = match self.prog_type {
Some(ty) if ty != Type::Unspec => (ty, self.expected_attach_type),
_ => prog_type_by_name(name)
.ok_or_else(|| format_err!("unexpected section name: {}", name))?,
};
let insns = unsafe {
let data = buf.as_ptr().add(sec.sh_offset as usize);
let len = sec.sh_size as usize / mem::size_of::<Insn>();
slice::from_raw_parts(data as *const _, len)
};
| debug!(
"{:?} kernel program #{} @ section `{}` with {} insns",
ty,
idx,
name,
insns.len()
);
programs.push((name, ty, attach, idx, insns.to_vec()));
}
_ if sec.sh_type == SHT_REL => {}
_ => {
trace!("ignore `{}` section", name);
}
}
}
let maps = if let Some((idx, sec)) = maps_section {
self.init_maps(buf, idx, sec)?
} else {
Vec::new()
};
let mut programs = self
.resolve_program_names(programs, text_section)
.context("resolve program names")?;
self.relocate_programs(
&mut programs,
&maps,
maps_section.map(|(idx, _)| idx),
text_section,
)?;
Ok(Object {
license,
version,
programs,
maps,
})
}
fn init_maps(&self, buf: &[u8], idx: usize, sec: &SectionHeader) -> Result<Vec<Map>, Error> {
let mut maps = Vec::new();
let data = buf.get(sec.file_range()).ok_or_else(|| {
format_err!("`maps` section data {:?} out of bound", sec.file_range())
})?;
let nr_maps = self
.obj
.syms
.iter()
.filter(|sym| sym.st_shndx == idx)
.count();
let map_def_sz = data.len() / nr_maps;
for sym in self.obj.syms.iter().filter(|sym| sym.st_shndx == idx) {
let name = self
.obj
.strtab
.get(sym.st_name)
.transpose()?
.ok_or_else(|| format_err!("resolve map name failed, idx={:x}", sym.st_name))?;
let mut map_def: ffi::bpf_map_def = unsafe { mem::zeroed() };
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr() as *const u8,
&mut map_def as *mut _ as *mut u8,
mem::size_of::<ffi::bpf_map_def>().min(map_def_sz),
)
}
if map_def_sz > mem::size_of::<ffi::bpf_map_def>()
&& data[mem::size_of::<ffi::bpf_map_def>()..]
.iter()
.any(|&b| b != 0)
{
bail!("maps section has unrecognized, non-zero options");
}
let map = Map::with_def(name, sym.st_value as usize, self.ifindex, &map_def)?;
debug!(
"#{} map `{}` @ section `{}`: {:?}",
maps.len(),
name,
self.resolve_name(sec.sh_name)?,
map
);
maps.push(map)
}
maps.sort_by_cached_key(|map| map.offset);
Ok(maps)
}
fn resolve_program_names(
&self,
programs: impl IntoIterator<Item = (&'a str, Type, Option<Attach>, usize, Vec<Insn>)>,
text_section: Option<usize>,
) -> Result<Vec<Program>, Error> {
programs
.into_iter()
.map(|(title, ty, attach, idx, insns)| {
let name = self
.resolve_symbol(|sym| sym.st_shndx == idx && sym.st_bind() == STB_GLOBAL)
.and_then(|sym| self.resolve_name(sym.st_name))
.or_else(|_| {
if text_section == Some(idx) {
Ok(".text")
} else {
Err(format_err!("program `{}` symbol not found", title))
}
})?;
debug!(
"#{} `{:?}` program `{}` @ section `{}` with {} insns",
idx,
ty,
name,
title,
insns.len()
);
Ok(Program::new(name, ty, attach, title, idx, insns))
})
.collect::<Result<Vec<_>, _>>()
}
fn resolve_symbol<P: FnMut(&Sym) -> bool>(&self, predicate: P) -> Result<Sym, Error> {
self.obj
.syms
.iter()
.find(predicate)
.ok_or_else(|| format_err!("symbol not found"))
}
fn resolve_name(&self, idx: usize) -> Result<&str, Error> {
self.obj
.strtab
.get(idx)
.ok_or_else(|| format_err!("index out of bound"))?
.map_err(|err| err.context("read string").into())
}
fn relocate_programs(
&self,
programs: &mut [Program],
maps: &[Map],
maps_idx: Option<usize>,
text_idx: Option<usize>,
) -> Result<(), Error> {
for (idx, sec) in &self.obj.shdr_relocs {
if let Some(prog) = programs.iter_mut().find(|prog| prog.idx == *idx) {
trace!("relocate program #{} `{}`", prog.idx, prog.name);
for reloc in sec.iter() {
let sym = self.resolve_symbol(|sym| sym.st_shndx == reloc.r_sym)?;
trace!(
"reloc for #{}, value = {}, name = {}",
reloc.r_sym,
sym.st_value,
sym.st_name
);
if Some(sym.st_shndx) != maps_idx && Some(sym.st_shndx) != text_idx {
bail!("program '{}' contains non-map related relo data pointing to section #{}", prog.name, sym.st_shndx);
}
let insn_idx = reloc.r_offset as usize / mem::size_of::<Insn>();
trace!("reloc insn #{}", insn_idx);
if Opcode::from_bits_truncate(prog.insns[insn_idx].code)
!= Opcode::LD | Opcode::IMM | Opcode::DW
{
bail!(
"invalid relocate for insns[{}].code = {:?}",
insn_idx,
prog.insns[insn_idx]. | random_line_split |
|
elf.rs | , format_err, Error, Fail, ResultExt};
use goblin::elf::{
header::{EM_BPF, ET_REL},
section_header::{SectionHeader, SHT_PROGBITS, SHT_REL},
sym::{Sym, STB_GLOBAL},
};
use ebpf_core::{
ffi, prog, Attach, Insn, Map, Object, Opcode, Program, Type, BPF_LICENSE_SEC, BPF_MAPS_SEC,
BPF_VERSION_SEC, BTF_ELF_SEC, BTF_EXT_ELF_SEC,
};
use crate::parser::Parser;
use crate::prog::prog_type_by_name;
impl<'a> Parser<goblin::elf::Elf<'a>> {
pub fn parse(&self, buf: &[u8]) -> Result<Object, Error> {
if self.obj.header.e_type != ET_REL || self.obj.header.e_machine != EM_BPF {
bail!("not an eBPF object file");
}
if self.obj.header.endianness()? != scroll::NATIVE {
bail!("endianness mismatch.")
}
let mut license = None;
let mut version = None;
let mut programs = vec![];
let mut maps_section = None;
let mut text_section = None;
for (idx, sec) in self.obj.section_headers.iter().enumerate() {
let name = self.resolve_name(sec.sh_name)?;
trace!("parse `{}` section: {:?}", name, sec);
let section_data = || {
buf.get(sec.file_range()).ok_or_else(|| {
format_err!(
"`{}` section data {:?} out of bound",
name,
sec.file_range()
)
})
};
match name {
BPF_LICENSE_SEC if sec.sh_type == SHT_PROGBITS => |
BPF_VERSION_SEC if sec.sh_type == SHT_PROGBITS => {
version = Some(u32::from_ne_bytes(section_data()?.try_into()?));
debug!("kernel version: {:x}", version.as_ref().unwrap());
}
BPF_MAPS_SEC => {
debug!("`{}` section", name);
maps_section = Some((idx, sec));
}
BTF_ELF_SEC => {
// TODO btf__new
debug!("`{}` section", name);
}
BTF_EXT_ELF_SEC => {
// TODO btf_ext_data
debug!("`{}` section", name);
}
_ if sec.sh_type == SHT_PROGBITS && sec.is_executable() && sec.sh_size > 0 => {
if name == ".text" {
text_section = Some(idx);
}
// If type is not specified, try to guess it based on section name.
let (ty, attach) = match self.prog_type {
Some(ty) if ty != Type::Unspec => (ty, self.expected_attach_type),
_ => prog_type_by_name(name)
.ok_or_else(|| format_err!("unexpected section name: {}", name))?,
};
let insns = unsafe {
let data = buf.as_ptr().add(sec.sh_offset as usize);
let len = sec.sh_size as usize / mem::size_of::<Insn>();
slice::from_raw_parts(data as *const _, len)
};
debug!(
"{:?} kernel program #{} @ section `{}` with {} insns",
ty,
idx,
name,
insns.len()
);
programs.push((name, ty, attach, idx, insns.to_vec()));
}
_ if sec.sh_type == SHT_REL => {}
_ => {
trace!("ignore `{}` section", name);
}
}
}
let maps = if let Some((idx, sec)) = maps_section {
self.init_maps(buf, idx, sec)?
} else {
Vec::new()
};
let mut programs = self
.resolve_program_names(programs, text_section)
.context("resolve program names")?;
self.relocate_programs(
&mut programs,
&maps,
maps_section.map(|(idx, _)| idx),
text_section,
)?;
Ok(Object {
license,
version,
programs,
maps,
})
}
fn init_maps(&self, buf: &[u8], idx: usize, sec: &SectionHeader) -> Result<Vec<Map>, Error> {
let mut maps = Vec::new();
let data = buf.get(sec.file_range()).ok_or_else(|| {
format_err!("`maps` section data {:?} out of bound", sec.file_range())
})?;
let nr_maps = self
.obj
.syms
.iter()
.filter(|sym| sym.st_shndx == idx)
.count();
let map_def_sz = data.len() / nr_maps;
for sym in self.obj.syms.iter().filter(|sym| sym.st_shndx == idx) {
let name = self
.obj
.strtab
.get(sym.st_name)
.transpose()?
.ok_or_else(|| format_err!("resolve map name failed, idx={:x}", sym.st_name))?;
let mut map_def: ffi::bpf_map_def = unsafe { mem::zeroed() };
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr() as *const u8,
&mut map_def as *mut _ as *mut u8,
mem::size_of::<ffi::bpf_map_def>().min(map_def_sz),
)
}
if map_def_sz > mem::size_of::<ffi::bpf_map_def>()
&& data[mem::size_of::<ffi::bpf_map_def>()..]
.iter()
.any(|&b| b != 0)
{
bail!("maps section has unrecognized, non-zero options");
}
let map = Map::with_def(name, sym.st_value as usize, self.ifindex, &map_def)?;
debug!(
"#{} map `{}` @ section `{}`: {:?}",
maps.len(),
name,
self.resolve_name(sec.sh_name)?,
map
);
maps.push(map)
}
maps.sort_by_cached_key(|map| map.offset);
Ok(maps)
}
fn resolve_program_names(
&self,
programs: impl IntoIterator<Item = (&'a str, Type, Option<Attach>, usize, Vec<Insn>)>,
text_section: Option<usize>,
) -> Result<Vec<Program>, Error> {
programs
.into_iter()
.map(|(title, ty, attach, idx, insns)| {
let name = self
.resolve_symbol(|sym| sym.st_shndx == idx && sym.st_bind() == STB_GLOBAL)
.and_then(|sym| self.resolve_name(sym.st_name))
.or_else(|_| {
if text_section == Some(idx) {
Ok(".text")
} else {
Err(format_err!("program `{}` symbol not found", title))
}
})?;
debug!(
"#{} `{:?}` program `{}` @ section `{}` with {} insns",
idx,
ty,
name,
title,
insns.len()
);
Ok(Program::new(name, ty, attach, title, idx, insns))
})
.collect::<Result<Vec<_>, _>>()
}
fn resolve_symbol<P: FnMut(&Sym) -> bool>(&self, predicate: P) -> Result<Sym, Error> {
self.obj
.syms
.iter()
.find(predicate)
.ok_or_else(|| format_err!("symbol not found"))
}
fn resolve_name(&self, idx: usize) -> Result<&str, Error> {
self.obj
.strtab
.get(idx)
.ok_or_else(|| format_err!("index out of bound"))?
.map_err(|err| err.context("read string").into())
}
fn relocate_programs(
&self,
programs: &mut [Program],
maps: &[Map],
maps_idx: Option<usize>,
text_idx: Option<usize>,
) -> Result<(), Error> {
for (idx, sec) in &self.obj.shdr_relocs {
if let Some(prog) = programs.iter_mut().find(|prog| prog.idx == *idx) {
trace!("relocate program #{} `{}`", prog.idx, prog.name);
for reloc in sec.iter() {
let sym = self.resolve_symbol(|sym| sym.st_shndx == reloc.r_sym)?;
trace!(
"reloc for #{}, value = {}, name = {}",
reloc.r_sym,
sym.st_value,
sym.st_name
);
if Some(sym.st_shndx) != maps_idx && Some(sym.st_shndx) != text_idx {
bail!("program '{}' contains non-map related relo data pointing to section #{}", prog.name, sym.st_shndx);
}
let insn_idx = reloc.r_offset as usize / mem::size_of::<Insn>();
trace!("reloc insn #{}", insn_idx);
if Opcode::from_bits_truncate(prog.insns[insn_idx].code)
!= Opcode::LD | Opcode::IMM | Opcode::DW
{
bail!(
"invalid relocate for insns[{}].code = {:?}",
insn_idx,
prog.insns[insn | {
license = Some(
CStr::from_bytes_with_nul(section_data()?)?
.to_str()?
.to_owned(),
);
debug!("kernel license: {}", license.as_ref().unwrap());
} | conditional_block |
disk.go | irGroup, ok := new(bbssig.Group).Unmarshal(cont.TheirGroup)
if !ok {
return errors.New("client: failed to unmarshal their group")
}
if contact.myGroupKey, ok = new(bbssig.MemberKey).Unmarshal(theirGroup, cont.MyGroupKey); !ok {
return errors.New("client: failed to unmarshal my group key")
}
if cont.TheirServer == nil {
return errors.New("client: contact missing server")
}
contact.theirServer = *cont.TheirServer
if len(cont.TheirPub) != len(contact.theirPub) {
return errors.New("client: contact missing public key")
}
copy(contact.theirPub[:], cont.TheirPub)
if len(cont.TheirIdentityPublic) != len(contact.theirIdentityPublic) {
return errors.New("client: contact missing identity public key")
}
copy(contact.theirIdentityPublic[:], cont.TheirIdentityPublic)
copy(contact.theirLastDHPublic[:], cont.TheirLastPublic)
copy(contact.theirCurrentDHPublic[:], cont.TheirCurrentPublic)
for _, prevTag := range cont.PreviousTags {
contact.previousTags = append(contact.previousTags, previousTag{
tag: prevTag.Tag,
expired: time.Unix(*prevTag.Expired, 0),
})
}
// For now we'll have to do this conditionally until everyone
// has updated local state.
if cont.Generation != nil {
contact.generation = *cont.Generation
}
if cont.SupportedVersion != nil {
contact.supportedVersion = *cont.SupportedVersion
}
contact.events = make([]Event, 0, len(cont.Events))
for _, evt := range cont.Events {
event := Event{
t: time.Unix(*evt.Time, 0),
msg: *evt.Message,
}
contact.events = append(contact.events, event)
}
}
now := c.Now()
for _, m := range state.Inbox {
msg := &InboxMessage{
id: *m.Id,
from: *m.From,
receivedTime: time.Unix(*m.ReceivedTime, 0),
acked: *m.Acked,
read: *m.Read,
sealed: m.Sealed,
retained: m.GetRetained(),
exposureTime: now,
}
c.registerId(msg.id)
if len(m.Message) > 0 {
msg.message = new(pond.Message)
if err := proto.Unmarshal(m.Message, msg.message); err != nil {
return errors.New("client: corrupt message in inbox: " + err.Error())
}
}
c.inbox = append(c.inbox, msg)
}
for _, m := range state.Outbox {
msg := &queuedMessage{
id: *m.Id,
to: *m.To,
server: *m.Server,
created: time.Unix(*m.Created, 0),
}
c.registerId(msg.id)
if len(m.Message) > 0 {
msg.message = new(pond.Message)
if err := proto.Unmarshal(m.Message, msg.message); err != nil {
return errors.New("client: corrupt message in outbox: " + err.Error())
}
}
if m.Sent != nil {
msg.sent = time.Unix(*m.Sent, 0)
}
if m.Acked != nil {
msg.acked = time.Unix(*m.Acked, 0)
}
if len(m.Request) != 0 {
msg.request = new(pond.Request)
if err := proto.Unmarshal(m.Request, msg.request); err != nil {
return errors.New("client: corrupt request in outbox: " + err.Error())
}
}
msg.revocation = m.GetRevocation()
if msg.revocation && len(msg.server) == 0 {
// There was a bug in some versions where revoking a
// pending contact would result in a revocation message
// with an empty server.
msg.server = c.server
}
c.outbox = append(c.outbox, msg)
if msg.sent.IsZero() && (msg.to == 0 || !c.contacts[msg.to].revokedUs) {
// This message hasn't been sent yet.
c.enqueue(msg)
}
}
for _, m := range state.Drafts {
draft := &Draft{
id: *m.Id,
body: *m.Body,
attachments: m.Attachments,
detachments: m.Detachments,
created: time.Unix(*m.Created, 0),
}
c.registerId(draft.id)
if m.To != nil {
draft.to = *m.To
}
if m.InReplyTo != nil {
draft.inReplyTo = *m.InReplyTo
}
c.drafts[draft.id] = draft
}
return nil
}
func (c *client) marshal() []byte {
var err error
var contacts []*disk.Contact
for _, contact := range c.contacts {
cont := &disk.Contact{
Id: proto.Uint64(contact.id),
Name: proto.String(contact.name),
GroupKey: contact.groupKey.Marshal(),
IsPending: proto.Bool(contact.isPending),
KeyExchangeBytes: contact.kxsBytes,
LastPrivate: contact.lastDHPrivate[:],
CurrentPrivate: contact.currentDHPrivate[:],
SupportedVersion: proto.Int32(contact.supportedVersion),
PandaKeyExchange: contact.pandaKeyExchange,
PandaError: proto.String(contact.pandaResult),
RevokedUs: proto.Bool(contact.revokedUs),
}
if !contact.isPending {
cont.MyGroupKey = contact.myGroupKey.Marshal()
cont.TheirGroup = contact.myGroupKey.Group.Marshal()
cont.TheirServer = proto.String(contact.theirServer)
cont.TheirPub = contact.theirPub[:]
cont.Generation = proto.Uint32(contact.generation)
cont.TheirIdentityPublic = contact.theirIdentityPublic[:]
cont.TheirLastPublic = contact.theirLastDHPublic[:]
cont.TheirCurrentPublic = contact.theirCurrentDHPublic[:]
}
if contact.ratchet != nil {
cont.Ratchet = contact.ratchet.Marshal(time.Now(), messageLifetime)
}
for _, prevTag := range contact.previousTags {
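			// only keep tags that are still inside the previousTagLifetime grace window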
if time.Since(prevTag.expired) > previousTagLifetime {
continue
}
cont.PreviousTags = append(cont.PreviousTags, &disk.Contact_PreviousTag{
Tag: prevTag.tag,
Expired: proto.Int64(prevTag.expired.Unix()),
})
}
cont.Events = make([]*disk.Contact_Event, 0, len(contact.events))
for _, event := range contact.events {
if time.Since(event.t) > messageLifetime {
continue
}
cont.Events = append(cont.Events, &disk.Contact_Event{
Time: proto.Int64(event.t.Unix()),
Message: proto.String(event.msg),
})
}
contacts = append(contacts, cont)
}
var inbox []*disk.Inbox
for _, msg := range c.inbox {
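		// messages older than messageLifetime are not persisted unless explicitly retained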
if time.Since(msg.receivedTime) > messageLifetime && !msg.retained {
continue
}
m := &disk.Inbox{
Id: proto.Uint64(msg.id),
From: proto.Uint64(msg.from),
ReceivedTime: proto.Int64(msg.receivedTime.Unix()),
Acked: proto.Bool(msg.acked),
Read: proto.Bool(msg.read),
Sealed: msg.sealed,
Retained: proto.Bool(msg.retained),
}
if msg.message != nil {
if m.Message, err = proto.Marshal(msg.message); err != nil {
panic(err)
}
}
inbox = append(inbox, m)
}
var outbox []*disk.Outbox
for _, msg := range c.outbox {
if time.Since(msg.created) > messageLifetime {
continue
}
m := &disk.Outbox{
Id: proto.Uint64(msg.id),
To: proto.Uint64(msg.to),
Server: proto.String(msg.server),
Created: proto.Int64(msg.created.Unix()),
Revocation: proto.Bool(msg.revocation),
}
if msg.message != nil {
if m.Message, err = proto.Marshal(msg.message); err != nil {
panic(err)
}
}
if !msg.sent.IsZero() {
m.Sent = proto.Int64(msg.sent.Unix())
}
if !msg.acked.IsZero() {
m.Acked = proto.Int64(msg.acked.Unix())
}
if msg.request != nil {
if m.Request, err = proto.Marshal(msg.request); err != nil {
panic(err)
}
}
outbox = append(outbox, m)
}
| var drafts []*disk.Draft
for _, draft := range c.drafts { | random_line_split |
|
disk.go | ig.PrivateKey).Unmarshal(group, state.GroupPrivate)
if !ok {
return errors.New("client: failed to unmarshal group private key")
}
if len(state.Private) != len(c.priv) {
return errors.New("client: failed to unmarshal private key")
}
copy(c.priv[:], state.Private)
if len(state.Public) != len(c.pub) {
return errors.New("client: failed to unmarshal public key")
}
copy(c.pub[:], state.Public)
c.generation = *state.Generation
if state.LastErasureStorageTime != nil {
c.lastErasureStorageTime = time.Unix(*state.LastErasureStorageTime, 0)
}
for _, prevGroupPriv := range state.PreviousGroupPrivateKeys {
group, ok := new(bbssig.Group).Unmarshal(prevGroupPriv.Group)
if !ok {
return errors.New("client: failed to unmarshal previous group")
}
priv, ok := new(bbssig.PrivateKey).Unmarshal(group, prevGroupPriv.GroupPrivate)
if !ok {
return errors.New("client: failed to unmarshal previous group private key")
}
c.prevGroupPrivs = append(c.prevGroupPrivs, previousGroupPrivateKey{
priv: priv,
expired: time.Unix(*prevGroupPriv.Expired, 0),
})
}
for _, cont := range state.Contacts {
contact := &Contact{
id: *cont.Id,
name: *cont.Name,
kxsBytes: cont.KeyExchangeBytes,
pandaKeyExchange: cont.PandaKeyExchange,
pandaResult: cont.GetPandaError(),
revokedUs: cont.GetRevokedUs(),
}
c.registerId(contact.id)
c.contacts[contact.id] = contact
if contact.groupKey, ok = new(bbssig.MemberKey).Unmarshal(c.groupPriv.Group, cont.GroupKey); !ok {
return errors.New("client: failed to unmarshal group member key")
}
copy(contact.lastDHPrivate[:], cont.LastPrivate)
copy(contact.currentDHPrivate[:], cont.CurrentPrivate)
if cont.Ratchet != nil {
contact.ratchet = c.newRatchet(contact)
if err := contact.ratchet.Unmarshal(cont.Ratchet); err != nil {
return err
}
}
if cont.IsPending != nil && *cont.IsPending {
contact.isPending = true
continue
}
theirGroup, ok := new(bbssig.Group).Unmarshal(cont.TheirGroup)
if !ok {
return errors.New("client: failed to unmarshal their group")
}
if contact.myGroupKey, ok = new(bbssig.MemberKey).Unmarshal(theirGroup, cont.MyGroupKey); !ok {
return errors.New("client: failed to unmarshal my group key")
}
if cont.TheirServer == nil {
return errors.New("client: contact missing server")
}
contact.theirServer = *cont.TheirServer
if len(cont.TheirPub) != len(contact.theirPub) {
return errors.New("client: contact missing public key")
}
copy(contact.theirPub[:], cont.TheirPub)
if len(cont.TheirIdentityPublic) != len(contact.theirIdentityPublic) {
return errors.New("client: contact missing identity public key")
}
copy(contact.theirIdentityPublic[:], cont.TheirIdentityPublic)
copy(contact.theirLastDHPublic[:], cont.TheirLastPublic)
copy(contact.theirCurrentDHPublic[:], cont.TheirCurrentPublic)
for _, prevTag := range cont.PreviousTags {
contact.previousTags = append(contact.previousTags, previousTag{
tag: prevTag.Tag,
expired: time.Unix(*prevTag.Expired, 0),
})
}
// For now we'll have to do this conditionally until everyone
// has updated local state.
if cont.Generation != nil {
contact.generation = *cont.Generation
}
if cont.SupportedVersion != nil {
contact.supportedVersion = *cont.SupportedVersion
}
contact.events = make([]Event, 0, len(cont.Events))
for _, evt := range cont.Events {
event := Event{
t: time.Unix(*evt.Time, 0),
msg: *evt.Message,
}
contact.events = append(contact.events, event)
}
}
now := c.Now()
for _, m := range state.Inbox {
msg := &InboxMessage{
id: *m.Id,
from: *m.From,
receivedTime: time.Unix(*m.ReceivedTime, 0),
acked: *m.Acked,
read: *m.Read,
sealed: m.Sealed,
retained: m.GetRetained(),
exposureTime: now,
}
c.registerId(msg.id)
if len(m.Message) > 0 {
msg.message = new(pond.Message)
if err := proto.Unmarshal(m.Message, msg.message); err != nil {
return errors.New("client: corrupt message in inbox: " + err.Error())
}
}
c.inbox = append(c.inbox, msg)
}
for _, m := range state.Outbox {
msg := &queuedMessage{
id: *m.Id,
to: *m.To,
server: *m.Server,
created: time.Unix(*m.Created, 0),
}
c.registerId(msg.id)
if len(m.Message) > 0 {
msg.message = new(pond.Message)
if err := proto.Unmarshal(m.Message, msg.message); err != nil {
return errors.New("client: corrupt message in outbox: " + err.Error())
}
}
if m.Sent != nil {
msg.sent = time.Unix(*m.Sent, 0)
}
if m.Acked != nil {
msg.acked = time.Unix(*m.Acked, 0)
}
if len(m.Request) != 0 {
msg.request = new(pond.Request)
if err := proto.Unmarshal(m.Request, msg.request); err != nil {
return errors.New("client: corrupt request in outbox: " + err.Error())
}
}
msg.revocation = m.GetRevocation()
if msg.revocation && len(msg.server) == 0 {
// There was a bug in some versions where revoking a
// pending contact would result in a revocation message
// with an empty server.
msg.server = c.server
}
c.outbox = append(c.outbox, msg)
if msg.sent.IsZero() && (msg.to == 0 || !c.contacts[msg.to].revokedUs) {
// This message hasn't been sent yet.
c.enqueue(msg)
}
}
for _, m := range state.Drafts {
draft := &Draft{
id: *m.Id,
body: *m.Body,
attachments: m.Attachments,
detachments: m.Detachments,
created: time.Unix(*m.Created, 0),
}
c.registerId(draft.id)
if m.To != nil {
draft.to = *m.To
}
if m.InReplyTo != nil {
draft.inReplyTo = *m.InReplyTo
}
c.drafts[draft.id] = draft
}
return nil
}
func (c *client) | () []byte {
var err error
var contacts []*disk.Contact
for _, contact := range c.contacts {
cont := &disk.Contact{
Id: proto.Uint64(contact.id),
Name: proto.String(contact.name),
GroupKey: contact.groupKey.Marshal(),
IsPending: proto.Bool(contact.isPending),
KeyExchangeBytes: contact.kxsBytes,
LastPrivate: contact.lastDHPrivate[:],
CurrentPrivate: contact.currentDHPrivate[:],
SupportedVersion: proto.Int32(contact.supportedVersion),
PandaKeyExchange: contact.pandaKeyExchange,
PandaError: proto.String(contact.pandaResult),
RevokedUs: proto.Bool(contact.revokedUs),
}
if !contact.isPending {
cont.MyGroupKey = contact.myGroupKey.Marshal()
cont.TheirGroup = contact.myGroupKey.Group.Marshal()
cont.TheirServer = proto.String(contact.theirServer)
cont.TheirPub = contact.theirPub[:]
cont.Generation = proto.Uint32(contact.generation)
cont.TheirIdentityPublic = contact.theirIdentityPublic[:]
cont.TheirLastPublic = contact.theirLastDHPublic[:]
cont.TheirCurrentPublic = contact.theirCurrentDHPublic[:]
}
if contact.ratchet != nil {
cont.Ratchet = contact.ratchet.Marshal(time.Now(), messageLifetime)
}
for _, prevTag := range contact.previousTags {
if time.Since(prevTag.expired) > previousTagLifetime {
continue
}
cont.PreviousTags = append(cont.PreviousTags, &disk.Contact_PreviousTag{
Tag: prevTag.tag,
Expired: proto.Int6 | marshal | identifier_name |
disk.go | }
copy(c.priv[:], state.Private)
if len(state.Public) != len(c.pub) {
return errors.New("client: failed to unmarshal public key")
}
copy(c.pub[:], state.Public)
c.generation = *state.Generation
if state.LastErasureStorageTime != nil {
c.lastErasureStorageTime = time.Unix(*state.LastErasureStorageTime, 0)
}
for _, prevGroupPriv := range state.PreviousGroupPrivateKeys {
group, ok := new(bbssig.Group).Unmarshal(prevGroupPriv.Group)
if !ok {
return errors.New("client: failed to unmarshal previous group")
}
priv, ok := new(bbssig.PrivateKey).Unmarshal(group, prevGroupPriv.GroupPrivate)
if !ok {
return errors.New("client: failed to unmarshal previous group private key")
}
c.prevGroupPrivs = append(c.prevGroupPrivs, previousGroupPrivateKey{
priv: priv,
expired: time.Unix(*prevGroupPriv.Expired, 0),
})
}
for _, cont := range state.Contacts {
contact := &Contact{
id: *cont.Id,
name: *cont.Name,
kxsBytes: cont.KeyExchangeBytes,
pandaKeyExchange: cont.PandaKeyExchange,
pandaResult: cont.GetPandaError(),
revokedUs: cont.GetRevokedUs(),
}
c.registerId(contact.id)
c.contacts[contact.id] = contact
if contact.groupKey, ok = new(bbssig.MemberKey).Unmarshal(c.groupPriv.Group, cont.GroupKey); !ok {
return errors.New("client: failed to unmarshal group member key")
}
copy(contact.lastDHPrivate[:], cont.LastPrivate)
copy(contact.currentDHPrivate[:], cont.CurrentPrivate)
if cont.Ratchet != nil {
contact.ratchet = c.newRatchet(contact)
if err := contact.ratchet.Unmarshal(cont.Ratchet); err != nil {
return err
}
}
if cont.IsPending != nil && *cont.IsPending {
contact.isPending = true
continue
}
theirGroup, ok := new(bbssig.Group).Unmarshal(cont.TheirGroup)
if !ok {
return errors.New("client: failed to unmarshal their group")
}
if contact.myGroupKey, ok = new(bbssig.MemberKey).Unmarshal(theirGroup, cont.MyGroupKey); !ok {
return errors.New("client: failed to unmarshal my group key")
}
if cont.TheirServer == nil {
return errors.New("client: contact missing server")
}
contact.theirServer = *cont.TheirServer
if len(cont.TheirPub) != len(contact.theirPub) {
return errors.New("client: contact missing public key")
}
copy(contact.theirPub[:], cont.TheirPub)
if len(cont.TheirIdentityPublic) != len(contact.theirIdentityPublic) {
return errors.New("client: contact missing identity public key")
}
copy(contact.theirIdentityPublic[:], cont.TheirIdentityPublic)
copy(contact.theirLastDHPublic[:], cont.TheirLastPublic)
copy(contact.theirCurrentDHPublic[:], cont.TheirCurrentPublic)
for _, prevTag := range cont.PreviousTags {
contact.previousTags = append(contact.previousTags, previousTag{
tag: prevTag.Tag,
expired: time.Unix(*prevTag.Expired, 0),
})
}
// For now we'll have to do this conditionally until everyone
// has updated local state.
if cont.Generation != nil {
contact.generation = *cont.Generation
}
if cont.SupportedVersion != nil {
contact.supportedVersion = *cont.SupportedVersion
}
contact.events = make([]Event, 0, len(cont.Events))
for _, evt := range cont.Events {
event := Event{
t: time.Unix(*evt.Time, 0),
msg: *evt.Message,
}
contact.events = append(contact.events, event)
}
}
now := c.Now()
for _, m := range state.Inbox {
msg := &InboxMessage{
id: *m.Id,
from: *m.From,
receivedTime: time.Unix(*m.ReceivedTime, 0),
acked: *m.Acked,
read: *m.Read,
sealed: m.Sealed,
retained: m.GetRetained(),
exposureTime: now,
}
c.registerId(msg.id)
if len(m.Message) > 0 {
msg.message = new(pond.Message)
if err := proto.Unmarshal(m.Message, msg.message); err != nil {
return errors.New("client: corrupt message in inbox: " + err.Error())
}
}
c.inbox = append(c.inbox, msg)
}
for _, m := range state.Outbox {
msg := &queuedMessage{
id: *m.Id,
to: *m.To,
server: *m.Server,
created: time.Unix(*m.Created, 0),
}
c.registerId(msg.id)
if len(m.Message) > 0 {
msg.message = new(pond.Message)
if err := proto.Unmarshal(m.Message, msg.message); err != nil {
return errors.New("client: corrupt message in outbox: " + err.Error())
}
}
if m.Sent != nil {
msg.sent = time.Unix(*m.Sent, 0)
}
if m.Acked != nil {
msg.acked = time.Unix(*m.Acked, 0)
}
if len(m.Request) != 0 {
msg.request = new(pond.Request)
if err := proto.Unmarshal(m.Request, msg.request); err != nil {
return errors.New("client: corrupt request in outbox: " + err.Error())
}
}
msg.revocation = m.GetRevocation()
if msg.revocation && len(msg.server) == 0 {
// There was a bug in some versions where revoking a
// pending contact would result in a revocation message
// with an empty server.
msg.server = c.server
}
c.outbox = append(c.outbox, msg)
if msg.sent.IsZero() && (msg.to == 0 || !c.contacts[msg.to].revokedUs) {
// This message hasn't been sent yet.
c.enqueue(msg)
}
}
for _, m := range state.Drafts {
draft := &Draft{
id: *m.Id,
body: *m.Body,
attachments: m.Attachments,
detachments: m.Detachments,
created: time.Unix(*m.Created, 0),
}
c.registerId(draft.id)
if m.To != nil {
draft.to = *m.To
}
if m.InReplyTo != nil {
draft.inReplyTo = *m.InReplyTo
}
c.drafts[draft.id] = draft
}
return nil
}
func (c *client) marshal() []byte {
var err error
var contacts []*disk.Contact
for _, contact := range c.contacts {
cont := &disk.Contact{
Id: proto.Uint64(contact.id),
Name: proto.String(contact.name),
GroupKey: contact.groupKey.Marshal(),
IsPending: proto.Bool(contact.isPending),
KeyExchangeBytes: contact.kxsBytes,
LastPrivate: contact.lastDHPrivate[:],
CurrentPrivate: contact.currentDHPrivate[:],
SupportedVersion: proto.Int32(contact.supportedVersion),
PandaKeyExchange: contact.pandaKeyExchange,
PandaError: proto.String(contact.pandaResult),
RevokedUs: proto.Bool(contact.revokedUs),
}
if !contact.isPending {
cont.MyGroupKey = contact.myGroupKey.Marshal()
cont.TheirGroup = contact.myGroupKey.Group.Marshal()
cont.TheirServer = proto.String(contact.theirServer)
cont.TheirPub = contact.theirPub[:]
cont.Generation = proto.Uint32(contact.generation)
cont.TheirIdentityPublic = contact.theirIdentityPublic[:]
cont.TheirLastPublic = contact.theirLastDHPublic[:]
cont.TheirCurrent | {
c.server = *state.Server
if len(state.Identity) != len(c.identity) {
return errors.New("client: identity is wrong length in State")
}
copy(c.identity[:], state.Identity)
curve25519.ScalarBaseMult(&c.identityPublic, &c.identity)
group, ok := new(bbssig.Group).Unmarshal(state.Group)
if !ok {
return errors.New("client: failed to unmarshal group")
}
c.groupPriv, ok = new(bbssig.PrivateKey).Unmarshal(group, state.GroupPrivate)
if !ok {
return errors.New("client: failed to unmarshal group private key")
}
if len(state.Private) != len(c.priv) {
return errors.New("client: failed to unmarshal private key") | identifier_body |
|
disk.go | ix(*prevGroupPriv.Expired, 0),
})
}
for _, cont := range state.Contacts {
contact := &Contact{
id: *cont.Id,
name: *cont.Name,
kxsBytes: cont.KeyExchangeBytes,
pandaKeyExchange: cont.PandaKeyExchange,
pandaResult: cont.GetPandaError(),
revokedUs: cont.GetRevokedUs(),
}
c.registerId(contact.id)
c.contacts[contact.id] = contact
if contact.groupKey, ok = new(bbssig.MemberKey).Unmarshal(c.groupPriv.Group, cont.GroupKey); !ok {
return errors.New("client: failed to unmarshal group member key")
}
copy(contact.lastDHPrivate[:], cont.LastPrivate)
copy(contact.currentDHPrivate[:], cont.CurrentPrivate)
if cont.Ratchet != nil {
contact.ratchet = c.newRatchet(contact)
if err := contact.ratchet.Unmarshal(cont.Ratchet); err != nil {
return err
}
}
if cont.IsPending != nil && *cont.IsPending {
contact.isPending = true
continue
}
theirGroup, ok := new(bbssig.Group).Unmarshal(cont.TheirGroup)
if !ok {
return errors.New("client: failed to unmarshal their group")
}
if contact.myGroupKey, ok = new(bbssig.MemberKey).Unmarshal(theirGroup, cont.MyGroupKey); !ok {
return errors.New("client: failed to unmarshal my group key")
}
if cont.TheirServer == nil {
return errors.New("client: contact missing server")
}
contact.theirServer = *cont.TheirServer
if len(cont.TheirPub) != len(contact.theirPub) {
return errors.New("client: contact missing public key")
}
copy(contact.theirPub[:], cont.TheirPub)
if len(cont.TheirIdentityPublic) != len(contact.theirIdentityPublic) {
return errors.New("client: contact missing identity public key")
}
copy(contact.theirIdentityPublic[:], cont.TheirIdentityPublic)
copy(contact.theirLastDHPublic[:], cont.TheirLastPublic)
copy(contact.theirCurrentDHPublic[:], cont.TheirCurrentPublic)
for _, prevTag := range cont.PreviousTags {
contact.previousTags = append(contact.previousTags, previousTag{
tag: prevTag.Tag,
expired: time.Unix(*prevTag.Expired, 0),
})
}
// For now we'll have to do this conditionally until everyone
// has updated local state.
if cont.Generation != nil {
contact.generation = *cont.Generation
}
if cont.SupportedVersion != nil {
contact.supportedVersion = *cont.SupportedVersion
}
contact.events = make([]Event, 0, len(cont.Events))
for _, evt := range cont.Events {
event := Event{
t: time.Unix(*evt.Time, 0),
msg: *evt.Message,
}
contact.events = append(contact.events, event)
}
}
now := c.Now()
for _, m := range state.Inbox {
msg := &InboxMessage{
id: *m.Id,
from: *m.From,
receivedTime: time.Unix(*m.ReceivedTime, 0),
acked: *m.Acked,
read: *m.Read,
sealed: m.Sealed,
retained: m.GetRetained(),
exposureTime: now,
}
c.registerId(msg.id)
if len(m.Message) > 0 {
msg.message = new(pond.Message)
if err := proto.Unmarshal(m.Message, msg.message); err != nil {
return errors.New("client: corrupt message in inbox: " + err.Error())
}
}
c.inbox = append(c.inbox, msg)
}
for _, m := range state.Outbox {
msg := &queuedMessage{
id: *m.Id,
to: *m.To,
server: *m.Server,
created: time.Unix(*m.Created, 0),
}
c.registerId(msg.id)
if len(m.Message) > 0 {
msg.message = new(pond.Message)
if err := proto.Unmarshal(m.Message, msg.message); err != nil {
return errors.New("client: corrupt message in outbox: " + err.Error())
}
}
if m.Sent != nil {
msg.sent = time.Unix(*m.Sent, 0)
}
if m.Acked != nil {
msg.acked = time.Unix(*m.Acked, 0)
}
if len(m.Request) != 0 {
msg.request = new(pond.Request)
if err := proto.Unmarshal(m.Request, msg.request); err != nil {
return errors.New("client: corrupt request in outbox: " + err.Error())
}
}
msg.revocation = m.GetRevocation()
if msg.revocation && len(msg.server) == 0 {
// There was a bug in some versions where revoking a
// pending contact would result in a revocation message
// with an empty server.
msg.server = c.server
}
c.outbox = append(c.outbox, msg)
if msg.sent.IsZero() && (msg.to == 0 || !c.contacts[msg.to].revokedUs) {
// This message hasn't been sent yet.
c.enqueue(msg)
}
}
for _, m := range state.Drafts {
draft := &Draft{
id: *m.Id,
body: *m.Body,
attachments: m.Attachments,
detachments: m.Detachments,
created: time.Unix(*m.Created, 0),
}
c.registerId(draft.id)
if m.To != nil {
draft.to = *m.To
}
if m.InReplyTo != nil {
draft.inReplyTo = *m.InReplyTo
}
c.drafts[draft.id] = draft
}
return nil
}
func (c *client) marshal() []byte {
var err error
var contacts []*disk.Contact
for _, contact := range c.contacts {
cont := &disk.Contact{
Id: proto.Uint64(contact.id),
Name: proto.String(contact.name),
GroupKey: contact.groupKey.Marshal(),
IsPending: proto.Bool(contact.isPending),
KeyExchangeBytes: contact.kxsBytes,
LastPrivate: contact.lastDHPrivate[:],
CurrentPrivate: contact.currentDHPrivate[:],
SupportedVersion: proto.Int32(contact.supportedVersion),
PandaKeyExchange: contact.pandaKeyExchange,
PandaError: proto.String(contact.pandaResult),
RevokedUs: proto.Bool(contact.revokedUs),
}
if !contact.isPending {
cont.MyGroupKey = contact.myGroupKey.Marshal()
cont.TheirGroup = contact.myGroupKey.Group.Marshal()
cont.TheirServer = proto.String(contact.theirServer)
cont.TheirPub = contact.theirPub[:]
cont.Generation = proto.Uint32(contact.generation)
cont.TheirIdentityPublic = contact.theirIdentityPublic[:]
cont.TheirLastPublic = contact.theirLastDHPublic[:]
cont.TheirCurrentPublic = contact.theirCurrentDHPublic[:]
}
if contact.ratchet != nil {
cont.Ratchet = contact.ratchet.Marshal(time.Now(), messageLifetime)
}
for _, prevTag := range contact.previousTags {
if time.Since(prevTag.expired) > previousTagLifetime {
continue
}
cont.PreviousTags = append(cont.PreviousTags, &disk.Contact_PreviousTag{
Tag: prevTag.tag,
Expired: proto.Int64(prevTag.expired.Unix()),
})
}
cont.Events = make([]*disk.Contact_Event, 0, len(contact.events))
for _, event := range contact.events {
if time.Since(event.t) > messageLifetime {
continue
}
cont.Events = append(cont.Events, &disk.Contact_Event{
Time: proto.Int64(event.t.Unix()),
Message: proto.String(event.msg),
})
}
contacts = append(contacts, cont)
}
var inbox []*disk.Inbox
for _, msg := range c.inbox {
if time.Since(msg.receivedTime) > messageLifetime && !msg.retained {
continue
}
m := &disk.Inbox{
Id: proto.Uint64(msg.id),
From: proto.Uint64(msg.from),
ReceivedTime: proto.Int64(msg.receivedTime.Unix()),
Acked: proto.Bool(msg.acked),
Read: proto.Bool(msg.read),
Sealed: msg.sealed,
Retained: proto.Bool(msg.retained),
}
if msg.message != nil | {
if m.Message, err = proto.Marshal(msg.message); err != nil {
panic(err)
}
} | conditional_block |
|
appstate.go | big.NewInt(0)}
acc.Amount.SetString(amount, 0)
return &acc, nil
}
// UpdateAccountCache updates an account in the in-memory cache.
func (as *AccountState) UpdateAccountCache(acc *Account) {
	as.Lock() // exclusive lock: the accounts map is mutated below
	defer as.Unlock()
as.accounts[acc.Address] = acc
as.isDirty = true
}
// SyncToDisk flushes the cached accounts to disk.
func (as *AccountState) SyncToDisk() error {
if !as.isDirty {
return nil
}
if len(as.accounts) == 0 {
return nil
}
sqlStr := "replace into funds(address, amount) values "
for _, val := range as.accounts {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%s'),", val.Address, val.Amount.String())
}
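	// trim the trailing comma left by the value list before executing the bulk REPLACE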
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err := as.db.Exec(sqlStr)
as.accounts = make(map[string]*Account)
return err
}
// Account holds an address and its balance.
type Account struct {
Address string
Amount *big.Int
}
// NewAccount returns an Account instance with the given address and amount.
func NewAccount(addr, amount string) *Account {
am := big.NewInt(0)
am.SetString(amount, 0)
return &Account{Address: addr, Amount: am}
}
// TxState holds the transactions accumulated for the current block.
type TxState struct {
sync.RWMutex
Txs Transactions
log log.Logger
db *sqlx.DB
}
// NewTxState returns a TxState instance.
func NewTxState(db *sqlx.DB, log log.Logger) *TxState {
return &TxState{
Txs: Transactions{},
log: log,
db: db,
}
}
// UpdateTx appends an executed transaction.
func (txState *TxState) UpdateTx(tx *Transaction) {
txState.Lock()
defer txState.Unlock()
txState.log.Error("one tx has been executed.......")
txState.Txs = txState.Txs.AppendTx(tx)
}
// SyncToDisk writes the accumulated transactions to the db and returns their merkle root.
func (txState *TxState) SyncToDisk(height int64) (hashRoot string, err error) {
if txState.Txs.Len() == 0 {
return txState.Txs.HashRoot(), nil
}
sqlStr := "replace into transaction_records(id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign) values "
for _, val := range txState.Txs {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%s', '%s', '%s', '%s', '%d', '%d', '%s', '%d', '%d', '%s'),",
val.TxID(), val.Sender, val.Receiver, val.Value, val.Input, val.ExpiredNum, val.TimeStamp, val.Nonce, val.RefBlockNum, height, val.Sign)
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err = txState.db.Exec(sqlStr)
// merkle tree
hashRoot = txState.Txs.HashRoot()
// new trans for next commit
txState.RLock()
txState.Txs = Transactions{}
txState.RUnlock()
return
}
//QueryTxsByAccount query account related tx
func (txState *TxState) QueryTxsByAccount(account string, start, offset int64) Transactions {
var txs Transactions
// id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign
sqlStr := "select id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num from transaction_records where sender=? or receiver=? limit ?, ?;"
rows, err := txState.db.Queryx(sqlStr, account, account, start, offset)
if err != nil {
txState.log.Error("query txs by account failed", "err", err.Error())
return txs
}
for rows.Next() {
var tmp Transaction
rows.Scan(&tmp.ID, &tmp.Sender, &tmp.Receiver, &tmp.Value, &tmp.Input, &tmp.ExpiredNum, &tmp.TimeStamp, &tmp.Nonce, &tmp.RefBlockNum)
txs = append(txs, &tmp)
}
return txs
}
//QueryTxByID query tx by id
func (txState *TxState) QueryTxByID(id string) Transaction {
var tx Transaction
// id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign
sqlStr := "select id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num from transaction_records where id=? ;"
row := txState.db.QueryRowx(sqlStr, id)
err := row.Scan(&tx.ID, &tx.Sender, &tx.Receiver, &tx.Value, &tx.Input, &tx.ExpiredNum, &tx.TimeStamp, &tx.Nonce, &tx.RefBlockNum)
if err == sql.ErrNoRows {
return tx
}
if err != nil {
txState.log.Error("query txs by id failed", "err", err.Error())
}
return tx
}
// TxRepState means current tx receipt's info
type TxRepState struct {
sync.RWMutex
Txreps TransactionReceipts
log log.Logger
db *sqlx.DB
}
// NewTxRepState tx receipt inst
func NewTxRepState(db *sqlx.DB, log log.Logger) *TxRepState {
return &TxRepState{
Txreps: TransactionReceipts{},
log: log,
db: db,
}
}
// UpdateTxRep append tx
func (txrSt *TxRepState) UpdateTxRep(tr *TransactionReceipt) {
txrSt.Lock()
defer txrSt.Unlock()
txrSt.Txreps = txrSt.Txreps.AppendTxrp(tr)
}
// SyncToDisk write tx to db
func (txrSt *TxRepState) SyncToDisk(height int64) (hashRoot string, err error) {
if txrSt.Txreps.Len() == 0 {
return txrSt.Txreps.HashRoot(), nil
}
// id | status | fee | block_num | tx_hash | log
sqlStr := "replace into transaction_receipts (id, status, fee, block_num, tx_hash, log) values "
for _, val := range txrSt.Txreps {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%d', '%s', '%d', '%s', '%s'),",
string(val.ID()), val.Status, val.Fee.String(), height, val.TxHash, val.Log)
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err = txrSt.db.Exec(sqlStr)
// merkle tree
hashRoot = txrSt.Txreps.HashRoot()
// new trans for next commit
txrSt.RLock()
txrSt.Txreps = TransactionReceipts{}
txrSt.RUnlock()
return
}
// HeaderState appheader state
type HeaderState struct {
CurBlockNum int64 `json:"cur_block_num"`
CurBlockHash string `json:"cur_block_hash"`
CurAPPHash string `json:"cur_app_hash"`
TimeStamp int64 `json:"time_stamp"`
Fee *big.Int
db *sqlx.DB
log log.Logger
}
// LoadHeaderState from db load header
func (hdSt *HeaderState) | () error {
sqlStr := "select content from state where id=1"
var text string
err := hdSt.db.QueryRowx(sqlStr).Scan(&text)
if err == sql.ErrNoRows {
return nil
}
if err != nil {
return err
}
return json.Unmarshal([]byte(text), hdSt)
}
// SyncToDisk to db
func (hdSt *HeaderState) SyncToDisk() error {
dat, err := json.Marshal(hdSt)
if err != nil {
return err
}
sqlStr := fmt.Sprintf("replace into state (id, content) values ('%d', '%s')", 1, string(dat))
_, err = hdSt.db.Exec(sqlStr)
return err
}
// BlockState current app block state
type BlockState struct {
APPHash string
TxRoot string
TxRepRoot string
BlockHash string
BlockNum int64
TimeStamp int64
db *sqlx.DB
log log.Logger
}
// NewBlockState block state instance
func NewBlockState(db *sqlx.DB, log log.Logger) *BlockState {
return &BlockState{db: db, log: log}
}
//Hash return apphash
func (bs *BlockState) Hash() string {
code := fmt.Sprintf("block_hash=%s&block_num=%d&tx_root=%s&receipt_root=%s&time_stamp=%d",
bs.BlockHash, bs.BlockNum, bs.TxRoot, bs.TxRepRoot, bs.TimeStamp)
// bs.log.Error("show blk code ", "code", code)
dat := []byte(code)
buf := make([]byte, base64.StdEncoding.EncodedLen(len(dat)))
base64.StdEncoding.Encode(buf, dat)
bs.APPHash = hex.EncodeToString(crypto.Sha256(buf))
return bs.APPHash
}
// SyncToDisk to db
func (bs *BlockState) SyncToDisk() error {
hash := bs.Hash()
// | LoadHeaderState | identifier_name |
appstate.go | big.NewInt(0)}
acc.Amount.SetString(amount, 0)
return &acc, nil
}
// UpdateAccountCache update account in memory
func (as *AccountState) UpdateAccountCache(acc *Account) {
as.RLock()
defer as.RUnlock()
as.accounts[acc.Address] = acc
as.isDirty = true
}
//SyncToDisk cache to disk
func (as *AccountState) SyncToDisk() error {
if !as.isDirty {
return nil
}
if len(as.accounts) == 0 |
sqlStr := "replace into funds(address, amount) values "
for _, val := range as.accounts {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%s'),", val.Address, val.Amount.String())
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err := as.db.Exec(sqlStr)
as.accounts = make(map[string]*Account)
return err
}
//Account ...
type Account struct {
Address string
Amount *big.Int
}
// NewAccount return account inst
func NewAccount(addr, amount string) *Account {
am := big.NewInt(0)
am.SetString(amount, 0)
return &Account{Address: addr, Amount: am}
}
// TxState means current tx's info
type TxState struct {
sync.RWMutex
Txs Transactions
log log.Logger
db *sqlx.DB
}
// NewTxState txstate inst
func NewTxState(db *sqlx.DB, log log.Logger) *TxState {
return &TxState{
Txs: Transactions{},
log: log,
db: db,
}
}
// UpdateTx append tx
func (txState *TxState) UpdateTx(tx *Transaction) {
txState.Lock()
defer txState.Unlock()
txState.log.Error("one tx has been executed.......")
txState.Txs = txState.Txs.AppendTx(tx)
}
// SyncToDisk write tx to db
func (txState *TxState) SyncToDisk(height int64) (hashRoot string, err error) {
if txState.Txs.Len() == 0 {
return txState.Txs.HashRoot(), nil
}
sqlStr := "replace into transaction_records(id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign) values "
for _, val := range txState.Txs {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%s', '%s', '%s', '%s', '%d', '%d', '%s', '%d', '%d', '%s'),",
val.TxID(), val.Sender, val.Receiver, val.Value, val.Input, val.ExpiredNum, val.TimeStamp, val.Nonce, val.RefBlockNum, height, val.Sign)
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err = txState.db.Exec(sqlStr)
// merkle tree
hashRoot = txState.Txs.HashRoot()
// new trans for next commit
txState.RLock()
txState.Txs = Transactions{}
txState.RUnlock()
return
}
//QueryTxsByAccount query account related tx
func (txState *TxState) QueryTxsByAccount(account string, start, offset int64) Transactions {
var txs Transactions
// id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign
sqlStr := "select id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num from transaction_records where sender=? or receiver=? limit ?, ?;"
rows, err := txState.db.Queryx(sqlStr, account, account, start, offset)
if err != nil {
txState.log.Error("query txs by account failed", "err", err.Error())
return txs
}
for rows.Next() {
var tmp Transaction
rows.Scan(&tmp.ID, &tmp.Sender, &tmp.Receiver, &tmp.Value, &tmp.Input, &tmp.ExpiredNum, &tmp.TimeStamp, &tmp.Nonce, &tmp.RefBlockNum)
txs = append(txs, &tmp)
}
return txs
}
//QueryTxByID query tx by id
func (txState *TxState) QueryTxByID(id string) Transaction {
var tx Transaction
// id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign
sqlStr := "select id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num from transaction_records where id=? ;"
row := txState.db.QueryRowx(sqlStr, id)
err := row.Scan(&tx.ID, &tx.Sender, &tx.Receiver, &tx.Value, &tx.Input, &tx.ExpiredNum, &tx.TimeStamp, &tx.Nonce, &tx.RefBlockNum)
if err == sql.ErrNoRows {
return tx
}
if err != nil {
txState.log.Error("query txs by id failed", "err", err.Error())
}
return tx
}
// TxRepState means current tx receipt's info
type TxRepState struct {
sync.RWMutex
Txreps TransactionReceipts
log log.Logger
db *sqlx.DB
}
// NewTxRepState tx receipt inst
func NewTxRepState(db *sqlx.DB, log log.Logger) *TxRepState {
return &TxRepState{
Txreps: TransactionReceipts{},
log: log,
db: db,
}
}
// UpdateTxRep append tx
func (txrSt *TxRepState) UpdateTxRep(tr *TransactionReceipt) {
txrSt.Lock()
defer txrSt.Unlock()
txrSt.Txreps = txrSt.Txreps.AppendTxrp(tr)
}
// SyncToDisk write tx to db
func (txrSt *TxRepState) SyncToDisk(height int64) (hashRoot string, err error) {
if txrSt.Txreps.Len() == 0 {
return txrSt.Txreps.HashRoot(), nil
}
// id | status | fee | block_num | tx_hash | log
sqlStr := "replace into transaction_receipts (id, status, fee, block_num, tx_hash, log) values "
for _, val := range txrSt.Txreps {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%d', '%s', '%d', '%s', '%s'),",
string(val.ID()), val.Status, val.Fee.String(), height, val.TxHash, val.Log)
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err = txrSt.db.Exec(sqlStr)
// merkle tree
hashRoot = txrSt.Txreps.HashRoot()
// new trans for next commit
txrSt.RLock()
txrSt.Txreps = TransactionReceipts{}
txrSt.RUnlock()
return
}
// HeaderState appheader state
type HeaderState struct {
CurBlockNum int64 `json:"cur_block_num"`
CurBlockHash string `json:"cur_block_hash"`
CurAPPHash string `json:"cur_app_hash"`
TimeStamp int64 `json:"time_stamp"`
Fee *big.Int
db *sqlx.DB
log log.Logger
}
// LoadHeaderState from db load header
func (hdSt *HeaderState) LoadHeaderState() error {
sqlStr := "select content from state where id=1"
var text string
err := hdSt.db.QueryRowx(sqlStr).Scan(&text)
if err == sql.ErrNoRows {
return nil
}
if err != nil {
return err
}
return json.Unmarshal([]byte(text), hdSt)
}
// SyncToDisk to db
func (hdSt *HeaderState) SyncToDisk() error {
dat, err := json.Marshal(hdSt)
if err != nil {
return err
}
sqlStr := fmt.Sprintf("replace into state (id, content) values ('%d', '%s')", 1, string(dat))
_, err = hdSt.db.Exec(sqlStr)
return err
}
// BlockState current app block state
type BlockState struct {
APPHash string
TxRoot string
TxRepRoot string
BlockHash string
BlockNum int64
TimeStamp int64
db *sqlx.DB
log log.Logger
}
// NewBlockState block state instance
func NewBlockState(db *sqlx.DB, log log.Logger) *BlockState {
return &BlockState{db: db, log: log}
}
//Hash return apphash
func (bs *BlockState) Hash() string {
code := fmt.Sprintf("block_hash=%s&block_num=%d&tx_root=%s&receipt_root=%s&time_stamp=%d",
bs.BlockHash, bs.BlockNum, bs.TxRoot, bs.TxRepRoot, bs.TimeStamp)
// bs.log.Error("show blk code ", "code", code)
dat := []byte(code)
buf := make([]byte, base64.StdEncoding.EncodedLen(len(dat)))
base64.StdEncoding.Encode(buf, dat)
bs.APPHash = hex.EncodeToString(crypto.Sha256(buf))
return bs.APPHash
}
// SyncToDisk to db
func (bs *BlockState) SyncToDisk() error {
hash := bs.Hash()
| {
return nil
} | conditional_block |
appstate.go | big.NewInt(0)}
acc.Amount.SetString(amount, 0)
return &acc, nil
}
// UpdateAccountCache update account in memory
func (as *AccountState) UpdateAccountCache(acc *Account) {
as.RLock()
defer as.RUnlock()
as.accounts[acc.Address] = acc
as.isDirty = true
}
//SyncToDisk cache to disk
func (as *AccountState) SyncToDisk() error {
if !as.isDirty {
return nil
}
if len(as.accounts) == 0 {
return nil
}
sqlStr := "replace into funds(address, amount) values "
for _, val := range as.accounts {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%s'),", val.Address, val.Amount.String())
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err := as.db.Exec(sqlStr)
as.accounts = make(map[string]*Account)
return err
}
//Account ...
type Account struct {
Address string
Amount *big.Int
}
// NewAccount return account inst
func NewAccount(addr, amount string) *Account {
am := big.NewInt(0)
am.SetString(amount, 0)
return &Account{Address: addr, Amount: am}
}
// TxState means current tx's info
type TxState struct {
sync.RWMutex
Txs Transactions
log log.Logger
db *sqlx.DB
}
// NewTxState txstate inst
func NewTxState(db *sqlx.DB, log log.Logger) *TxState |
// UpdateTx append tx
func (txState *TxState) UpdateTx(tx *Transaction) {
txState.Lock()
defer txState.Unlock()
txState.log.Error("one tx has been executed.......")
txState.Txs = txState.Txs.AppendTx(tx)
}
// SyncToDisk write tx to db
func (txState *TxState) SyncToDisk(height int64) (hashRoot string, err error) {
if txState.Txs.Len() == 0 {
return txState.Txs.HashRoot(), nil
}
sqlStr := "replace into transaction_records(id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign) values "
for _, val := range txState.Txs {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%s', '%s', '%s', '%s', '%d', '%d', '%s', '%d', '%d', '%s'),",
val.TxID(), val.Sender, val.Receiver, val.Value, val.Input, val.ExpiredNum, val.TimeStamp, val.Nonce, val.RefBlockNum, height, val.Sign)
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err = txState.db.Exec(sqlStr)
// merkle tree
hashRoot = txState.Txs.HashRoot()
// new trans for next commit
txState.RLock()
txState.Txs = Transactions{}
txState.RUnlock()
return
}
//QueryTxsByAccount query account related tx
func (txState *TxState) QueryTxsByAccount(account string, start, offset int64) Transactions {
var txs Transactions
// id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign
sqlStr := "select id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num from transaction_records where sender=? or receiver=? limit ?, ?;"
rows, err := txState.db.Queryx(sqlStr, account, account, start, offset)
if err != nil {
txState.log.Error("query txs by account failed", "err", err.Error())
return txs
}
for rows.Next() {
var tmp Transaction
rows.Scan(&tmp.ID, &tmp.Sender, &tmp.Receiver, &tmp.Value, &tmp.Input, &tmp.ExpiredNum, &tmp.TimeStamp, &tmp.Nonce, &tmp.RefBlockNum)
txs = append(txs, &tmp)
}
return txs
}
//QueryTxByID query tx by id
func (txState *TxState) QueryTxByID(id string) Transaction {
var tx Transaction
// id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign
sqlStr := "select id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num from transaction_records where id=? ;"
row := txState.db.QueryRowx(sqlStr, id)
err := row.Scan(&tx.ID, &tx.Sender, &tx.Receiver, &tx.Value, &tx.Input, &tx.ExpiredNum, &tx.TimeStamp, &tx.Nonce, &tx.RefBlockNum)
if err == sql.ErrNoRows {
return tx
}
if err != nil {
txState.log.Error("query txs by id failed", "err", err.Error())
}
return tx
}
// TxRepState means current tx receipt's info
type TxRepState struct {
sync.RWMutex
Txreps TransactionReceipts
log log.Logger
db *sqlx.DB
}
// NewTxRepState tx receipt inst
func NewTxRepState(db *sqlx.DB, log log.Logger) *TxRepState {
return &TxRepState{
Txreps: TransactionReceipts{},
log: log,
db: db,
}
}
// UpdateTxRep append tx
func (txrSt *TxRepState) UpdateTxRep(tr *TransactionReceipt) {
txrSt.Lock()
defer txrSt.Unlock()
txrSt.Txreps = txrSt.Txreps.AppendTxrp(tr)
}
// SyncToDisk write tx to db
func (txrSt *TxRepState) SyncToDisk(height int64) (hashRoot string, err error) {
if txrSt.Txreps.Len() == 0 {
return txrSt.Txreps.HashRoot(), nil
}
// id | status | fee | block_num | tx_hash | log
sqlStr := "replace into transaction_receipts (id, status, fee, block_num, tx_hash, log) values "
for _, val := range txrSt.Txreps {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%d', '%s', '%d', '%s', '%s'),",
string(val.ID()), val.Status, val.Fee.String(), height, val.TxHash, val.Log)
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err = txrSt.db.Exec(sqlStr)
// merkle tree
hashRoot = txrSt.Txreps.HashRoot()
// new trans for next commit
txrSt.RLock()
txrSt.Txreps = TransactionReceipts{}
txrSt.RUnlock()
return
}
// HeaderState appheader state
type HeaderState struct {
CurBlockNum int64 `json:"cur_block_num"`
CurBlockHash string `json:"cur_block_hash"`
CurAPPHash string `json:"cur_app_hash"`
TimeStamp int64 `json:"time_stamp"`
Fee *big.Int
db *sqlx.DB
log log.Logger
}
// LoadHeaderState from db load header
func (hdSt *HeaderState) LoadHeaderState() error {
sqlStr := "select content from state where id=1"
var text string
err := hdSt.db.QueryRowx(sqlStr).Scan(&text)
if err == sql.ErrNoRows {
return nil
}
if err != nil {
return err
}
return json.Unmarshal([]byte(text), hdSt)
}
// SyncToDisk to db
func (hdSt *HeaderState) SyncToDisk() error {
dat, err := json.Marshal(hdSt)
if err != nil {
return err
}
sqlStr := fmt.Sprintf("replace into state (id, content) values ('%d', '%s')", 1, string(dat))
_, err = hdSt.db.Exec(sqlStr)
return err
}
// BlockState current app block state
type BlockState struct {
APPHash string
TxRoot string
TxRepRoot string
BlockHash string
BlockNum int64
TimeStamp int64
db *sqlx.DB
log log.Logger
}
// NewBlockState block state instance
func NewBlockState(db *sqlx.DB, log log.Logger) *BlockState {
return &BlockState{db: db, log: log}
}
//Hash return apphash
func (bs *BlockState) Hash() string {
code := fmt.Sprintf("block_hash=%s&block_num=%d&tx_root=%s&receipt_root=%s&time_stamp=%d",
bs.BlockHash, bs.BlockNum, bs.TxRoot, bs.TxRepRoot, bs.TimeStamp)
// bs.log.Error("show blk code ", "code", code)
dat := []byte(code)
buf := make([]byte, base64.StdEncoding.EncodedLen(len(dat)))
base64.StdEncoding.Encode(buf, dat)
bs.APPHash = hex.EncodeToString(crypto.Sha256(buf))
return bs.APPHash
}
// SyncToDisk to db
func (bs *BlockState) SyncToDisk() error {
hash := bs.Hash()
| {
return &TxState{
Txs: Transactions{},
log: log,
db: db,
}
} | identifier_body |
appstate.go | // AccountState means current account's info
type AccountState struct {
sync.RWMutex
accounts map[string]*Account
log log.Logger
db *sqlx.DB
isDirty bool
}
// NewAccountState return AccountState inst
func NewAccountState(db *sqlx.DB, log log.Logger) *AccountState {
return &AccountState{
accounts: make(map[string]*Account),
log: log,
db: db,
isDirty: false,
}
}
// LoadAccount get account from cache or db
func (as *AccountState) LoadAccount(address string) (*Account, error) {
as.RLock()
defer as.RUnlock()
if k, ok := as.accounts[address]; ok {
return k, nil
}
// try load from db
acc, err := as.selectAccount(address)
if err == nil {
as.accounts[address] = acc
}
return acc, err
}
func (as *AccountState) selectAccount(addr string) (*Account, error) {
sqlStr := "select amount from funds where address = ?"
var amount string
err := as.db.QueryRowx(sqlStr, addr).Scan(&amount)
if err == sql.ErrNoRows {
return &Account{Address: addr, Amount: new(big.Int).SetInt64(0)}, nil
}
if err != nil {
return nil, err
}
acc := Account{Address: addr, Amount: big.NewInt(0)}
acc.Amount.SetString(amount, 0)
return &acc, nil
}
// UpdateAccountCache update account in memory
func (as *AccountState) UpdateAccountCache(acc *Account) {
as.RLock()
defer as.RUnlock()
as.accounts[acc.Address] = acc
as.isDirty = true
}
//SyncToDisk cache to disk
func (as *AccountState) SyncToDisk() error {
if !as.isDirty {
return nil
}
if len(as.accounts) == 0 {
return nil
}
sqlStr := "replace into funds(address, amount) values "
for _, val := range as.accounts {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%s'),", val.Address, val.Amount.String())
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err := as.db.Exec(sqlStr)
as.accounts = make(map[string]*Account)
return err
}
//Account ...
type Account struct {
Address string
Amount *big.Int
}
// NewAccount return account inst
func NewAccount(addr, amount string) *Account {
am := big.NewInt(0)
am.SetString(amount, 0)
return &Account{Address: addr, Amount: am}
}
// TxState means current tx's info
type TxState struct {
sync.RWMutex
Txs Transactions
log log.Logger
db *sqlx.DB
}
// NewTxState txstate inst
func NewTxState(db *sqlx.DB, log log.Logger) *TxState {
return &TxState{
Txs: Transactions{},
log: log,
db: db,
}
}
// UpdateTx append tx
func (txState *TxState) UpdateTx(tx *Transaction) {
txState.Lock()
defer txState.Unlock()
txState.log.Error("one tx has been executed.......")
txState.Txs = txState.Txs.AppendTx(tx)
}
// SyncToDisk write tx to db
func (txState *TxState) SyncToDisk(height int64) (hashRoot string, err error) {
if txState.Txs.Len() == 0 {
return txState.Txs.HashRoot(), nil
}
sqlStr := "replace into transaction_records(id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign) values "
for _, val := range txState.Txs {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%s', '%s', '%s', '%s', '%d', '%d', '%s', '%d', '%d', '%s'),",
val.TxID(), val.Sender, val.Receiver, val.Value, val.Input, val.ExpiredNum, val.TimeStamp, val.Nonce, val.RefBlockNum, height, val.Sign)
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err = txState.db.Exec(sqlStr)
// merkle tree
hashRoot = txState.Txs.HashRoot()
// new trans for next commit
txState.RLock()
txState.Txs = Transactions{}
txState.RUnlock()
return
}
//QueryTxsByAccount query account related tx
func (txState *TxState) QueryTxsByAccount(account string, start, offset int64) Transactions {
var txs Transactions
// id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign
sqlStr := "select id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num from transaction_records where sender=? or receiver=? limit ?, ?;"
rows, err := txState.db.Queryx(sqlStr, account, account, start, offset)
if err != nil {
txState.log.Error("query txs by account failed", "err", err.Error())
return txs
}
for rows.Next() {
var tmp Transaction
rows.Scan(&tmp.ID, &tmp.Sender, &tmp.Receiver, &tmp.Value, &tmp.Input, &tmp.ExpiredNum, &tmp.TimeStamp, &tmp.Nonce, &tmp.RefBlockNum)
txs = append(txs, &tmp)
}
return txs
}
//QueryTxByID query tx by id
func (txState *TxState) QueryTxByID(id string) Transaction {
var tx Transaction
// id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num, block_num, sign
sqlStr := "select id, sender, receiver, amount, input, expired, time_stamp, nonce, ref_block_num from transaction_records where id=? ;"
row := txState.db.QueryRowx(sqlStr, id)
err := row.Scan(&tx.ID, &tx.Sender, &tx.Receiver, &tx.Value, &tx.Input, &tx.ExpiredNum, &tx.TimeStamp, &tx.Nonce, &tx.RefBlockNum)
if err == sql.ErrNoRows {
return tx
}
if err != nil {
txState.log.Error("query txs by id failed", "err", err.Error())
}
return tx
}
// TxRepState means current tx receipt's info
type TxRepState struct {
sync.RWMutex
Txreps TransactionReceipts
log log.Logger
db *sqlx.DB
}
// NewTxRepState tx receipt inst
func NewTxRepState(db *sqlx.DB, log log.Logger) *TxRepState {
return &TxRepState{
Txreps: TransactionReceipts{},
log: log,
db: db,
}
}
// UpdateTxRep append tx
func (txrSt *TxRepState) UpdateTxRep(tr *TransactionReceipt) {
txrSt.Lock()
defer txrSt.Unlock()
txrSt.Txreps = txrSt.Txreps.AppendTxrp(tr)
}
// SyncToDisk write tx to db
func (txrSt *TxRepState) SyncToDisk(height int64) (hashRoot string, err error) {
if txrSt.Txreps.Len() == 0 {
return txrSt.Txreps.HashRoot(), nil
}
// id | status | fee | block_num | tx_hash | log
sqlStr := "replace into transaction_receipts (id, status, fee, block_num, tx_hash, log) values "
for _, val := range txrSt.Txreps {
sqlStr = sqlStr + fmt.Sprintf(" ('%s', '%d', '%s', '%d', '%s', '%s'),",
string(val.ID()), val.Status, val.Fee.String(), height, val.TxHash, val.Log)
}
sqlStr = sqlStr[0 : len(sqlStr)-1]
_, err = txrSt.db.Exec(sqlStr)
// merkle tree
hashRoot = txrSt.Txreps.HashRoot()
// new trans for next commit
txrSt.RLock()
txrSt.Txreps = TransactionReceipts{}
txrSt.RUnlock()
return
}
// HeaderState appheader state
type HeaderState struct {
CurBlockNum int64 `json:"cur_block_num"`
CurBlockHash string `json:"cur_block_hash"`
CurAPPHash string `json:"cur_app_hash"`
TimeStamp int64 `json:"time_stamp"`
Fee *big.Int
db *sqlx.DB
log log.Logger
}
// LoadHeaderState from db load header
func (hdSt *HeaderState) LoadHeaderState() error {
sqlStr := "select content from state where id=1"
var text string
err := hdSt.db.QueryRowx(sqlStr).Scan(&text)
if err == sql.ErrNoRows {
return nil
}
if err != nil {
return err
}
return json.Unmarshal([]byte(text), hdSt)
}
// SyncToDisk to db
func (hdSt *HeaderState) SyncToDisk() error {
dat, err := json.Marshal(hdSt)
if err != nil {
return err
| random_line_split |
||
list.rs | `], [`BoxColumn`]) parameterise
/// `W = Box<dyn Widget>`, thus supporting individually boxed child widgets.
/// This allows use of multiple types of child widget at the cost of extra
/// allocation, and requires dynamic dispatch of methods.
///
/// Configuring and resizing elements is O(n) in the number of children.
/// Drawing and event handling is O(log n) in the number of children (assuming
/// only a small number are visible at any one time).
///
/// For fixed configurations of child widgets, [`make_widget`] can be used
/// instead. [`make_widget`] has the advantage that it can support child widgets
/// of multiple types without allocation and via static dispatch, but the
/// disadvantage that drawing and event handling are O(n) in the number of
/// children.
///
/// [`make_widget`]: ../macros/index.html#the-make_widget-macro
#[derive(Clone, Default, Debug, Widget)]
#[handler(send=noauto, msg=(usize, <W as event::Handler>::Msg))]
#[widget(children=noauto)]
pub struct List<D: Directional, W: Widget> {
first_id: WidgetId,
#[widget_core]
core: CoreData,
widgets: Vec<W>,
data: layout::DynRowStorage,
direction: D,
}
impl<D: Directional, W: Widget> WidgetChildren for List<D, W> {
#[inline]
fn first_id(&self) -> WidgetId {
self.first_id
}
fn record_first_id(&mut self, id: WidgetId) {
self.first_id = id;
}
#[inline]
fn num_children(&self) -> usize {
self.widgets.len()
}
#[inline]
fn get_child(&self, index: usize) -> Option<&dyn WidgetConfig> {
self.widgets.get(index).map(|w| w.as_widget())
}
#[inline]
fn get_child_mut(&mut self, index: usize) -> Option<&mut dyn WidgetConfig> {
self.widgets.get_mut(index).map(|w| w.as_widget_mut())
}
}
impl<D: Directional, W: Widget> Layout for List<D, W> {
fn size_rules(&mut self, size_handle: &mut dyn SizeHandle, axis: AxisInfo) -> SizeRules {
let dim = (self.direction, self.widgets.len());
let mut solver = layout::RowSolver::new(axis, dim, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
solver.for_child(&mut self.data, n, |axis| {
child.size_rules(size_handle, axis)
});
}
solver.finish(&mut self.data)
}
fn set_rect(&mut self, mgr: &mut Manager, rect: Rect, align: AlignHints) {
self.core.rect = rect;
let dim = (self.direction, self.widgets.len());
let mut setter = layout::RowSetter::<D, Vec<i32>, _>::new(rect, dim, align, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
child.set_rect(mgr, setter.child_rect(&mut self.data, n), align);
}
}
fn spatial_nav(&self, reverse: bool, from: Option<usize>) -> Option<usize> {
if self.num_children() == 0 {
return None;
}
let last = self.num_children() - 1;
let reverse = reverse ^ self.direction.is_reversed();
if let Some(index) = from {
match reverse {
false if index < last => Some(index + 1),
true if 0 < index => Some(index - 1),
_ => None,
}
} else {
match reverse {
false => Some(0),
true => Some(last),
}
}
}
fn find_id(&self, coord: Coord) -> Option<WidgetId> {
if !self.rect().contains(coord) {
return None;
}
let solver = layout::RowPositionSolver::new(self.direction);
if let Some(child) = solver.find_child(&self.widgets, coord) |
Some(self.id())
}
fn draw(&self, draw_handle: &mut dyn DrawHandle, mgr: &event::ManagerState, disabled: bool) {
let disabled = disabled || self.is_disabled();
let solver = layout::RowPositionSolver::new(self.direction);
solver.for_children(&self.widgets, draw_handle.get_clip_rect(), |w| {
w.draw(draw_handle, mgr, disabled)
});
}
}
impl<D: Directional, W: Widget> event::SendEvent for List<D, W> {
fn send(&mut self, mgr: &mut Manager, id: WidgetId, event: Event) -> Response<Self::Msg> {
if !self.is_disabled() {
for (i, child) in self.widgets.iter_mut().enumerate() {
if id <= child.id() {
let r = child.send(mgr, id, event);
return match Response::try_from(r) {
Ok(r) => r,
Err(msg) => {
log::trace!(
"Received by {} from {}: {:?}",
self.id(),
id,
kas::util::TryFormat(&msg)
);
Response::Msg((i, msg))
}
};
}
}
}
Response::Unhandled
}
}
impl<D: Directional + Default, W: Widget> List<D, W> {
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`List::new_with_direction`].
pub fn new(widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction: Default::default(),
}
}
}
impl<W: Widget> List<Direction, W> {
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.direction = direction;
// Note: most of the time SET_SIZE would be enough, but margins can be different
TkAction::RESIZE
}
}
impl<D: Directional, W: Widget> List<D, W> {
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction,
}
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.direction.as_direction()
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.widgets.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.widgets.reserve(additional);
}
/// Remove all child widgets
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn clear(&mut self) -> TkAction {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
self.widgets.clear();
action
}
/// Append a child widget
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn push(&mut self, widget: W) -> TkAction {
self.widgets.push(widget);
TkAction::RECONFIGURE
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, this
/// triggers a reconfigure before the next draw operation.
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn pop(&mut self) -> (Option<W>, TkAction) {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
(self.widgets.pop(), action)
}
/// Inserts a child widget position `index`
///
/// Panics if `index > len`.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn insert(&mut self, index: usize, widget: W) -> TkAction {
self.widgets.insert(index, widget);
TkAction::RECONFIGURE
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn remove(&mut self, index: usize) -> (W, TkAction) {
let r = self.widgets.remove(index);
(r, TkAction::RECONFIGURE)
}
/// Replace the child at `index`
///
/// Panics | {
return child.find_id(coord);
} | conditional_block |
list.rs | Row`], [`BoxColumn`]) parameterise
/// `W = Box<dyn Widget>`, thus supporting individually boxed child widgets.
/// This allows use of multiple types of child widget at the cost of extra
/// allocation, and requires dynamic dispatch of methods.
///
/// Configuring and resizing elements is O(n) in the number of children.
/// Drawing and event handling is O(log n) in the number of children (assuming
/// only a small number are visible at any one time).
///
/// For fixed configurations of child widgets, [`make_widget`] can be used
/// instead. [`make_widget`] has the advantage that it can support child widgets
/// of multiple types without allocation and via static dispatch, but the
/// disadvantage that drawing and event handling are O(n) in the number of
/// children.
///
/// [`make_widget`]: ../macros/index.html#the-make_widget-macro
#[derive(Clone, Default, Debug, Widget)]
#[handler(send=noauto, msg=(usize, <W as event::Handler>::Msg))]
#[widget(children=noauto)]
pub struct List<D: Directional, W: Widget> {
first_id: WidgetId,
#[widget_core]
core: CoreData,
widgets: Vec<W>,
data: layout::DynRowStorage,
direction: D,
}
impl<D: Directional, W: Widget> WidgetChildren for List<D, W> {
#[inline]
fn first_id(&self) -> WidgetId {
self.first_id
}
fn record_first_id(&mut self, id: WidgetId) {
self.first_id = id;
}
#[inline]
fn num_children(&self) -> usize {
self.widgets.len()
}
#[inline]
fn get_child(&self, index: usize) -> Option<&dyn WidgetConfig> {
self.widgets.get(index).map(|w| w.as_widget())
}
#[inline]
fn | (&mut self, index: usize) -> Option<&mut dyn WidgetConfig> {
self.widgets.get_mut(index).map(|w| w.as_widget_mut())
}
}
impl<D: Directional, W: Widget> Layout for List<D, W> {
fn size_rules(&mut self, size_handle: &mut dyn SizeHandle, axis: AxisInfo) -> SizeRules {
let dim = (self.direction, self.widgets.len());
let mut solver = layout::RowSolver::new(axis, dim, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
solver.for_child(&mut self.data, n, |axis| {
child.size_rules(size_handle, axis)
});
}
solver.finish(&mut self.data)
}
fn set_rect(&mut self, mgr: &mut Manager, rect: Rect, align: AlignHints) {
self.core.rect = rect;
let dim = (self.direction, self.widgets.len());
let mut setter = layout::RowSetter::<D, Vec<i32>, _>::new(rect, dim, align, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
child.set_rect(mgr, setter.child_rect(&mut self.data, n), align);
}
}
fn spatial_nav(&self, reverse: bool, from: Option<usize>) -> Option<usize> {
if self.num_children() == 0 {
return None;
}
let last = self.num_children() - 1;
let reverse = reverse ^ self.direction.is_reversed();
if let Some(index) = from {
match reverse {
false if index < last => Some(index + 1),
true if 0 < index => Some(index - 1),
_ => None,
}
} else {
match reverse {
false => Some(0),
true => Some(last),
}
}
}
fn find_id(&self, coord: Coord) -> Option<WidgetId> {
if !self.rect().contains(coord) {
return None;
}
let solver = layout::RowPositionSolver::new(self.direction);
if let Some(child) = solver.find_child(&self.widgets, coord) {
return child.find_id(coord);
}
Some(self.id())
}
fn draw(&self, draw_handle: &mut dyn DrawHandle, mgr: &event::ManagerState, disabled: bool) {
let disabled = disabled || self.is_disabled();
let solver = layout::RowPositionSolver::new(self.direction);
solver.for_children(&self.widgets, draw_handle.get_clip_rect(), |w| {
w.draw(draw_handle, mgr, disabled)
});
}
}
impl<D: Directional, W: Widget> event::SendEvent for List<D, W> {
fn send(&mut self, mgr: &mut Manager, id: WidgetId, event: Event) -> Response<Self::Msg> {
if !self.is_disabled() {
for (i, child) in self.widgets.iter_mut().enumerate() {
if id <= child.id() {
let r = child.send(mgr, id, event);
return match Response::try_from(r) {
Ok(r) => r,
Err(msg) => {
log::trace!(
"Received by {} from {}: {:?}",
self.id(),
id,
kas::util::TryFormat(&msg)
);
Response::Msg((i, msg))
}
};
}
}
}
Response::Unhandled
}
}
impl<D: Directional + Default, W: Widget> List<D, W> {
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`List::new_with_direction`].
pub fn new(widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction: Default::default(),
}
}
}
impl<W: Widget> List<Direction, W> {
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.direction = direction;
// Note: most of the time SET_SIZE would be enough, but margins can be different
TkAction::RESIZE
}
}
impl<D: Directional, W: Widget> List<D, W> {
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction,
}
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.direction.as_direction()
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.widgets.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.widgets.reserve(additional);
}
/// Remove all child widgets
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn clear(&mut self) -> TkAction {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
self.widgets.clear();
action
}
/// Append a child widget
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn push(&mut self, widget: W) -> TkAction {
self.widgets.push(widget);
TkAction::RECONFIGURE
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, this
/// triggers a reconfigure before the next draw operation.
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn pop(&mut self) -> (Option<W>, TkAction) {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
(self.widgets.pop(), action)
}
/// Inserts a child widget position `index`
///
/// Panics if `index > len`.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn insert(&mut self, index: usize, widget: W) -> TkAction {
self.widgets.insert(index, widget);
TkAction::RECONFIGURE
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn remove(&mut self, index: usize) -> (W, TkAction) {
let r = self.widgets.remove(index);
(r, TkAction::RECONFIGURE)
}
/// Replace the child at `index`
///
/// Panics | get_child_mut | identifier_name |
list.rs | } else {
match reverse {
false => Some(0),
true => Some(last),
}
}
}
fn find_id(&self, coord: Coord) -> Option<WidgetId> {
if !self.rect().contains(coord) {
return None;
}
let solver = layout::RowPositionSolver::new(self.direction);
if let Some(child) = solver.find_child(&self.widgets, coord) {
return child.find_id(coord);
}
Some(self.id())
}
fn draw(&self, draw_handle: &mut dyn DrawHandle, mgr: &event::ManagerState, disabled: bool) {
let disabled = disabled || self.is_disabled();
let solver = layout::RowPositionSolver::new(self.direction);
solver.for_children(&self.widgets, draw_handle.get_clip_rect(), |w| {
w.draw(draw_handle, mgr, disabled)
});
}
}
impl<D: Directional, W: Widget> event::SendEvent for List<D, W> {
fn send(&mut self, mgr: &mut Manager, id: WidgetId, event: Event) -> Response<Self::Msg> {
if !self.is_disabled() {
for (i, child) in self.widgets.iter_mut().enumerate() {
if id <= child.id() {
let r = child.send(mgr, id, event);
return match Response::try_from(r) {
Ok(r) => r,
Err(msg) => {
log::trace!(
"Received by {} from {}: {:?}",
self.id(),
id,
kas::util::TryFormat(&msg)
);
Response::Msg((i, msg))
}
};
}
}
}
Response::Unhandled
}
}
impl<D: Directional + Default, W: Widget> List<D, W> {
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`List::new_with_direction`].
pub fn new(widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction: Default::default(),
}
}
}
impl<W: Widget> List<Direction, W> {
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.direction = direction;
// Note: most of the time SET_SIZE would be enough, but margins can be different
TkAction::RESIZE
}
}
impl<D: Directional, W: Widget> List<D, W> {
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction,
}
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.direction.as_direction()
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.widgets.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.widgets.reserve(additional);
}
/// Remove all child widgets
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn clear(&mut self) -> TkAction {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
self.widgets.clear();
action
}
/// Append a child widget
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn push(&mut self, widget: W) -> TkAction {
self.widgets.push(widget);
TkAction::RECONFIGURE
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, this
/// triggers a reconfigure before the next draw operation.
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn pop(&mut self) -> (Option<W>, TkAction) {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
(self.widgets.pop(), action)
}
/// Inserts a child widget position `index`
///
/// Panics if `index > len`.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn insert(&mut self, index: usize, widget: W) -> TkAction {
self.widgets.insert(index, widget);
TkAction::RECONFIGURE
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn remove(&mut self, index: usize) -> (W, TkAction) {
let r = self.widgets.remove(index);
(r, TkAction::RECONFIGURE)
}
/// Replace the child at `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
// TODO: in theory it is possible to avoid a reconfigure where both widgets
// have no children and have compatible size. Is this a good idea and can
// we somehow test "has compatible size"?
pub fn replace(&mut self, index: usize, mut widget: W) -> (W, TkAction) {
std::mem::swap(&mut widget, &mut self.widgets[index]);
(widget, TkAction::RECONFIGURE)
}
/// Append child widgets from an iterator
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are added.
pub fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) -> TkAction {
let len = self.widgets.len();
self.widgets.extend(iter);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
/// Resize, using the given closure to construct new widgets
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn resize_with<F: Fn(usize) -> W>(&mut self, len: usize, f: F) -> TkAction {
let l0 = self.widgets.len();
if l0 == len {
return TkAction::empty();
} else if l0 > len {
self.widgets.truncate(len);
} else {
self.widgets.reserve(len);
for i in l0..len {
self.widgets.push(f(i));
}
}
TkAction::RECONFIGURE
}
/// Retain only widgets satisfying predicate `f`
///
/// See documentation of [`Vec::retain`].
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are removed.
pub fn retain<F: FnMut(&W) -> bool>(&mut self, f: F) -> TkAction {
let len = self.widgets.len();
self.widgets.retain(f);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
/// Iterate over childern
pub fn iter(&self) -> impl Iterator<Item = &W> {
ListIter {
list: &self.widgets,
}
}
/// Get the index of the child which is an ancestor of `id`, if any
pub fn find_child_index(&self, id: WidgetId) -> Option<usize> {
if id >= self.first_id {
for (i, child) in self.widgets.iter().enumerate() {
if id <= child.id() {
return Some(i);
}
}
}
None
}
}
impl<D: Directional, W: Widget> Index<usize> for List<D, W> {
type Output = W;
fn index(&self, index: usize) -> &Self::Output {
&self.widgets[index]
}
}
impl<D: Directional, W: Widget> IndexMut<usize> for List<D, W> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.widgets[index]
}
}
struct ListIter<'a, W: Widget> {
list: &'a [W],
} | impl<'a, W: Widget> Iterator for ListIter<'a, W> {
type Item = &'a W;
fn next(&mut self) -> Option<Self::Item> {
if !self.list.is_empty() {
let item = &self.list[0]; | random_line_split |
|
list.rs | `], [`BoxColumn`]) parameterise
/// `W = Box<dyn Widget>`, thus supporting individually boxed child widgets.
/// This allows use of multiple types of child widget at the cost of extra
/// allocation, and requires dynamic dispatch of methods.
///
/// Configuring and resizing elements is O(n) in the number of children.
/// Drawing and event handling is O(log n) in the number of children (assuming
/// only a small number are visible at any one time).
///
/// For fixed configurations of child widgets, [`make_widget`] can be used
/// instead. [`make_widget`] has the advantage that it can support child widgets
/// of multiple types without allocation and via static dispatch, but the
/// disadvantage that drawing and event handling are O(n) in the number of
/// children.
///
/// [`make_widget`]: ../macros/index.html#the-make_widget-macro
#[derive(Clone, Default, Debug, Widget)]
#[handler(send=noauto, msg=(usize, <W as event::Handler>::Msg))]
#[widget(children=noauto)]
pub struct List<D: Directional, W: Widget> {
first_id: WidgetId,
#[widget_core]
core: CoreData,
widgets: Vec<W>,
data: layout::DynRowStorage,
direction: D,
}
impl<D: Directional, W: Widget> WidgetChildren for List<D, W> {
#[inline]
fn first_id(&self) -> WidgetId {
self.first_id
}
fn record_first_id(&mut self, id: WidgetId) {
self.first_id = id;
}
#[inline]
fn num_children(&self) -> usize {
self.widgets.len()
}
#[inline]
fn get_child(&self, index: usize) -> Option<&dyn WidgetConfig> {
self.widgets.get(index).map(|w| w.as_widget())
}
#[inline]
fn get_child_mut(&mut self, index: usize) -> Option<&mut dyn WidgetConfig> {
self.widgets.get_mut(index).map(|w| w.as_widget_mut())
}
}
impl<D: Directional, W: Widget> Layout for List<D, W> {
fn size_rules(&mut self, size_handle: &mut dyn SizeHandle, axis: AxisInfo) -> SizeRules {
let dim = (self.direction, self.widgets.len());
let mut solver = layout::RowSolver::new(axis, dim, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
solver.for_child(&mut self.data, n, |axis| {
child.size_rules(size_handle, axis)
});
}
solver.finish(&mut self.data)
}
fn set_rect(&mut self, mgr: &mut Manager, rect: Rect, align: AlignHints) {
self.core.rect = rect;
let dim = (self.direction, self.widgets.len());
let mut setter = layout::RowSetter::<D, Vec<i32>, _>::new(rect, dim, align, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
child.set_rect(mgr, setter.child_rect(&mut self.data, n), align);
}
}
fn spatial_nav(&self, reverse: bool, from: Option<usize>) -> Option<usize> {
if self.num_children() == 0 {
return None;
}
let last = self.num_children() - 1;
let reverse = reverse ^ self.direction.is_reversed();
if let Some(index) = from {
match reverse {
false if index < last => Some(index + 1),
true if 0 < index => Some(index - 1),
_ => None,
}
} else {
match reverse {
false => Some(0),
true => Some(last),
}
}
}
fn find_id(&self, coord: Coord) -> Option<WidgetId> {
if !self.rect().contains(coord) {
return None;
}
let solver = layout::RowPositionSolver::new(self.direction);
if let Some(child) = solver.find_child(&self.widgets, coord) {
return child.find_id(coord);
}
Some(self.id())
}
fn draw(&self, draw_handle: &mut dyn DrawHandle, mgr: &event::ManagerState, disabled: bool) {
let disabled = disabled || self.is_disabled();
let solver = layout::RowPositionSolver::new(self.direction);
solver.for_children(&self.widgets, draw_handle.get_clip_rect(), |w| {
w.draw(draw_handle, mgr, disabled)
});
}
}
impl<D: Directional, W: Widget> event::SendEvent for List<D, W> {
fn send(&mut self, mgr: &mut Manager, id: WidgetId, event: Event) -> Response<Self::Msg> {
if !self.is_disabled() {
for (i, child) in self.widgets.iter_mut().enumerate() {
if id <= child.id() {
let r = child.send(mgr, id, event);
return match Response::try_from(r) {
Ok(r) => r,
Err(msg) => {
log::trace!(
"Received by {} from {}: {:?}",
self.id(),
id,
kas::util::TryFormat(&msg)
);
Response::Msg((i, msg))
}
};
}
}
}
Response::Unhandled
}
}
impl<D: Directional + Default, W: Widget> List<D, W> {
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`List::new_with_direction`].
pub fn new(widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction: Default::default(),
}
}
}
impl<W: Widget> List<Direction, W> {
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.direction = direction;
// Note: most of the time SET_SIZE would be enough, but margins can be different
TkAction::RESIZE
}
}
impl<D: Directional, W: Widget> List<D, W> {
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction,
}
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.direction.as_direction()
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize |
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.widgets.reserve(additional);
}
/// Remove all child widgets
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn clear(&mut self) -> TkAction {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
self.widgets.clear();
action
}
/// Append a child widget
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn push(&mut self, widget: W) -> TkAction {
self.widgets.push(widget);
TkAction::RECONFIGURE
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, this
/// triggers a reconfigure before the next draw operation.
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn pop(&mut self) -> (Option<W>, TkAction) {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
(self.widgets.pop(), action)
}
/// Inserts a child widget position `index`
///
/// Panics if `index > len`.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn insert(&mut self, index: usize, widget: W) -> TkAction {
self.widgets.insert(index, widget);
TkAction::RECONFIGURE
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn remove(&mut self, index: usize) -> (W, TkAction) {
let r = self.widgets.remove(index);
(r, TkAction::RECONFIGURE)
}
/// Replace the child at `index`
///
/// Panics | {
self.widgets.capacity()
} | identifier_body |
config_global.go | ClientName string `yaml:"well_known_client_name"`
// The server name to delegate sliding sync communications to, with optional port.
// Requires `well_known_client_name` to also be configured.
WellKnownSlidingSyncProxy string `yaml:"well_known_sliding_sync_proxy"`
// Disables federation. Dendrite will not be able to make any outbound HTTP requests
// to other servers and the federation API will not be exposed.
DisableFederation bool `yaml:"disable_federation"`
// Configures the handling of presence events.
Presence PresenceOptions `yaml:"presence"`
// List of domains that the server will trust as identity servers to
// verify third-party identifiers.
// Defaults to an empty array.
TrustedIDServers []string `yaml:"trusted_third_party_id_servers"`
// JetStream configuration
JetStream JetStream `yaml:"jetstream"`
// Metrics configuration
Metrics Metrics `yaml:"metrics"`
// Sentry configuration
Sentry Sentry `yaml:"sentry"`
// DNS caching options for all outbound HTTP requests
DNSCache DNSCacheOptions `yaml:"dns_cache"`
// ServerNotices configuration used for sending server notices
ServerNotices ServerNotices `yaml:"server_notices"`
// ReportStats configures opt-in phone-home statistics reporting.
ReportStats ReportStats `yaml:"report_stats"`
// Configuration for the caches.
Cache Cache `yaml:"cache"`
}
func (c *Global) Defaults(opts DefaultOpts) {
if opts.Generate {
c.ServerName = "localhost"
c.PrivateKeyPath = "matrix_key.pem"
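// NOTE: math/rand with a fixed seed produces a deterministic key here, so this
// generated default is presumably only intended for generated/test configs,
// not as a production signing key.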
_, c.PrivateKey, _ = ed25519.GenerateKey(rand.New(rand.NewSource(0)))
c.KeyID = "ed25519:auto"
c.TrustedIDServers = []string{
"matrix.org",
"vector.im",
}
}
c.KeyValidityPeriod = time.Hour * 24 * 7
if opts.SingleDatabase {
c.DatabaseOptions.Defaults(90)
}
c.JetStream.Defaults(opts)
c.Metrics.Defaults(opts)
c.DNSCache.Defaults()
c.Sentry.Defaults()
c.ServerNotices.Defaults(opts)
c.ReportStats.Defaults()
c.Cache.Defaults()
}
func (c *Global) Verify(configErrs *ConfigErrors) {
checkNotEmpty(configErrs, "global.server_name", string(c.ServerName))
checkNotEmpty(configErrs, "global.private_key", string(c.PrivateKeyPath))
for _, v := range c.VirtualHosts {
v.Verify(configErrs)
}
c.JetStream.Verify(configErrs)
c.Metrics.Verify(configErrs)
c.Sentry.Verify(configErrs)
c.DNSCache.Verify(configErrs)
c.ServerNotices.Verify(configErrs)
c.ReportStats.Verify(configErrs)
c.Cache.Verify(configErrs)
}
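// IsLocalServerName reports whether serverName is the main configured server
// name or the name of one of the configured virtual hosts.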
func (c *Global) IsLocalServerName(serverName spec.ServerName) bool {
if c.ServerName == serverName {
return true
}
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return true
}
}
return false
}
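// SplitLocalID splits an identifier with the given sigil into its local part
// and server name, returning an error if the identifier cannot be split or if
// its server name is not local to this deployment.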
func (c *Global) SplitLocalID(sigil byte, id string) (string, spec.ServerName, error) {
u, s, err := gomatrixserverlib.SplitID(sigil, id)
if err != nil {
return u, s, err
}
if !c.IsLocalServerName(s) {
return u, s, fmt.Errorf("server name %q not known", s)
}
return u, s, nil
}
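// Illustrative usage of SplitLocalID (a sketch, not part of the original
// source), assuming a *Global named cfg whose ServerName is "localhost":
//
//	localpart, server, err := cfg.SplitLocalID('@', "@alice:localhost")
//	// localpart == "alice", server == "localhost", err == nil
//
// An identifier on a server name that is neither the main nor a virtual host
// name yields a non-nil error instead.

// VirtualHost returns the virtual host configuration matching the given
// server name, or nil if there is none.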
func (c *Global) VirtualHost(serverName spec.ServerName) *VirtualHost {
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return v
}
}
return nil
}
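// VirtualHostForHTTPHost returns the virtual host whose server name, or one of
// whose match_http_hosts entries, equals the given HTTP Host value, or nil if
// there is none.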
func (c *Global) VirtualHostForHTTPHost(serverName spec.ServerName) *VirtualHost {
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return v
}
for _, h := range v.MatchHTTPHosts {
if h == serverName {
return v
}
}
}
return nil
}
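// Illustrative lookup (a sketch, not part of the original source): for a
// virtual host with ServerName "vhost.example.org" and MatchHTTPHosts
// containing "vhost.example.org:8448", both calls below return that host:
//
//	vh1 := cfg.VirtualHostForHTTPHost("vhost.example.org")
//	vh2 := cfg.VirtualHostForHTTPHost("vhost.example.org:8448")

// SigningIdentityFor returns the signing identity for the given server name,
// or an error if the name does not belong to this deployment.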
func (c *Global) SigningIdentityFor(serverName spec.ServerName) (*fclient.SigningIdentity, error) {
for _, id := range c.SigningIdentities() {
if id.ServerName == serverName {
return id, nil
}
}
return nil, fmt.Errorf("no signing identity for %q", serverName)
}
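// Illustrative usage (a sketch, not part of the original source), assuming a
// populated *Global named cfg:
//
//	id, err := cfg.SigningIdentityFor("localhost")
//	if err == nil {
//		// id.ServerName, id.KeyID and id.PrivateKey are ready for signing.
//	}

// SigningIdentities returns the signing identities of the main server name and
// of every configured virtual host.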
func (c *Global) SigningIdentities() []*fclient.SigningIdentity {
identities := make([]*fclient.SigningIdentity, 0, len(c.VirtualHosts)+1)
identities = append(identities, &c.SigningIdentity)
for _, v := range c.VirtualHosts {
identities = append(identities, &v.SigningIdentity)
}
return identities
}
type VirtualHost struct {
// Signing identity contains the server name, private key and key ID of
// the virtual host.
fclient.SigningIdentity `yaml:",inline"`
// Path to the private key. If not specified, the default global private key
// will be used instead.
PrivateKeyPath Path `yaml:"private_key"`
// How long a remote server can cache our server key for before requesting it again.
// Increasing this number will reduce the number of requests made by remote servers
// for our key, but increases the period a compromised key will be considered valid
// by remote servers.
// Defaults to 24 hours.
KeyValidityPeriod time.Duration `yaml:"key_validity_period"`
// Match these HTTP Host headers on the `/key/v2/server` endpoint, this needs
// to match all delegated names, likely including the port number too if
// the well-known delegation includes that also.
MatchHTTPHosts []spec.ServerName `yaml:"match_http_hosts"`
// Is registration enabled on this virtual host?
AllowRegistration bool `yaml:"allow_registration"`
// Is guest registration enabled on this virtual host?
AllowGuests bool `yaml:"allow_guests"`
}
func (v *VirtualHost) Verify(configErrs *ConfigErrors) {
checkNotEmpty(configErrs, "virtual_host.*.server_name", string(v.ServerName))
}
// RegistrationAllowed returns two bools, the first states whether registration
// is allowed for this virtual host and the second states whether guests are
// allowed for this virtual host.
func (v *VirtualHost) RegistrationAllowed() (bool, bool) {
if v == nil {
return false, false
}
return v.AllowRegistration, v.AllowGuests
}
type OldVerifyKeys struct {
// Path to the private key.
PrivateKeyPath Path `yaml:"private_key"`
// The private key itself.
PrivateKey ed25519.PrivateKey `yaml:"-"`
// The public key, in case only that part is known.
PublicKey spec.Base64Bytes `yaml:"public_key"`
// The key ID of the private key.
KeyID gomatrixserverlib.KeyID `yaml:"key_id"`
// When the private key was designated as "expired", as a UNIX timestamp
// in millisecond precision.
ExpiredAt spec.Timestamp `yaml:"expired_at"`
}
// The configuration to use for Prometheus metrics
type Metrics struct {
// Whether or not the metrics are enabled
Enabled bool `yaml:"enabled"`
// Use BasicAuth for Authorization
BasicAuth struct {
// Authorization via Static Username & Password
// Hardcoded Username and Password
Username string `yaml:"username"`
Password string `yaml:"password"`
} `yaml:"basic_auth"`
}
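// Example YAML fragment that maps onto this struct (field names taken from the
// yaml tags above; values are illustrative only):
//
//	metrics:
//	  enabled: true
//	  basic_auth:
//	    username: metrics
//	    password: metrics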
func (c *Metrics) Defaults(opts DefaultOpts) {
c.Enabled = false
if opts.Generate {
c.BasicAuth.Username = "metrics"
c.BasicAuth.Password = "metrics"
}
}
func (c *Metrics) | (configErrs *ConfigErrors) {
}
// ServerNotices defines the configuration used for sending server notices
type ServerNotices struct {
Enabled bool `yaml:"enabled"`
// The localpart to be used when sending notices
LocalPart string `yaml:"local_part"`
// The displayname to be used when sending notices
DisplayName string `yaml:"display_name"`
// The avatar of this user
AvatarURL string `yaml:"avatar_url"`
// The roomname to be used when creating messages
RoomName string `yaml:"room_name"`
}
func (c *ServerNotices) Defaults(opts DefaultOpts) {
if opts.Generate {
c.Enabled = true
c.LocalPart = "_server"
c.DisplayName = "Server Alert"
c.RoomName = "Server Alert"
c.AvatarURL = ""
}
}
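// Example YAML fragment matching these defaults (illustrative only; field names
// come from the yaml tags on ServerNotices):
//
//	server_notices:
//	  enabled: true
//	  local_part: "_server"
//	  display_name: "Server Alert"
//	  room_name: "Server Alert"
//	  avatar_url: ""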
func (c *ServerNotices) Verify(errors *ConfigErrors) {}
type Cache struct {
EstimatedMaxSize DataUnit `yaml:"max_size_estimated"`
MaxAge time.Duration `yaml:"max_age"`
}
func (c *Cache) Defaults() {
c.EstimatedMaxSize = 1024 * 1024 * 1024 // 1GB
c.MaxAge = time.Hour
}
func (c *Cache) Verify(errors *ConfigErrors) {
checkPositive(errors, "max_size_estimated", int64(c.EstimatedMaxSize))
}
// ReportStats configures opt-in phone-home statistics reporting.
type ReportStats struct {
// Enabled configures phone-home statistics of the server
Enabled bool `yaml:"enabled"`
// | Verify | identifier_name |
config_global.go | s()
c.Cache.Defaults()
}
func (c *Global) Verify(configErrs *ConfigErrors) {
checkNotEmpty(configErrs, "global.server_name", string(c.ServerName))
checkNotEmpty(configErrs, "global.private_key", string(c.PrivateKeyPath))
for _, v := range c.VirtualHosts {
v.Verify(configErrs)
}
c.JetStream.Verify(configErrs)
c.Metrics.Verify(configErrs)
c.Sentry.Verify(configErrs)
c.DNSCache.Verify(configErrs)
c.ServerNotices.Verify(configErrs)
c.ReportStats.Verify(configErrs)
c.Cache.Verify(configErrs)
}
func (c *Global) IsLocalServerName(serverName spec.ServerName) bool {
if c.ServerName == serverName {
return true
}
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return true
}
}
return false
}
func (c *Global) SplitLocalID(sigil byte, id string) (string, spec.ServerName, error) {
u, s, err := gomatrixserverlib.SplitID(sigil, id)
if err != nil {
return u, s, err
}
if !c.IsLocalServerName(s) {
return u, s, fmt.Errorf("server name %q not known", s)
}
return u, s, nil
}
func (c *Global) VirtualHost(serverName spec.ServerName) *VirtualHost {
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return v
}
}
return nil
}
func (c *Global) VirtualHostForHTTPHost(serverName spec.ServerName) *VirtualHost {
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return v
}
for _, h := range v.MatchHTTPHosts {
if h == serverName {
return v
}
}
}
return nil
}
func (c *Global) SigningIdentityFor(serverName spec.ServerName) (*fclient.SigningIdentity, error) {
for _, id := range c.SigningIdentities() {
if id.ServerName == serverName {
return id, nil
}
}
return nil, fmt.Errorf("no signing identity for %q", serverName)
}
func (c *Global) SigningIdentities() []*fclient.SigningIdentity {
identities := make([]*fclient.SigningIdentity, 0, len(c.VirtualHosts)+1)
identities = append(identities, &c.SigningIdentity)
for _, v := range c.VirtualHosts {
identities = append(identities, &v.SigningIdentity)
}
return identities
}
type VirtualHost struct {
// Signing identity contains the server name, private key and key ID of
// the virtual host.
fclient.SigningIdentity `yaml:",inline"`
// Path to the private key. If not specified, the default global private key
// will be used instead.
PrivateKeyPath Path `yaml:"private_key"`
// How long a remote server can cache our server key for before requesting it again.
// Increasing this number will reduce the number of requests made by remote servers
// for our key, but increases the period a compromised key will be considered valid
// by remote servers.
// Defaults to 24 hours.
KeyValidityPeriod time.Duration `yaml:"key_validity_period"`
// Match these HTTP Host headers on the `/key/v2/server` endpoint, this needs
// to match all delegated names, likely including the port number too if
// the well-known delegation includes that also.
MatchHTTPHosts []spec.ServerName `yaml:"match_http_hosts"`
// Is registration enabled on this virtual host?
AllowRegistration bool `yaml:"allow_registration"`
// Is guest registration enabled on this virtual host?
AllowGuests bool `yaml:"allow_guests"`
}
func (v *VirtualHost) Verify(configErrs *ConfigErrors) {
checkNotEmpty(configErrs, "virtual_host.*.server_name", string(v.ServerName))
}
// RegistrationAllowed returns two bools, the first states whether registration
// is allowed for this virtual host and the second states whether guests are
// allowed for this virtual host.
func (v *VirtualHost) RegistrationAllowed() (bool, bool) {
if v == nil {
return false, false
}
return v.AllowRegistration, v.AllowGuests
}
type OldVerifyKeys struct {
// Path to the private key.
PrivateKeyPath Path `yaml:"private_key"`
// The private key itself.
PrivateKey ed25519.PrivateKey `yaml:"-"`
// The public key, in case only that part is known.
PublicKey spec.Base64Bytes `yaml:"public_key"`
// The key ID of the private key.
KeyID gomatrixserverlib.KeyID `yaml:"key_id"`
// When the private key was designated as "expired", as a UNIX timestamp
// in millisecond precision.
ExpiredAt spec.Timestamp `yaml:"expired_at"`
}
// The configuration to use for Prometheus metrics
type Metrics struct {
// Whether or not the metrics are enabled
Enabled bool `yaml:"enabled"`
// Use BasicAuth for Authorization
BasicAuth struct {
// Authorization via Static Username & Password
// Hardcoded Username and Password
Username string `yaml:"username"`
Password string `yaml:"password"`
} `yaml:"basic_auth"`
}
func (c *Metrics) Defaults(opts DefaultOpts) {
c.Enabled = false
if opts.Generate {
c.BasicAuth.Username = "metrics"
c.BasicAuth.Password = "metrics"
}
}
func (c *Metrics) Verify(configErrs *ConfigErrors) {
}
// ServerNotices defines the configuration used for sending server notices
type ServerNotices struct {
Enabled bool `yaml:"enabled"`
// The localpart to be used when sending notices
LocalPart string `yaml:"local_part"`
// The displayname to be used when sending notices
DisplayName string `yaml:"display_name"`
// The avatar of this user
AvatarURL string `yaml:"avatar_url"`
// The roomname to be used when creating messages
RoomName string `yaml:"room_name"`
}
func (c *ServerNotices) Defaults(opts DefaultOpts) {
if opts.Generate {
c.Enabled = true
c.LocalPart = "_server"
c.DisplayName = "Server Alert"
c.RoomName = "Server Alert"
c.AvatarURL = ""
}
}
func (c *ServerNotices) Verify(errors *ConfigErrors) {}
type Cache struct {
EstimatedMaxSize DataUnit `yaml:"max_size_estimated"`
MaxAge time.Duration `yaml:"max_age"`
}
func (c *Cache) Defaults() {
c.EstimatedMaxSize = 1024 * 1024 * 1024 // 1GB
c.MaxAge = time.Hour
}
func (c *Cache) Verify(errors *ConfigErrors) {
checkPositive(errors, "max_size_estimated", int64(c.EstimatedMaxSize))
}
// ReportStats configures opt-in phone-home statistics reporting.
type ReportStats struct {
// Enabled configures phone-home statistics of the server
Enabled bool `yaml:"enabled"`
// Endpoint the endpoint to report stats to
Endpoint string `yaml:"endpoint"`
}
func (c *ReportStats) Defaults() {
c.Enabled = false
c.Endpoint = "https://panopticon.matrix.org/push"
}
func (c *ReportStats) Verify(configErrs *ConfigErrors) {
// We prefer to hit panopticon (https://github.com/matrix-org/panopticon) directly over
// the "old" matrix.org endpoint.
if c.Endpoint == "https://matrix.org/report-usage-stats/push" {
c.Endpoint = "https://panopticon.matrix.org/push"
}
if c.Enabled {
checkNotEmpty(configErrs, "global.report_stats.endpoint", c.Endpoint)
}
}
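// Example YAML fragment for this section (illustrative only; the endpoint shown is
// the default that Verify also falls back to):
//
//	report_stats:
//	  enabled: true
//	  endpoint: https://panopticon.matrix.org/push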
// The configuration to use for Sentry error reporting
type Sentry struct {
Enabled bool `yaml:"enabled"`
// The DSN to connect to e.g "https://[email protected]/0"
// See https://docs.sentry.io/platforms/go/configuration/options/
DSN string `yaml:"dsn"`
// The environment e.g "production"
// See https://docs.sentry.io/platforms/go/configuration/environments/
Environment string `yaml:"environment"`
}
func (c *Sentry) Defaults() {
c.Enabled = false
}
func (c *Sentry) Verify(configErrs *ConfigErrors) {
}
type DatabaseOptions struct {
// The connection string, file:filename.db or postgres://server....
ConnectionString DataSource `yaml:"connection_string"`
// Maximum open connections to the DB (0 = use default, negative means unlimited)
MaxOpenConnections int `yaml:"max_open_conns"`
// Maximum idle connections to the DB (0 = use default, negative means unlimited)
MaxIdleConnections int `yaml:"max_idle_conns"`
// maximum amount of time (in seconds) a connection may be reused (<= 0 means unlimited)
ConnMaxLifetimeSeconds int `yaml:"conn_max_lifetime"`
}
func (c *DatabaseOptions) Defaults(conns int) {
c.MaxOpenConnections = conns
c.MaxIdleConnections = 2
c.ConnMaxLifetimeSeconds = -1
}
| func (c *DatabaseOptions) Verify(configErrs *ConfigErrors) {}
| random_line_split |
|
config_global.go | server will trust as identity servers to
// verify third-party identifiers.
// Defaults to an empty array.
TrustedIDServers []string `yaml:"trusted_third_party_id_servers"`
// JetStream configuration
JetStream JetStream `yaml:"jetstream"`
// Metrics configuration
Metrics Metrics `yaml:"metrics"`
// Sentry configuration
Sentry Sentry `yaml:"sentry"`
// DNS caching options for all outbound HTTP requests
DNSCache DNSCacheOptions `yaml:"dns_cache"`
// ServerNotices configuration used for sending server notices
ServerNotices ServerNotices `yaml:"server_notices"`
// ReportStats configures opt-in phone-home statistics reporting.
ReportStats ReportStats `yaml:"report_stats"`
// Configuration for the caches.
Cache Cache `yaml:"cache"`
}
func (c *Global) Defaults(opts DefaultOpts) {
if opts.Generate {
c.ServerName = "localhost"
c.PrivateKeyPath = "matrix_key.pem"
_, c.PrivateKey, _ = ed25519.GenerateKey(rand.New(rand.NewSource(0)))
c.KeyID = "ed25519:auto"
c.TrustedIDServers = []string{
"matrix.org",
"vector.im",
}
}
c.KeyValidityPeriod = time.Hour * 24 * 7
if opts.SingleDatabase {
c.DatabaseOptions.Defaults(90)
}
c.JetStream.Defaults(opts)
c.Metrics.Defaults(opts)
c.DNSCache.Defaults()
c.Sentry.Defaults()
c.ServerNotices.Defaults(opts)
c.ReportStats.Defaults()
c.Cache.Defaults()
}
func (c *Global) Verify(configErrs *ConfigErrors) {
checkNotEmpty(configErrs, "global.server_name", string(c.ServerName))
checkNotEmpty(configErrs, "global.private_key", string(c.PrivateKeyPath))
for _, v := range c.VirtualHosts {
v.Verify(configErrs)
}
c.JetStream.Verify(configErrs)
c.Metrics.Verify(configErrs)
c.Sentry.Verify(configErrs)
c.DNSCache.Verify(configErrs)
c.ServerNotices.Verify(configErrs)
c.ReportStats.Verify(configErrs)
c.Cache.Verify(configErrs)
}
func (c *Global) IsLocalServerName(serverName spec.ServerName) bool {
if c.ServerName == serverName {
return true
}
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return true
}
}
return false
}
func (c *Global) SplitLocalID(sigil byte, id string) (string, spec.ServerName, error) {
u, s, err := gomatrixserverlib.SplitID(sigil, id)
if err != nil {
return u, s, err
}
if !c.IsLocalServerName(s) {
return u, s, fmt.Errorf("server name %q not known", s)
}
return u, s, nil
}
func (c *Global) VirtualHost(serverName spec.ServerName) *VirtualHost {
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return v
}
}
return nil
}
func (c *Global) VirtualHostForHTTPHost(serverName spec.ServerName) *VirtualHost {
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return v
}
for _, h := range v.MatchHTTPHosts {
if h == serverName {
return v
}
}
}
return nil
}
func (c *Global) SigningIdentityFor(serverName spec.ServerName) (*fclient.SigningIdentity, error) {
for _, id := range c.SigningIdentities() {
if id.ServerName == serverName {
return id, nil
}
}
return nil, fmt.Errorf("no signing identity for %q", serverName)
}
func (c *Global) SigningIdentities() []*fclient.SigningIdentity {
identities := make([]*fclient.SigningIdentity, 0, len(c.VirtualHosts)+1)
identities = append(identities, &c.SigningIdentity)
for _, v := range c.VirtualHosts {
identities = append(identities, &v.SigningIdentity)
}
return identities
}
type VirtualHost struct {
// Signing identity contains the server name, private key and key ID of
// the virtual host.
fclient.SigningIdentity `yaml:",inline"`
// Path to the private key. If not specified, the default global private key
// will be used instead.
PrivateKeyPath Path `yaml:"private_key"`
// How long a remote server can cache our server key for before requesting it again.
// Increasing this number will reduce the number of requests made by remote servers
// for our key, but increases the period a compromised key will be considered valid
// by remote servers.
// Defaults to 24 hours.
KeyValidityPeriod time.Duration `yaml:"key_validity_period"`
// Match these HTTP Host headers on the `/key/v2/server` endpoint, this needs
// to match all delegated names, likely including the port number too if
// the well-known delegation includes that also.
MatchHTTPHosts []spec.ServerName `yaml:"match_http_hosts"`
// Is registration enabled on this virtual host?
AllowRegistration bool `yaml:"allow_registration"`
// Is guest registration enabled on this virtual host?
AllowGuests bool `yaml:"allow_guests"`
}
func (v *VirtualHost) Verify(configErrs *ConfigErrors) {
checkNotEmpty(configErrs, "virtual_host.*.server_name", string(v.ServerName))
}
// RegistrationAllowed returns two bools, the first states whether registration
// is allowed for this virtual host and the second states whether guests are
// allowed for this virtual host.
func (v *VirtualHost) RegistrationAllowed() (bool, bool) {
if v == nil {
return false, false
}
return v.AllowRegistration, v.AllowGuests
}
type OldVerifyKeys struct {
// Path to the private key.
PrivateKeyPath Path `yaml:"private_key"`
// The private key itself.
PrivateKey ed25519.PrivateKey `yaml:"-"`
// The public key, in case only that part is known.
PublicKey spec.Base64Bytes `yaml:"public_key"`
// The key ID of the private key.
KeyID gomatrixserverlib.KeyID `yaml:"key_id"`
// When the private key was designated as "expired", as a UNIX timestamp
// in millisecond precision.
ExpiredAt spec.Timestamp `yaml:"expired_at"`
}
// The configuration to use for Prometheus metrics
type Metrics struct {
// Whether or not the metrics are enabled
Enabled bool `yaml:"enabled"`
// Use BasicAuth for Authorization
BasicAuth struct {
// Authorization via Static Username & Password
// Hardcoded Username and Password
Username string `yaml:"username"`
Password string `yaml:"password"`
} `yaml:"basic_auth"`
}
func (c *Metrics) Defaults(opts DefaultOpts) {
c.Enabled = false
if opts.Generate {
c.BasicAuth.Username = "metrics"
c.BasicAuth.Password = "metrics"
}
}
func (c *Metrics) Verify(configErrs *ConfigErrors) {
}
// ServerNotices defines the configuration used for sending server notices
type ServerNotices struct {
Enabled bool `yaml:"enabled"`
// The localpart to be used when sending notices
LocalPart string `yaml:"local_part"`
// The displayname to be used when sending notices
DisplayName string `yaml:"display_name"`
// The avatar of this user
AvatarURL string `yaml:"avatar_url"`
// The roomname to be used when creating messages
RoomName string `yaml:"room_name"`
}
func (c *ServerNotices) Defaults(opts DefaultOpts) {
if opts.Generate {
c.Enabled = true
c.LocalPart = "_server"
c.DisplayName = "Server Alert"
c.RoomName = "Server Alert"
c.AvatarURL = ""
}
}
func (c *ServerNotices) Verify(errors *ConfigErrors) {}
type Cache struct {
EstimatedMaxSize DataUnit `yaml:"max_size_estimated"`
MaxAge time.Duration `yaml:"max_age"`
}
func (c *Cache) Defaults() {
c.EstimatedMaxSize = 1024 * 1024 * 1024 // 1GB
c.MaxAge = time.Hour
}
func (c *Cache) Verify(errors *ConfigErrors) {
checkPositive(errors, "max_size_estimated", int64(c.EstimatedMaxSize))
}
// ReportStats configures opt-in phone-home statistics reporting.
type ReportStats struct {
// Enabled configures phone-home statistics of the server
Enabled bool `yaml:"enabled"`
// Endpoint the endpoint to report stats to
Endpoint string `yaml:"endpoint"`
}
func (c *ReportStats) Defaults() {
c.Enabled = false
c.Endpoint = "https://panopticon.matrix.org/push"
}
func (c *ReportStats) Verify(configErrs *ConfigErrors) {
// We prefer to hit panopticon (https://github.com/matrix-org/panopticon) directly over
// the "old" matrix.org endpoint.
if c.Endpoint == "https://matrix.org/report-usage-stats/push" | {
c.Endpoint = "https://panopticon.matrix.org/push"
} | conditional_block |
|
config_global.go | ClientName string `yaml:"well_known_client_name"`
// The server name to delegate sliding sync communications to, with optional port.
// Requires `well_known_client_name` to also be configured.
WellKnownSlidingSyncProxy string `yaml:"well_known_sliding_sync_proxy"`
// Disables federation. Dendrite will not be able to make any outbound HTTP requests
// to other servers and the federation API will not be exposed.
DisableFederation bool `yaml:"disable_federation"`
// Configures the handling of presence events.
Presence PresenceOptions `yaml:"presence"`
// List of domains that the server will trust as identity servers to
// verify third-party identifiers.
// Defaults to an empty array.
TrustedIDServers []string `yaml:"trusted_third_party_id_servers"`
// JetStream configuration
JetStream JetStream `yaml:"jetstream"`
// Metrics configuration
Metrics Metrics `yaml:"metrics"`
// Sentry configuration
Sentry Sentry `yaml:"sentry"`
// DNS caching options for all outbound HTTP requests
DNSCache DNSCacheOptions `yaml:"dns_cache"`
// ServerNotices configuration used for sending server notices
ServerNotices ServerNotices `yaml:"server_notices"`
// ReportStats configures opt-in phone-home statistics reporting.
ReportStats ReportStats `yaml:"report_stats"`
// Configuration for the caches.
Cache Cache `yaml:"cache"`
}
func (c *Global) Defaults(opts DefaultOpts) {
if opts.Generate {
c.ServerName = "localhost"
c.PrivateKeyPath = "matrix_key.pem"
_, c.PrivateKey, _ = ed25519.GenerateKey(rand.New(rand.NewSource(0)))
c.KeyID = "ed25519:auto"
c.TrustedIDServers = []string{
"matrix.org",
"vector.im",
}
}
c.KeyValidityPeriod = time.Hour * 24 * 7
if opts.SingleDatabase {
c.DatabaseOptions.Defaults(90)
}
c.JetStream.Defaults(opts)
c.Metrics.Defaults(opts)
c.DNSCache.Defaults()
c.Sentry.Defaults()
c.ServerNotices.Defaults(opts)
c.ReportStats.Defaults()
c.Cache.Defaults()
}
func (c *Global) Verify(configErrs *ConfigErrors) {
checkNotEmpty(configErrs, "global.server_name", string(c.ServerName))
checkNotEmpty(configErrs, "global.private_key", string(c.PrivateKeyPath))
for _, v := range c.VirtualHosts {
v.Verify(configErrs)
}
c.JetStream.Verify(configErrs)
c.Metrics.Verify(configErrs)
c.Sentry.Verify(configErrs)
c.DNSCache.Verify(configErrs)
c.ServerNotices.Verify(configErrs)
c.ReportStats.Verify(configErrs)
c.Cache.Verify(configErrs)
}
func (c *Global) IsLocalServerName(serverName spec.ServerName) bool |
func (c *Global) SplitLocalID(sigil byte, id string) (string, spec.ServerName, error) {
u, s, err := gomatrixserverlib.SplitID(sigil, id)
if err != nil {
return u, s, err
}
if !c.IsLocalServerName(s) {
return u, s, fmt.Errorf("server name %q not known", s)
}
return u, s, nil
}
func (c *Global) VirtualHost(serverName spec.ServerName) *VirtualHost {
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return v
}
}
return nil
}
func (c *Global) VirtualHostForHTTPHost(serverName spec.ServerName) *VirtualHost {
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return v
}
for _, h := range v.MatchHTTPHosts {
if h == serverName {
return v
}
}
}
return nil
}
func (c *Global) SigningIdentityFor(serverName spec.ServerName) (*fclient.SigningIdentity, error) {
for _, id := range c.SigningIdentities() {
if id.ServerName == serverName {
return id, nil
}
}
return nil, fmt.Errorf("no signing identity for %q", serverName)
}
func (c *Global) SigningIdentities() []*fclient.SigningIdentity {
identities := make([]*fclient.SigningIdentity, 0, len(c.VirtualHosts)+1)
identities = append(identities, &c.SigningIdentity)
for _, v := range c.VirtualHosts {
identities = append(identities, &v.SigningIdentity)
}
return identities
}
type VirtualHost struct {
// Signing identity contains the server name, private key and key ID of
// the virtual host.
fclient.SigningIdentity `yaml:",inline"`
// Path to the private key. If not specified, the default global private key
// will be used instead.
PrivateKeyPath Path `yaml:"private_key"`
// How long a remote server can cache our server key for before requesting it again.
// Increasing this number will reduce the number of requests made by remote servers
// for our key, but increases the period a compromised key will be considered valid
// by remote servers.
// Defaults to 24 hours.
KeyValidityPeriod time.Duration `yaml:"key_validity_period"`
// Match these HTTP Host headers on the `/key/v2/server` endpoint, this needs
// to match all delegated names, likely including the port number too if
// the well-known delegation includes that also.
MatchHTTPHosts []spec.ServerName `yaml:"match_http_hosts"`
// Is registration enabled on this virtual host?
AllowRegistration bool `yaml:"allow_registration"`
// Is guest registration enabled on this virtual host?
AllowGuests bool `yaml:"allow_guests"`
}
func (v *VirtualHost) Verify(configErrs *ConfigErrors) {
checkNotEmpty(configErrs, "virtual_host.*.server_name", string(v.ServerName))
}
// RegistrationAllowed returns two bools, the first states whether registration
// is allowed for this virtual host and the second states whether guests are
// allowed for this virtual host.
func (v *VirtualHost) RegistrationAllowed() (bool, bool) {
if v == nil {
return false, false
}
return v.AllowRegistration, v.AllowGuests
}
type OldVerifyKeys struct {
// Path to the private key.
PrivateKeyPath Path `yaml:"private_key"`
// The private key itself.
PrivateKey ed25519.PrivateKey `yaml:"-"`
// The public key, in case only that part is known.
PublicKey spec.Base64Bytes `yaml:"public_key"`
// The key ID of the private key.
KeyID gomatrixserverlib.KeyID `yaml:"key_id"`
// When the private key was designated as "expired", as a UNIX timestamp
// in millisecond precision.
ExpiredAt spec.Timestamp `yaml:"expired_at"`
}
// The configuration to use for Prometheus metrics
type Metrics struct {
// Whether or not the metrics are enabled
Enabled bool `yaml:"enabled"`
// Use BasicAuth for Authorization
BasicAuth struct {
// Authorization via Static Username & Password
// Hardcoded Username and Password
Username string `yaml:"username"`
Password string `yaml:"password"`
} `yaml:"basic_auth"`
}
func (c *Metrics) Defaults(opts DefaultOpts) {
c.Enabled = false
if opts.Generate {
c.BasicAuth.Username = "metrics"
c.BasicAuth.Password = "metrics"
}
}
func (c *Metrics) Verify(configErrs *ConfigErrors) {
}
// ServerNotices defines the configuration used for sending server notices
type ServerNotices struct {
Enabled bool `yaml:"enabled"`
// The localpart to be used when sending notices
LocalPart string `yaml:"local_part"`
// The displayname to be used when sending notices
DisplayName string `yaml:"display_name"`
// The avatar of this user
AvatarURL string `yaml:"avatar_url"`
// The roomname to be used when creating messages
RoomName string `yaml:"room_name"`
}
func (c *ServerNotices) Defaults(opts DefaultOpts) {
if opts.Generate {
c.Enabled = true
c.LocalPart = "_server"
c.DisplayName = "Server Alert"
c.RoomName = "Server Alert"
c.AvatarURL = ""
}
}
func (c *ServerNotices) Verify(errors *ConfigErrors) {}
type Cache struct {
EstimatedMaxSize DataUnit `yaml:"max_size_estimated"`
MaxAge time.Duration `yaml:"max_age"`
}
func (c *Cache) Defaults() {
c.EstimatedMaxSize = 1024 * 1024 * 1024 // 1GB
c.MaxAge = time.Hour
}
func (c *Cache) Verify(errors *ConfigErrors) {
checkPositive(errors, "max_size_estimated", int64(c.EstimatedMaxSize))
}
// ReportStats configures opt-in phone-home statistics reporting.
type ReportStats struct {
// Enabled configures phone-home statistics of the server
Enabled bool `yaml:"enabled"`
| {
if c.ServerName == serverName {
return true
}
for _, v := range c.VirtualHosts {
if v.ServerName == serverName {
return true
}
}
return false
} | identifier_body |
lib.rs | /// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Compute<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// The proportion of the remaining `proof_size` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Storage<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// Storage map used for wasting proof size.
///
/// It contains no meaningful data - hence the name "Trash". The maximal number of entries is
/// set to 65k, which is just below the next jump at 16^4. This is important to reduce the proof
/// size benchmarking overestimate. The assumption here is that we won't have more than 65k *
/// 1KiB = 65MiB of proof size wasting in practice. However, this limit is not enforced, so the
/// pallet would also work out of the box with more entries, but its benchmarked proof weight
/// would possibly be underestimated in that case.
#[pallet::storage]
pub(super) type TrashData<T: Config> = StorageMap<
Hasher = Twox64Concat,
Key = u32,
Value = [u8; VALUE_SIZE],
QueryKind = OptionQuery,
MaxValues = ConstU32<MAX_TRASH_DATA_ENTRIES>,
>;
/// The current number of entries in `TrashData`.
#[pallet::storage]
pub(crate) type TrashDataCount<T: Config> = StorageValue<_, u32, ValueQuery>;
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config> {
/// The compute limit.
pub compute: FixedU64,
/// The storage limit.
pub storage: FixedU64,
/// The amount of trash data for wasting proof size.
pub trash_data_count: u32,
#[serde(skip)]
/// The required configuration field.
pub _config: sp_std::marker::PhantomData<T>,
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
assert!(
self.trash_data_count <= MAX_TRASH_DATA_ENTRIES,
"number of TrashData entries cannot be bigger than {:?}",
MAX_TRASH_DATA_ENTRIES
);
(0..self.trash_data_count)
.for_each(|i| TrashData::<T>::insert(i, Pallet::<T>::gen_value(i)));
TrashDataCount::<T>::set(self.trash_data_count);
assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane");
<Compute<T>>::put(self.compute);
assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane");
<Storage<T>>::put(self.storage);
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
assert!(
!T::WeightInfo::waste_ref_time_iter(1).ref_time().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
assert!(
!T::WeightInfo::waste_proof_size_some(1).proof_size().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
}
fn on_idle(_: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
let mut meter = WeightMeter::from_limit(remaining_weight);
if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() {
return T::WeightInfo::empty_on_idle()
}
let proof_size_limit =
Storage::<T>::get().saturating_mul_int(meter.remaining().proof_size());
let computation_weight_limit =
Compute::<T>::get().saturating_mul_int(meter.remaining().ref_time());
let mut meter = WeightMeter::from_limit(Weight::from_parts(
computation_weight_limit,
proof_size_limit,
));
Self::waste_at_most_proof_size(&mut meter);
Self::waste_at_most_ref_time(&mut meter);
meter.consumed()
}
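// Worked example (illustrative numbers only): with `Compute` = 0.5, `Storage` = 0.25
// and a remaining block weight of (10_000_000_000 ref_time, 1_000_000 proof_size),
// the inner meter above is capped at 5_000_000_000 ref_time and 250_000 proof_size.
// Each dimension is scaled independently by its own `FixedU64` factor via
// `saturating_mul_int`, so compute wasting and storage wasting can be tuned separately.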
}
#[pallet::call(weight = T::WeightInfo)]
impl<T: Config> Pallet<T> {
/// Initialize the pallet. Should be called once, if no genesis state was provided.
///
/// `current_count` is the current number of elements in `TrashData`. This can be set to
/// `None` when the pallet is first initialized.
///
/// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`.
#[pallet::call_index(0)]
#[pallet::weight(
T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default())
.max(T::WeightInfo::initialize_pallet_shrink(witness_count.unwrap_or_default()))
)]
pub fn initialize_pallet(
origin: OriginFor<T>,
new_count: u32,
witness_count: Option<u32>,
) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
let current_count = TrashDataCount::<T>::get();
ensure!(
current_count == witness_count.unwrap_or_default(),
Error::<T>::AlreadyInitialized
);
if new_count > current_count {
(current_count..new_count)
.for_each(|i| TrashData::<T>::insert(i, Self::gen_value(i)));
} else {
(new_count..current_count).for_each(TrashData::<T>::remove);
}
Self::deposit_event(Event::PalletInitialized { reinit: witness_count.is_some() });
TrashDataCount::<T>::set(new_count);
Ok(())
}
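// Illustrative call from a test or setup script (the `Test` runtime, `Glutton`
// pallet alias and origin helpers are assumptions, not defined in this file):
//
// assert_ok!(Glutton::initialize_pallet(RuntimeOrigin::root(), 5_000, None));
// assert_eq!(TrashDataCount::<Test>::get(), 5_000);
//
// Re-initialising later requires passing the current count as the witness:
//
// assert_ok!(Glutton::initialize_pallet(RuntimeOrigin::root(), 8_000, Some(5_000)));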
/// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(1)]
pub fn set_compute(origin: OriginFor<T>, compute: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(compute <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Compute::<T>::set(compute);
Self::deposit_event(Event::ComputationLimitSet { compute });
Ok(())
}
/// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`.
///
/// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking
/// results that are used here are likely an over-estimation. 100% intended consumption will
/// therefore translate to less than 100% actual consumption.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(2)]
pub fn set_storage(origin: OriginFor<T>, storage: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(storage <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Storage::<T>::set(storage);
Self::deposit_event(Event::StorageLimitSet { storage });
Ok(())
}
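// Illustrative call (assumed test runtime and helpers, not defined here): setting
// the storage factor to 50% of the remaining proof size budget.
//
// assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), FixedU64::from_rational(1, 2)));
// assert_eq!(Storage::<Test>::get(), FixedU64::from_rational(1, 2));
//
// Values above RESOURCE_HARD_LIMIT (10.0) are rejected with Error::InsaneLimit.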
}
impl<T: Config> Pallet<T> {
/// Waste at most the remaining proof size of `meter`.
///
/// Tries to come as close to the limit as possible.
pub(crate) fn waste_at_most_proof_size(meter: &mut WeightMeter) {
let Ok(n) = Self::calculate_proof_size_iters(&meter) else { return };
meter.consume(T::WeightInfo::waste_proof_size_some(n));
(0..n).for_each(|i| {
TrashData::<T>::get(i);
});
}
/// Calculate how many times `waste_proof_size_some` should be called to fill up `meter`.
fn calculate_proof_size_iters(meter: &WeightMeter) -> Result<u32, ()> {
let base = T::WeightInfo::waste_proof_size_some(0);
let slope = T::WeightInfo::waste_proof_size_some(1).saturating_sub(base);
let remaining = meter.remaining().saturating_sub(base);
let iter_by_proof_size =
remaining.proof_size().checked_div(slope.proof_size()).ok_or(())?;
let iter_by_ref_time = remaining.ref_time().checked_div(slope.ref_time()).ok_or(())?;
if iter_by_proof_size > 0 && iter_by_proof_size <= iter_by_ref_time {
Ok(iter_by_proof_size as u32)
} else | {
Err(())
} | conditional_block |
|
lib.rs | pub const RESOURCE_HARD_LIMIT: FixedU64 = FixedU64::from_u32(10);
#[frame_support::pallet]
pub mod pallet {
use super::*;
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// The admin origin that can set computational limits and initialize the pallet.
type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
/// Weight information for this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::pallet]
pub struct Pallet<T>(_);
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// The pallet has been (re)initialized.
PalletInitialized {
/// Whether the pallet has been re-initialized.
reinit: bool,
},
/// The computation limit has been updated.
ComputationLimitSet {
/// The computation limit.
compute: FixedU64,
},
/// The storage limit has been updated.
StorageLimitSet {
/// The storage limit.
storage: FixedU64,
},
}
#[pallet::error]
pub enum Error<T> {
/// The pallet was already initialized.
///
/// Set `witness_count` to `Some` to bypass this error.
AlreadyInitialized,
/// The limit was over [`crate::RESOURCE_HARD_LIMIT`].
InsaneLimit,
}
/// The proportion of the remaining `ref_time` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Compute<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// The proportion of the remaining `proof_size` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Storage<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// Storage map used for wasting proof size.
///
/// It contains no meaningful data - hence the name "Trash". The maximal number of entries is
/// set to 65k, which is just below the next jump at 16^4. This is important to reduce the proof
/// size benchmarking overestimate. The assumption here is that we won't have more than 65k *
/// 1KiB = 65MiB of proof size wasting in practice. However, this limit is not enforced, so the
/// pallet would also work out of the box with more entries, but its benchmarked proof weight
/// would possibly be underestimated in that case.
#[pallet::storage]
pub(super) type TrashData<T: Config> = StorageMap<
Hasher = Twox64Concat,
Key = u32,
Value = [u8; VALUE_SIZE],
QueryKind = OptionQuery,
MaxValues = ConstU32<MAX_TRASH_DATA_ENTRIES>,
>;
/// The current number of entries in `TrashData`.
#[pallet::storage]
pub(crate) type TrashDataCount<T: Config> = StorageValue<_, u32, ValueQuery>;
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config> {
/// The compute limit.
pub compute: FixedU64,
/// The storage limit.
pub storage: FixedU64,
/// The amount of trash data for wasting proof size.
pub trash_data_count: u32,
#[serde(skip)]
/// The required configuration field.
pub _config: sp_std::marker::PhantomData<T>,
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
assert!(
self.trash_data_count <= MAX_TRASH_DATA_ENTRIES,
"number of TrashData entries cannot be bigger than {:?}",
MAX_TRASH_DATA_ENTRIES
);
(0..self.trash_data_count)
.for_each(|i| TrashData::<T>::insert(i, Pallet::<T>::gen_value(i)));
TrashDataCount::<T>::set(self.trash_data_count);
assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane");
<Compute<T>>::put(self.compute);
assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane");
<Storage<T>>::put(self.storage);
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
assert!(
!T::WeightInfo::waste_ref_time_iter(1).ref_time().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
assert!(
!T::WeightInfo::waste_proof_size_some(1).proof_size().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
}
fn on_idle(_: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
let mut meter = WeightMeter::from_limit(remaining_weight);
if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() {
return T::WeightInfo::empty_on_idle()
}
let proof_size_limit =
Storage::<T>::get().saturating_mul_int(meter.remaining().proof_size());
let computation_weight_limit =
Compute::<T>::get().saturating_mul_int(meter.remaining().ref_time());
let mut meter = WeightMeter::from_limit(Weight::from_parts(
computation_weight_limit,
proof_size_limit,
));
Self::waste_at_most_proof_size(&mut meter);
Self::waste_at_most_ref_time(&mut meter);
meter.consumed()
}
}
#[pallet::call(weight = T::WeightInfo)]
impl<T: Config> Pallet<T> {
/// Initialize the pallet. Should be called once, if no genesis state was provided.
///
/// `current_count` is the current number of elements in `TrashData`. This can be set to
/// `None` when the pallet is first initialized.
///
/// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`.
#[pallet::call_index(0)]
#[pallet::weight(
T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default())
.max(T::WeightInfo::initialize_pallet_shrink(witness_count.unwrap_or_default()))
)]
pub fn initialize_pallet(
origin: OriginFor<T>,
new_count: u32,
witness_count: Option<u32>,
) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
let current_count = TrashDataCount::<T>::get();
ensure!(
current_count == witness_count.unwrap_or_default(),
Error::<T>::AlreadyInitialized
);
if new_count > current_count {
(current_count..new_count)
.for_each(|i| TrashData::<T>::insert(i, Self::gen_value(i)));
} else {
(new_count..current_count).for_each(TrashData::<T>::remove);
}
Self::deposit_event(Event::PalletInitialized { reinit: witness_count.is_some() });
TrashDataCount::<T>::set(new_count);
Ok(())
}
/// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(1)]
pub fn set_compute(origin: OriginFor<T>, compute: FixedU64) -> DispatchResult |
/// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`.
///
/// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking
/// results that are used here are likely an over-estimation. 100% intended consumption will
/// therefore translate to less than 100% actual consumption.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(2)]
pub fn set_storage(origin: OriginFor<T>, storage: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure | {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(compute <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Compute::<T>::set(compute);
Self::deposit_event(Event::ComputationLimitSet { compute });
Ok(())
} | identifier_body |
lib.rs | pub const RESOURCE_HARD_LIMIT: FixedU64 = FixedU64::from_u32(10);
#[frame_support::pallet]
pub mod pallet {
use super::*;
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// The admin origin that can set computational limits and initialize the pallet.
type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
/// Weight information for this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::pallet]
pub struct Pallet<T>(_);
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// The pallet has been (re)initialized.
PalletInitialized {
/// Whether the pallet has been re-initialized.
reinit: bool,
},
/// The computation limit has been updated.
ComputationLimitSet {
/// The computation limit.
compute: FixedU64,
},
/// The storage limit has been updated.
StorageLimitSet {
/// The storage limit.
storage: FixedU64,
},
}
#[pallet::error]
pub enum Error<T> {
/// The pallet was already initialized.
///
/// Set `witness_count` to `Some` to bypass this error.
AlreadyInitialized,
/// The limit was over [`crate::RESOURCE_HARD_LIMIT`].
InsaneLimit,
}
/// The proportion of the remaining `ref_time` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Compute<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// The proportion of the remaining `proof_size` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Storage<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// Storage map used for wasting proof size.
///
/// It contains no meaningful data - hence the name "Trash". The maximal number of entries is
/// set to 65k, which is just below the next jump at 16^4. This is important to reduce the proof
/// size benchmarking overestimate. The assumption here is that we won't have more than 65k *
/// 1KiB = 65MiB of proof size wasting in practice. However, this limit is not enforced, so the
/// pallet would also work out of the box with more entries, but its benchmarked proof weight
/// would possibly be underestimated in that case.
#[pallet::storage]
pub(super) type TrashData<T: Config> = StorageMap<
Hasher = Twox64Concat,
Key = u32,
Value = [u8; VALUE_SIZE],
QueryKind = OptionQuery,
MaxValues = ConstU32<MAX_TRASH_DATA_ENTRIES>,
>;
/// The current number of entries in `TrashData`.
#[pallet::storage]
pub(crate) type TrashDataCount<T: Config> = StorageValue<_, u32, ValueQuery>;
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config> {
/// The compute limit.
pub compute: FixedU64,
/// The storage limit.
pub storage: FixedU64,
/// The amount of trash data for wasting proof size.
pub trash_data_count: u32,
#[serde(skip)]
/// The required configuration field.
pub _config: sp_std::marker::PhantomData<T>,
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
assert!(
self.trash_data_count <= MAX_TRASH_DATA_ENTRIES,
"number of TrashData entries cannot be bigger than {:?}",
MAX_TRASH_DATA_ENTRIES
);
(0..self.trash_data_count)
.for_each(|i| TrashData::<T>::insert(i, Pallet::<T>::gen_value(i)));
TrashDataCount::<T>::set(self.trash_data_count);
assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane");
<Compute<T>>::put(self.compute);
assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane");
<Storage<T>>::put(self.storage);
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
assert!(
!T::WeightInfo::waste_ref_time_iter(1).ref_time().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
assert!(
!T::WeightInfo::waste_proof_size_some(1).proof_size().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
}
fn on_idle(_: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
let mut meter = WeightMeter::from_limit(remaining_weight);
if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() {
return T::WeightInfo::empty_on_idle()
}
let proof_size_limit =
Storage::<T>::get().saturating_mul_int(meter.remaining().proof_size());
let computation_weight_limit =
Compute::<T>::get().saturating_mul_int(meter.remaining().ref_time());
let mut meter = WeightMeter::from_limit(Weight::from_parts(
computation_weight_limit,
proof_size_limit,
));
Self::waste_at_most_proof_size(&mut meter);
Self::waste_at_most_ref_time(&mut meter);
meter.consumed()
}
}
#[pallet::call(weight = T::WeightInfo)]
impl<T: Config> Pallet<T> {
/// Initialize the pallet. Should be called once, if no genesis state was provided.
///
/// `current_count` is the current number of elements in `TrashData`. This can be set to
/// `None` when the pallet is first initialized.
///
/// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`.
#[pallet::call_index(0)]
#[pallet::weight(
T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default())
.max(T::WeightInfo::initialize_pallet_shrink(witness_count.unwrap_or_default()))
)]
pub fn initialize_pallet(
origin: OriginFor<T>,
new_count: u32,
witness_count: Option<u32>,
) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
let current_count = TrashDataCount::<T>::get();
ensure!(
current_count == witness_count.unwrap_or_default(),
Error::<T>::AlreadyInitialized
);
if new_count > current_count {
(current_count..new_count)
.for_each(|i| TrashData::<T>::insert(i, Self::gen_value(i)));
} else { | }
Self::deposit_event(Event::PalletInitialized { reinit: witness_count.is_some() });
TrashDataCount::<T>::set(new_count);
Ok(())
}
/// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(1)]
pub fn set_compute(origin: OriginFor<T>, compute: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(compute <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Compute::<T>::set(compute);
Self::deposit_event(Event::ComputationLimitSet { compute });
Ok(())
}
/// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`.
///
/// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking
/// results that are used here are likely an over-estimation. 100% intended consumption will
/// therefore translate to less than 100% actual consumption.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(2)]
pub fn set_storage(origin: OriginFor<T>, storage: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or | (new_count..current_count).for_each(TrashData::<T>::remove); | random_line_split |
lib.rs | pub const RESOURCE_HARD_LIMIT: FixedU64 = FixedU64::from_u32(10);
#[frame_support::pallet]
pub mod pallet {
use super::*;
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// The admin origin that can set computational limits and initialize the pallet.
type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
/// Weight information for this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::pallet]
pub struct Pallet<T>(_);
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// The pallet has been (re)initialized.
PalletInitialized {
/// Whether the pallet has been re-initialized.
reinit: bool,
},
/// The computation limit has been updated.
ComputationLimitSet {
/// The computation limit.
compute: FixedU64,
},
/// The storage limit has been updated.
StorageLimitSet {
/// The storage limit.
storage: FixedU64,
},
}
#[pallet::error]
pub enum Error<T> {
/// The pallet was already initialized.
///
/// Set `witness_count` to `Some` to bypass this error.
AlreadyInitialized,
/// The limit was over [`crate::RESOURCE_HARD_LIMIT`].
InsaneLimit,
}
/// The proportion of the remaining `ref_time` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Compute<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// The proportion of the remaining `proof_size` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Storage<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// Storage map used for wasting proof size.
///
/// It contains no meaningful data - hence the name "Trash". The maximal number of entries is
/// set to 65k, which is just below the next jump at 16^4. This is important to reduce the proof
/// size benchmarking overestimate. The assumption here is that we won't have more than 65k *
/// 1KiB = 65MiB of proof size wasting in practice. However, this limit is not enforced, so the
/// pallet would also work out of the box with more entries, but its benchmarked proof weight
/// would possibly be underestimated in that case.
#[pallet::storage]
pub(super) type TrashData<T: Config> = StorageMap<
Hasher = Twox64Concat,
Key = u32,
Value = [u8; VALUE_SIZE],
QueryKind = OptionQuery,
MaxValues = ConstU32<MAX_TRASH_DATA_ENTRIES>,
>;
/// The current number of entries in `TrashData`.
#[pallet::storage]
pub(crate) type TrashDataCount<T: Config> = StorageValue<_, u32, ValueQuery>;
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config> {
/// The compute limit.
pub compute: FixedU64,
/// The storage limit.
pub storage: FixedU64,
/// The amount of trash data for wasting proof size.
pub trash_data_count: u32,
#[serde(skip)]
/// The required configuration field.
pub _config: sp_std::marker::PhantomData<T>,
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
assert!(
self.trash_data_count <= MAX_TRASH_DATA_ENTRIES,
"number of TrashData entries cannot be bigger than {:?}",
MAX_TRASH_DATA_ENTRIES
);
(0..self.trash_data_count)
.for_each(|i| TrashData::<T>::insert(i, Pallet::<T>::gen_value(i)));
TrashDataCount::<T>::set(self.trash_data_count);
assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane");
<Compute<T>>::put(self.compute);
assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane");
<Storage<T>>::put(self.storage);
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
assert!(
!T::WeightInfo::waste_ref_time_iter(1).ref_time().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
assert!(
!T::WeightInfo::waste_proof_size_some(1).proof_size().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
}
fn on_idle(_: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
let mut meter = WeightMeter::from_limit(remaining_weight);
if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() {
return T::WeightInfo::empty_on_idle()
}
let proof_size_limit =
Storage::<T>::get().saturating_mul_int(meter.remaining().proof_size());
let computation_weight_limit =
Compute::<T>::get().saturating_mul_int(meter.remaining().ref_time());
let mut meter = WeightMeter::from_limit(Weight::from_parts(
computation_weight_limit,
proof_size_limit,
));
Self::waste_at_most_proof_size(&mut meter);
Self::waste_at_most_ref_time(&mut meter);
meter.consumed()
}
}
#[pallet::call(weight = T::WeightInfo)]
impl<T: Config> Pallet<T> {
/// Initialize the pallet. Should be called once, if no genesis state was provided.
///
/// `current_count` is the current number of elements in `TrashData`. This can be set to
/// `None` when the pallet is first initialized.
///
/// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`.
#[pallet::call_index(0)]
#[pallet::weight(
T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default())
.max(T::WeightInfo::initialize_pallet_shrink(witness_count.unwrap_or_default()))
)]
pub fn initialize_pallet(
origin: OriginFor<T>,
new_count: u32,
witness_count: Option<u32>,
) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
let current_count = TrashDataCount::<T>::get();
ensure!(
current_count == witness_count.unwrap_or_default(),
Error::<T>::AlreadyInitialized
);
if new_count > current_count {
(current_count..new_count)
.for_each(|i| TrashData::<T>::insert(i, Self::gen_value(i)));
} else {
(new_count..current_count).for_each(TrashData::<T>::remove);
}
Self::deposit_event(Event::PalletInitialized { reinit: witness_count.is_some() });
TrashDataCount::<T>::set(new_count);
Ok(())
}
/// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(1)]
pub fn set_compute(origin: OriginFor<T>, compute: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(compute <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Compute::<T>::set(compute);
Self::deposit_event(Event::ComputationLimitSet { compute });
Ok(())
}
/// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`.
///
/// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking
/// results that are used here are likely an over-estimation. 100% intended consumption will
/// therefore translate to less than 100% actual consumption.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(2)]
pub fn | (origin: OriginFor<T>, storage: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin | set_storage | identifier_name |
house.go |
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
defer this.RetData(resp)
//1. Get the image data from the user request
fileData,hd,err:=this.GetFile("house_image")
defer fileData.Close() // close the file handle once the handler returns
//beego.Info("========",fileData,hd,err)
// no image was received
if fileData==nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//2. Store the uploaded binary data in fdfs and get back a fileid
suffix:=path.Ext(hd.Filename)
// validate the uploaded file type
if suffix!=".jpg"&&suffix!=".png"&&suffix!=".gif"&&suffix!=".jpeg"{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
// drop the leading "."
suffixStr:=suffix[1:]
// allocate a []byte buffer of hd.Size to hold the data read from fileData
fileBuffer:=make([]byte,hd.Size)
// read the uploaded data into the buffer
_,err=fileData.Read(fileBuffer)
if err!=nil{
resp["errno"]=models.RECODE_IOERR
resp["errmsg"]=models.RecodeText(models.RECODE_IOERR)
return
}
// upload the image to fdfs and get the fileid
uploadResponse,err:=UploadByBuffer(fileBuffer,suffixStr)
//3. Get the house_id from the request URL
house_id:=this.Ctx.Input.Param(":id")
//4. Check whether the house's main image (index_image_url) is empty
house:=models.House{} // create a house struct
// set the house struct's Id from house_id
house.Id,_=strconv.Atoi(house_id)
o:=orm.NewOrm() // create the orm
errRead:=o.Read(&house) // read the house row from the database by its Id
if errRead!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
// check whether index_image_url is empty
// if it is empty, assign the fileid path to index_image_url
if house.Index_image_url==""{
house.Index_image_url=uploadResponse.RemoteFileId
}
//5. Even when a main image already exists, append this image's fileid (via the relation) as a HouseImage row for the house_image table
// HouseImage is used to attach additional images once the main image is set
house_image:=models.HouseImage{House:&house,Url:uploadResponse.RemoteFileId}
// associate house_image with the house; multiple extra images can be appended to house.Images
house.Images=append(house.Images,&house_image)// add the HouseImage object to house.Images
// insert house_image into the house_image table
if _,err:=o.Insert(&house_image);err!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
// update the house row in the database
if _,err:=o.Update(&house);err!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
//6. Build the full URL by prefixing the domain to the fileid
respData:=make(map[string]string)
respData["url"]=utils.AddDomain2Url(uploadResponse.RemoteFileId)
//7.返回给前端json
resp["data"]=respData
}
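// Illustrative sketch (standalone file, separate from house.go): the suffix
// check above compares against four literal extensions and is case-sensitive,
// so a file named "photo.JPG" would be rejected. A lowercased whitelist lookup
// like the one below covers that case and is easier to extend; every name here
// is illustrative rather than part of the project.
package main

import (
	"fmt"
	"path"
	"strings"
)

var allowedImageExt = map[string]bool{
	".jpg": true, ".jpeg": true, ".png": true, ".gif": true,
}

// isAllowedImage reports whether the file extension is in the whitelist,
// ignoring case.
func isAllowedImage(filename string) bool {
	return allowedImageExt[strings.ToLower(path.Ext(filename))]
}

func main() {
	fmt.Println(isAllowedImage("photo.JPG")) // true
	fmt.Println(isAllowedImage("notes.txt")) // false
}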
//请求首页房源
func (this *HouseController) GetHouseIndex() {
resp:=make(map[string]interface{})
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
defer this.RetData(resp)
var respData []interface{}
beego.Debug("Index Houses....")
//1 从缓存服务器中请求 "home_page_data" 字段,如果有值就直接返回
//先从缓存中获取房屋数据,将缓存数据返回前端即可
//连接redis需要的参数信息
redis_config_map:=map[string]string{
"key":"lovehome",
"conn":utils.G_redis_addr+":"+utils.G_redis_port,
"dbNum":utils.G_redis_dbnum,
}
//把参数信息转成json格式
redis_config,_:=json.Marshal(redis_config_map)
//连接redis
cache_conn,err:=cache.NewCache("redis",string(redis_config))
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//设置key
house_page_key:="home_page_data"
//上传房源数据到指定key中
house_page_value:=cache_conn.Get(house_page_key)
//返回给前端json数据
if house_page_value!=nil{
beego.Debug("======= get house page info from CACHE!!! ========")
json.Unmarshal(house_page_value.([]byte),&respData)
resp["data"]=respData
return
}
//2 如果缓存没有,需要从数据库中查询到房屋列表
//取出house对象
houses:=[]models.House{}
o:=orm.NewOrm()
//查询数据库中所有房子信息
if _,err:=o.QueryTable("house").Limit(models.HOME_PAGE_MAX_HOUSES).All(&houses);err==nil{
//循环遍历这些房子及关联表查询
for _,house:=range houses{
//o.LoadRelated(&house, "Area")
//o.LoadRelated(&house, "User")
//o.LoadRelated(&house, "Images")
//o.LoadRelated(&house, "Facilities")
//用下面方法查到的部分房子信息追加到respData数组中
respData=append(respData,house.To_house_info())
}
}
//将data存入缓存中
house_page_value,_=json.Marshal(respData)
cache_conn.Put(house_page_key,house_page_value,3600*time.Second)
//返回前端data
resp["data"]=respData
return
}
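// Illustrative sketch (standalone file, separate from house.go): GetHouseIndex
// above follows a cache-aside pattern — read the cache first, fall back to the
// database on a miss, then repopulate the cache with a TTL. The snippet below
// shows the same control flow with a plain in-memory store so it can be read
// without the beego cache/ORM dependencies; all names are illustrative.
package main

import (
	"fmt"
	"sync"
	"time"
)

type entry struct {
	value   string
	expires time.Time
}

type ttlCache struct {
	mu   sync.Mutex
	data map[string]entry
}

func newTTLCache() *ttlCache { return &ttlCache{data: map[string]entry{}} }

// Get returns the cached value unless it is missing or expired.
func (c *ttlCache) Get(key string) (string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.data[key]
	if !ok || time.Now().After(e.expires) {
		return "", false
	}
	return e.value, true
}

// Put stores a value with an expiry time.
func (c *ttlCache) Put(key, value string, ttl time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data[key] = entry{value: value, expires: time.Now().Add(ttl)}
}

// loadHomePage mirrors the handler's flow: a cache hit returns immediately,
// otherwise the (simulated) database query runs and the result is cached.
func loadHomePage(c *ttlCache, queryDB func() string) string {
	if v, ok := c.Get("home_page_data"); ok {
		return v
	}
	v := queryDB()
	c.Put("home_page_data", v, 3600*time.Second)
	return v
}

func main() {
	cache := newTTLCache()
	slowQuery := func() string { return "house list from DB" }
	fmt.Println(loadHomePage(cache, slowQuery)) // first call: miss, queries the "DB"
	fmt.Println(loadHomePage(cache, slowQuery)) // second call: served from cache
}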
func (this *HouseController) GetHouseSearchData() {
resp:=make(map[string]interface{})
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
respData:=make(map[string]interface{})
defer this.RetData(resp)
//1.获取用户发来的参数,aid,sd,ed,sk,p
var aid int
this.Ctx.Input.Bind(&aid,"aid")
var sd string
this.Ctx.Input.Bind(&sd,"sd")
var ed string
this.Ctx.Input.Bind(&ed,"ed")
var sk string
this.Ctx.Input.Bind(&sk,"sk")
var page int
this.Ctx.Input.Bind(&page,"p")
//fmt.Printf("aid = %d,sd = %s,ed =%s,sk =%s,p =%d,==============\n",aid,sd,ed,sk,page)
//2.检验开始时间一定要早于结束时间
//将日期转成指定格式
start_time,_:=time.Parse("2006-01-02 15:04:05",sd+" 00:00:00")
end_time,_:=time.Parse("2006-01-02 15:04:05",ed+" 00:00:00")
if end_time.Before(start_time){ //如果end在start之前,返回错误信息
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]="结束时间必须在开始时间之前"
return
}
//fmt.Printf("##############start_date_time = %v,end_date_time = %v",start_time,end_time)
//3.判断p的合法性,一定要大于0的整数
if page<=0{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]="页数不能小于或等于0"
return
}
//4.尝试从缓存中获取数据,返回查询结果json
//定义一个key,注意这个存入redis中的key值拼接字符串,一定要用strconv.Itoa()转换,不要用string(),否则会出现\x01的效果,读取不了
house_search_key:="house_search_"+strconv.Itoa(aid)
//配置redis连接信息
redis_config_map:=map[string]string{
"key":"lovehome",
"conn":utils.G_redis_addr+":"+utils.G_redis_port,
"dbNum":utils.G_redis_dbnum,
}
//转成json
redis_config,_:=json.Marshal(redis_config_map)
//连接redis
cache_conn, err := cache.NewCache("redis", string(redis_config))
if err!=nil{ | resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
} | random_line_split |
|
house.go | \n",reqData)
if &reqData==nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//3.把房源数据插入到house结构体中
house:=models.House | le=reqData.Title
house.Price,_=strconv.Atoi(reqData.Price)
house.Price=house.Price*100
house.Address=reqData.Address
house.Room_count,_=strconv.Atoi(reqData.Room_count)
house.Acreage,_=strconv.Atoi(reqData.Acreage)
house.Unit=reqData.Unit
house.Beds=reqData.Beds
house.Capacity,_=strconv.Atoi(reqData.Capacity)
house.Deposit,_=strconv.Atoi(reqData.Deposit)
house.Deposit=house.Deposit*100
house.Max_days,_=strconv.Atoi(reqData.Max_days)
house.Min_days,_=strconv.Atoi(reqData.Min_days)
//获取用户的id,通过GetSession方式
user:=models.User{Id:this.GetSession("user_id").(int)}
house.User=&user
//4.处理Area城区
//把取到的area_id转成int
area_id,_:=strconv.Atoi(reqData.Area_id)
//把area_id赋值到结构体Id字段中
area:=models.Area{Id:area_id}
//再把Area结构体数据赋值给house结构体中的Area
//把结构体赋值必须用取地址符&
//这是一对多操作,一个房子只能在一个地区,一个地区可以有多个房子
house.Area=&area
//5.获取到house_id
//创建一个orm对象
o:=orm.NewOrm()
//把部分house数据插入到数据库中,得到house_id
house_id,err:=o.Insert(&house)
if err!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
beego.Debug("house insert id =", house_id, " succ!")
//5.多对多m2m插入,将facilities 一起关联插入到表中
//定义一个设施的结构体数组,先把用户选的多个设施获取到
facilitles:=[]models.Facility{}
//遍历用户发来的设施列表,取出fid.
for _,fid:=range reqData.Facilities{
f_id,_:=strconv.Atoi(fid) //把string转成int
fac:=models.Facility{Id:f_id} //更新每个设备的id
facilitles=append(facilitles,fac) //将每个设备id追加成设施数组
}
//注意,只要house里有house_id后才能用QueryM2M,第一个参数是需要修改的哪个表,我这次要改house表,首先house表里一定要有一个house.Id,然后house.Id没有关联的设施信息,第二个参数为要修改的数据。
//这句的意思其实就是将房屋设施数据插放到house结构体中的Facilities字段所关联的表的字段中
//看下面Facility关联着House,rel(m2m)多对多关系。自然而然的就会将数据插入到关联表中。而这个关联表就是facility_houses
/*
type Facility struct {
Id int `json:"fid"` //设施编号
Name string `orm:"size(32)"` //设施名字
Houses [] *House `orm:"rel(m2m)"` //都有哪些房屋有此设施
}
*/
m2m:=o.QueryM2M(&house,"Facilities")
//得到m2m对象后,我们就可以把刚才获取到的用户设施数组facilitles加到facility_houses中了
num,errM2M:=m2m.Add(facilitles)
if errM2M!=nil||num==0{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
beego.Debug("house m2m facility insert num =", num, " succ!")
//6.返回json和house_id,有id返回说明插入成功,0就说明没成功
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
//创建一个map用来存house_id
respData:=make(map[string]interface{})
respData["house_id"]=house_id
//把house_id的map存到data中,再打包成json
resp["data"]=respData
}
//请求房源详细信息
func (this *HouseController) GetDetailHouseData() {
//用来存json数据的
resp:=make(map[string]interface{})
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
defer this.RetData(resp)
//1.从session获取user_id
user_id:=this.GetSession("user_id")
//2.从请求的url中得到房屋id
//Param中的id值可以随便换,但要是router中的对应
house_id:=this.Ctx.Input.Param(":id")
//转换一下interface{}转成int
h_id,err:=strconv.Atoi(house_id)
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//3.从redis缓存获取当前房屋的数据,如果有该房屋,则直接返回正确的json
redis_config_map:=map[string]string{
"key":"lovehome",
"conn":utils.G_redis_addr+":"+utils.G_redis_port,
"dbNum":utils.G_redis_dbnum,
}
redis_config,_:=json.Marshal(redis_config_map)
cache_conn, err := cache.NewCache("redis", string(redis_config))
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//先把house有的东西返回给house的json
respData:=make(map[string]interface{})
//设置一个变量,每个房子插入redis不能一样,容易覆盖,所以用house_id做为key,比如lovehome:1,lovehome:2
house_page_key:=house_id
house_info_value:=cache_conn.Get(house_page_key)
if house_info_value!=nil{
beego.Debug("======= get house info desc from CACHE!!! ========")
//返回json的user_id
respData["user_id"]=user_id
//返回json的house信息
house_info:=make(map[string]interface{})
//解码json并存到house_info里
json.Unmarshal(house_info_value.([]byte),&house_info)
//将house_info的map返回json的house给前端
respData["house"]=house_info
resp["data"]=respData
return
}
//4.如果缓存没有房屋数据,那么从数据库中获取数据,再存入缓存中,然后返回给前端
o:=orm.NewOrm()
// --- 载入关系查询 -----
house:=models.House{Id:h_id}
//把房子信息读出来
if err:= o.Read(&house);err!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
//5.关联查询area,user,images,facilities等表
o.LoadRelated(&house,"Area")
o.LoadRelated(&house,"User")
o.LoadRelated(&house,"Images")
o.LoadRelated(&house,"Facilities")
//6.将房屋详细的json数据存放redis缓存中
house_info_value,_=json.Marshal(house.To_one_house_desc())
cache_conn.Put(house_page_key,house_info_value,3600*time.Second)
//7.返回json数据给前端。
respData["house"]=house.To_one_house_desc()
respData["user_id"]=user_id
resp["data"]=respData
}
//上传房源图片
func (this *HouseController) UploadHouseImage() {
resp:=make(map[string]interface{})
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
defer this.RetData(resp)
//1.从用户请求中获取到图片数据
fileData,hd,err:=this.GetFile("house_image")
defer fileData.Close() //获取完后等程序执行完后关掉连接
//beego.Info("========",fileData,hd,err)
//没拿到图片
if fileData==nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//2.将用户二进制数据存到fdfs中。得到fileid
suffix:=path.Ext(hd.Filename)
//判断上传文件的合法性
if suffix!=".jpg"&&suffix!=".png"&&suffix!=".gif"&&suffix!=".jpeg"{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RE | {}
house.Tit | identifier_name |
house.go | ecodeText(models.RECODE_OK)
defer this.RetData(resp)
//1.从session获取用户的user_id
user_id:=this.GetSession("user_id")
if user_id==nil{
resp["errno"]=models.RECODE_SESSIONERR
resp["errmsg"]=models.RecodeText(models.RECODE_SESSIONERR)
return
}
//2.从数据库中拿到user_id对应的house数据
//这里先拿到house结构体对象
/*
这里需要注意,因为我们需要查询的是user_id所有的房屋信息,这个用户可能会有多套房,所以我们存房屋信息的结构体要用数组
*/
//select * from house where user.id=user_id
//将house相关联的User和Area一并查询
houses:=[]models.House{} //必须用数组
o:=orm.NewOrm()
//查询house表
qs:=o.QueryTable("house")
//查询user_id=user_id的人的all房子存在houses数组中,将house相关联的User和Area一并查询
num,err:=qs.Filter("user__id",user_id.(int)).RelatedSel().All(&houses)
if err!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
if num==0{
resp["errno"]=models.RECODE_NODATA
resp["errmsg"]=models.RecodeText(models.RECODE_NODATA)
return
}
//遍历所有房源。并添加到数组中
var houses_rep []interface{}
for _,houseinfo:=range houses{
fmt.Printf("house.user = %+v\n", houseinfo.User)
fmt.Printf("house.area = %+v\n", houseinfo.Area)
houses_rep=append(houses_rep,houseinfo.To_house_info())
}
fmt.Printf("houses_rep = %+v\n", houses_rep)
//3.返回打包好的json数据
//创建一个map用来存房源数据
respData:=make(map[string]interface{})
//将数据库里查到的所有房子数组存到这个map中
respData["houses"]=houses_rep
//将这个map再传到data里,返回json
resp["data"]=respData
}
//发布房源信息
func (this *HouseController) PostHouseData() {
//用来存json数据的
resp:=make(map[string]interface{})
defer this.RetData(resp)
//1.解析用户发过来的房源数据,得到房源信息
//先创建一个结构体用来放用户发过来的数据
var reqData HouseInfo
if err:=json.Unmarshal(this.Ctx.Input.RequestBody,&reqData);err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//2.判断数据的合法性
fmt.Printf("%+v\n",reqData)
if &reqData==nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//3.把房源数据插入到house结构体中
house:=models.House{}
house.Title=reqData.Title
house.Price,_=strconv.Atoi(reqData.Price)
house.Price=house.Price*100
house.Address=reqData.Address
house.Room_count,_=strconv.Atoi(reqData.Room_count)
house.Acreage,_=strconv.Atoi(reqData.Acreage)
house.Unit=reqData.Unit
house.Beds=reqData.Beds
house.Capacity,_=strconv.Atoi(reqData.Capacity)
house.Deposit,_=strconv.Atoi(reqData.Deposit)
house.Deposit=house.Deposit*100
house.Max_days,_=strconv.Atoi(reqData.Max_days)
house.Min_days,_=strconv.Atoi(reqData.Min_days)
//获取用户的id,通过GetSession方式
user:=models.User{Id:this.GetSession("user_id").(int)}
house.User=&user
//4.处理Area城区
//把取到的area_id转成int
area_id,_:=strconv.Atoi(reqData.Area_id)
//把area_id赋值到结构体Id字段中
area:=models.Area{Id:area_id}
//再把Area结构体数据赋值给house结构体中的Area
//把结构体赋值必须用取地址符&
//这是一对多操作,一个房子只能在一个地区,一个地区可以有多个房子
house.Area=&area
//5.获取到house_id
//创建一个orm对象
o:=orm.NewOrm()
//把部分house数据插入到数据库中,得到house_id
house_id,err:=o.Insert(&house)
if err!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
beego.Debug("house insert id =", house_id, " succ!")
//5.多对多m2m插入,将facilities 一起关联插入到表中
//定义一个设施的结构体数组,先把用户选的多个设施获取到
facilitles:=[]models.Facility{}
//遍历用户发来的设施列表,取出fid.
for _,fid:=range reqData.Facilities{
f_id,_:=strconv.Atoi(fid) //把string转成int
fac:=models.Facility{Id:f_id} //更新每个设备的id
facilitles=append(facilitles,fac) //将每个设备id追加成设施数组
}
//注意,只要house里有house_id后才能用QueryM2M,第一个参数是需要修改的哪个表,我这次要改house表,首先house表里一定要有一个house.Id,然后house.Id没有关联的设施信息,第二个参数为要修改的数据。
//这句的意思其实就是将房屋设施数据插放到house结构体中的Facilities字段所关联的表的字段中
//看下面Facility关联着House,rel(m2m)多对多关系。自然而然的就会将数据插入到关联表中。而这个关联表就是facility_houses
/*
type Facility struct {
Id int `json:"fid"` //设施编号
Name string `orm:"size(32)"` //设施名字
Houses [] *House `orm:"rel(m2m)"` //都有哪些房屋有此设施
}
*/
m2m:=o.QueryM2M(&house,"Facilities")
//得到m2m对象后,我们就可以把刚才获取到的用户设施数组facilitles加到facility_houses中了
num,errM2M:=m2m.Add(facilitles)
if errM2M!=nil||num==0{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
beego.Debug("house m2m facility insert num =", num, " succ!")
//6.返回json和house_id,有id返回说明插入成功,0就说明没成功
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
//创建一个map用来存house_id
respData:=make(map[string]interface{})
respData["house_id"]=house_id
//把house_id的map存到data中,再打包成json
resp["data"]=respData
}
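// Illustrative sketch (standalone file, not beego code): the rel(m2m) relation
// described above is ultimately just a join table with one row per
// (facility, house) pair. The helper below shows that idea with plain
// database/sql; the table and column names ("facility_houses", "facility_id",
// "house_id") are assumptions about the generated schema, not something
// verified against beego's naming rules.
package main

import "database/sql"

// linkFacilities inserts one join-table row per facility for the given house,
// inside a single transaction so a partial failure leaves nothing behind.
func linkFacilities(db *sql.DB, houseID int64, facilityIDs []int64) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // becomes a no-op after a successful Commit

	stmt, err := tx.Prepare("INSERT INTO facility_houses (facility_id, house_id) VALUES (?, ?)")
	if err != nil {
		return err
	}
	defer stmt.Close()

	for _, fid := range facilityIDs {
		if _, err := stmt.Exec(fid, houseID); err != nil {
			return err
		}
	}
	return tx.Commit()
}

func main() {
	// Opening a real connection needs a driver and DSN, which is outside this
	// sketch; linkFacilities only needs a *sql.DB, e.g.:
	//   err := linkFacilities(db, houseID, []int64{1, 2, 3})
	_ = linkFacilities
}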
//请求房源详细信息
func (this *HouseController) GetDetailHouseData() {
//用来存json数据的
resp:=make(map[string]interface{})
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
defer this.RetData(resp)
//1.从session获取user_id
user_id:=this.GetSession("user_id")
//2.从请求的url中得到房屋id
//Param中的id值可以随便换,但要是router中的对应
house_id:=this.Ctx.Input.Param(":id")
//转换一下interface{}转成int
h_id,err:=strconv.Atoi(house_id)
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//3.从redis缓存获取当前房屋的数据,如果有该房屋,则直接返回正确的json
redis_config_map:=map[string]string{
"key":"lovehome",
"conn":utils.G_redis_addr+":"+utils.G_redis_port,
"dbNum":utils.G_redis_dbnum,
}
redis_config,_:=json.Marshal(redis_config_map)
cache_conn, err := cache.NewCache("redis", string(redis_config))
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//先把house有的东西返回给house的json
respData:=make(map[string]interface{})
//设置一个变量,每个房子插入redis不能一样,容易覆盖,所以用house_id做为key,比如lovehome:1,lovehome:2
house_page_key:=house_id
house_info_value:=cache_conn.Get(house_page_key)
if house_info_value!=nil{
beego.Debug("======= get house info desc from CACHE | rno"]=models.RECODE_OK
resp["errmsg"]=models.R | identifier_body |
|
house.go | �数组
}
//注意,只要house里有house_id后才能用QueryM2M,第一个参数是需要修改的哪个表,我这次要改house表,首先house表里一定要有一个house.Id,然后house.Id没有关联的设施信息,第二个参数为要修改的数据。
//这句的意思其实就是将房屋设施数据插放到house结构体中的Facilities字段所关联的表的字段中
//看下面Facility关联着House,rel(m2m)多对多关系。自然而然的就会将数据插入到关联表中。而这个关联表就是facility_houses
/*
type Facility struct {
Id int `json:"fid"` //设施编号
Name string `orm:"size(32)"` //设施名字
Houses [] *House `orm:"rel(m2m)"` //都有哪些房屋有此设施
}
*/
m2m:=o.QueryM2M(&house,"Facilities")
//得到m2m对象后,我们就可以把刚才获取到的用户设施数组facilitles加到facility_houses中了
num,errM2M:=m2m.Add(facilitles)
if errM2M!=nil||num==0{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
beego.Debug("house m2m facility insert num =", num, " succ!")
//6.返回json和house_id,有id返回说明插入成功,0就说明没成功
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
//创建一个map用来存house_id
respData:=make(map[string]interface{})
respData["house_id"]=house_id
//把house_id的map存到data中,再打包成json
resp["data"]=respData
}
//请求房源详细信息
func (this *HouseController) GetDetailHouseData() {
//用来存json数据的
resp:=make(map[string]interface{})
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
defer this.RetData(resp)
//1.从session获取user_id
user_id:=this.GetSession("user_id")
//2.从请求的url中得到房屋id
//Param中的id值可以随便换,但要是router中的对应
house_id:=this.Ctx.Input.Param(":id")
//转换一下interface{}转成int
h_id,err:=strconv.Atoi(house_id)
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//3.从redis缓存获取当前房屋的数据,如果有该房屋,则直接返回正确的json
redis_config_map:=map[string]string{
"key":"lovehome",
"conn":utils.G_redis_addr+":"+utils.G_redis_port,
"dbNum":utils.G_redis_dbnum,
}
redis_config,_:=json.Marshal(redis_config_map)
cache_conn, err := cache.NewCache("redis", string(redis_config))
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//先把house有的东西返回给house的json
respData:=make(map[string]interface{})
//设置一个变量,每个房子插入redis不能一样,容易覆盖,所以用house_id做为key,比如lovehome:1,lovehome:2
house_page_key:=house_id
house_info_value:=cache_conn.Get(house_page_key)
if house_info_value!=nil{
beego.Debug("======= get house info desc from CACHE!!! ========")
//返回json的user_id
respData["user_id"]=user_id
//返回json的house信息
house_info:=make(map[string]interface{})
//解码json并存到house_info里
json.Unmarshal(house_info_value.([]byte),&house_info)
//将house_info的map返回json的house给前端
respData["house"]=house_info
resp["data"]=respData
return
}
//4.如果缓存没有房屋数据,那么从数据库中获取数据,再存入缓存中,然后返回给前端
o:=orm.NewOrm()
// --- 载入关系查询 -----
house:=models.House{Id:h_id}
//把房子信息读出来
if err:= o.Read(&house);err!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
//5.关联查询area,user,images,facilities等表
o.LoadRelated(&house,"Area")
o.LoadRelated(&house,"User")
o.LoadRelated(&house,"Images")
o.LoadRelated(&house,"Facilities")
//6.将房屋详细的json数据存放redis缓存中
house_info_value,_=json.Marshal(house.To_one_house_desc())
cache_conn.Put(house_page_key,house_info_value,3600*time.Second)
//7.返回json数据给前端。
respData["house"]=house.To_one_house_desc()
respData["user_id"]=user_id
resp["data"]=respData
}
//上传房源图片
func (this *HouseController) UploadHouseImage() {
resp:=make(map[string]interface{})
resp["errno"]=models.RECODE_OK
resp["errmsg"]=models.RecodeText(models.RECODE_OK)
defer this.RetData(resp)
//1.从用户请求中获取到图片数据
fileData,hd,err:=this.GetFile("house_image")
defer fileData.Close() //获取完后等程序执行完后关掉连接
//beego.Info("========",fileData,hd,err)
//没拿到图片
if fileData==nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
if err!=nil{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//2.将用户二进制数据存到fdfs中。得到fileid
suffix:=path.Ext(hd.Filename)
//判断上传文件的合法性
if suffix!=".jpg"&&suffix!=".png"&&suffix!=".gif"&&suffix!=".jpeg"{
resp["errno"]=models.RECODE_REQERR
resp["errmsg"]=models.RecodeText(models.RECODE_REQERR)
return
}
//去掉.
suffixStr:=suffix[1:]
//创建hd.Size大小的[]byte数组用来存放fileData.Read读出来的[]byte数据
fileBuffer:=make([]byte,hd.Size)
//读出的数据存到[]byte数组中
_,err=fileData.Read(fileBuffer)
if err!=nil{
resp["errno"]=models.RECODE_IOERR
resp["errmsg"]=models.RecodeText(models.RECODE_IOERR)
return
}
//将图片上传到fdfs获取到fileid
uploadResponse,err:=UploadByBuffer(fileBuffer,suffixStr)
//3.从请求的url中获得house_id
house_id:=this.Ctx.Input.Param(":id")
//4.查看该房屋的index_image_url主图是否为空
house:=models.House{} //打开house结构体
//house结构体拿到houseid数据
house.Id,_=strconv.Atoi(house_id)
o:=orm.NewOrm() //创建orm
errRead:=o.Read(&house) //读取house数据库where user.id
if errRead!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
//查询index_image_url是否为空
//为空则更将fileid路径赋值给index_image_url
if house.Index_image_url==""{
house.Index_image_url=uploadResponse.RemoteFileId
}
//5.主图不为空,将该图片的fileid字段追加(关联查询)到houseimage字段中插入到house_image表中,并拿到了HouseImage,里面也有数据了
//HouseImage功能就是如果主图有了,就追加其它图片的。
house_image:=models.HouseImage{House:&house,Url:uploadResponse.RemoteFileId}
//将house_image和house相关联,往house.Images里追加附加图片,可以追加多个
house.Images=append(house.Images,&house_image)//向把HouseImage对象的数据添加到house.Images
//将house_image入库,插入到house_image表中
if _,err:=o.Insert(&house_image);err!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=models.RecodeText(models.RECODE_DBERR)
return
}
//将house更新入库,插入到house中
if _,err:=o.Update(&house);err!=nil{
resp["errno"]=models.RECODE_DBERR
resp["errmsg"]=mode | ls.RecodeText(models.RECODE_DBERR)
return
}
//6.拼接完整域名url_fileid
respData:=make(map[string]string)
re | conditional_block |
|
SearchEngine.py |
elif i%2 == 0:
if i > 2:
os.remove("temp2.json")
f = open("temp2.json", 'w')
f1 = open(files[i])
f2 = open(files[i+1])
line1 = f1.readline()
line2 = f2.readline()
while(line1 != "" or line2 != ""):
jsonObj1 = {"~~~": ""} if line1 == "" else json.loads(line1) # highest ASCII val
jsonObj2 = {"~~~": ""} if line2 == "" else json.loads(line2) # highest ASCII val
key1, key2 = list(jsonObj1.keys())[0], list(jsonObj2.keys())[0]
if key1 == key2:
jsonObj1[key1].update(jsonObj2[key2])
updated_posting = jsonObj1
line1 = f1.readline()
line2 = f2.readline()
elif key1 < key2:
updated_posting = {key1:jsonObj1[key1]}
line1 = f1.readline()
elif key1 > key2:
updated_posting = {key2:jsonObj2[key2]}
line2 = f2.readline()
else:
print(f"key1: {key1}\tkey2: {key2}")
raise Exception("Error error error")
json.dump(updated_posting, f)
f.write("\n")
f1.close()
f2.close()
f.close()
if i%2 == 1:
files[i+1] = "temp1.json"
elif i%2 == 0:
files[i+1] = "temp2.json"
'''
-makes the inverted index
-For the sake of my computer's well being, the index is partitioned off into
4 pieces and is later merged.
'''
def makeIndex():
ID = 0
ID_url_dict = dict()
doc_count = 56000
doc_counter = 0 #used for the partial indexes
titles_set = defaultdict(set)
headings_set = defaultdict(set)
bold_set = defaultdict(set)
important_sets = [titles_set, bold_set, headings_set]
partitions = 4
num_partitions = 1
ps = PorterStemmer()
tokenizer = RegexpTokenizer(r"[a-zA-Z0-9']+")
inverted_index = dict()
#simhash_dict = dict()
for directory in os.listdir(os.getcwd() + "\\DEV"):
path = os.getcwd() + "\\DEV\\" + directory
try:
os.listdir(path)
except:
continue
for file in os.listdir(path):
with open(path + '\\' + file) as f:
data = json.loads(f.read())
soup = bs(data['content'], 'lxml') # parse using bs and lxml
#simhash_file = list() # contains all the hashed words of the documentuih
try:
body = soup.body.get_text()
except:
body = ""
words = tokenizer.tokenize(body) # tokenize the body
for w in words: # stemming
word = ps.stem(w.lower().rstrip())
#simhash_file.append(sim_hashfn(word))
try:
inverted_index[word][ID] += 1
except KeyError:
try:
inverted_index[word].update({ID:1})
except KeyError:
inverted_index[word] = {ID:1}
titles = soup.find_all('title') # find all title tags
bolds = soup.find_all(re.compile(r'.*^(?:b|strong)$')) # find all bold tags
headers = soup.find_all(re.compile('^h[1-6]$')) # find all heading tags
# tokenize all the important/title words
important_words = [titles, bolds, headers]
for i in range(3):
if important_words[i] == None:
continue
for important_word in important_words[i]:
if important_word.string == None:
continue
for word in tokenizer.tokenize(important_word.string):
w = ps.stem(word.lower().rstrip())
important_sets[i][ID].add(w)
# partial index dump (STILL IN THE WORKS)
if doc_counter > doc_count/partitions:
inverted_index = {k:v for k, v in sorted(inverted_index.items())}
with open(f"P{num_partitions}.json", 'w') as f:
for k, v in inverted_index.items():
json.dump({k: v}, f)
f.write('\n')
inverted_index.clear()
num_partitions += 1
doc_counter = 0
ID_url_dict[ID] = data['url']
#simhash_dict.update({ID:simhash_file})
ID += 1
doc_counter += 1
print(ID)
inverted_index = {k:v for k, v in sorted(inverted_index.items())}
with open(f"P{num_partitions}.json", 'w') as f:
for k, v in inverted_index.items():
json.dump({k: v}, f)
f.write('\n')
inverted_index.clear()
with open("info.p", 'wb') as f: # save these informations for later
pickle.dump((ID_url_dict,important_sets) , f)
print("done serializing\n")
'''
1. goes through index line by line (we assume it's sorted here)
2. calculates the tf-idf while taking the important words into account
3. writes the {word: tf-idf} into a file and the {word: <file pointer -> tf-idf entry>} into another
4. also creates and writes simhash into a file
'''
def calculate_helpers():
with open("info.p", 'rb') as f:
info = pickle.load(f)
urls = info[0] # dict: ID --> url
important_sets = info[1] # list of dict(set)
N = len(urls)
simhash_dict = {}
f1 = open("merge_index.json")
f2 = open("tf_idf.json", 'w')
f3 = open("fp.json", 'w')
for jsonObj in f1:
obj = json.loads(jsonObj)
for word, postings in obj.items():
tf_idf = {} # will be dumping this every iteration for the sake of my poor RAM
for docID, appearances in postings.items():
# weighing based on important words
scale = 1
for i in range(3):
try:
if word in important_sets[i][docID]:
if i == 0: # is a title
scale *= 1.5
elif i == 1: # is a bold
scale *= 1.1
elif i == 2: # is a header
scale *= 1.2
else:
scale *= 1
except KeyError:
scale *= 1
tf = 1 + np.log10(appearances) # tf
idf = np.log10(N/len(postings)) # idf
score = tf*idf*scale
try:
tf_idf[word].update({docID: score})
except KeyError:
tf_idf[word] = ({docID: score})
try:
simhash_dict[docID].update({sim_hashfn(word): score})
except KeyError:
simhash_dict[docID] = {sim_hashfn(word): score}
json.dump({word:f2.tell()}, f3)
f3.write('\n')
json.dump(tf_idf, f2)
f2.write('\n')
f1.close()
f2.close()
f3.close()
# calculate simhash
f = open("simhash_scores.json", 'w')
for ID, hashed_words in simhash_dict.items():
simhash_score = ''
for i in range(64): # all hashed words are 64 bit binary strings
i_th_binary = 0 # the i-th binary value
for hashed_word, weight in hashed_words.items():
if hashed_word[i] == '0':
i_th_binary -= weight
elif hashed_word[i] == '1':
i_th_binary += weight
if i_th_binary > 0:
simhash_score += '1'
else:
simhash_score += '0'
json.dump({ID:simhash_score}, f)
f.write("\n")
f.close()
def query_prompt():
ps = PorterStemmer()
tokenizer = RegexpTokenizer(r"[a-zA-Z0-9']+")
# start prompt
start = input("Enter 'start'(s) to get started or 'quit'(q) to quit: \n")
start = start.lower()
while(start != "start" and start != "s"):
if start == 'q' or start == 'quit':
exit()
start = input("Unknown command, try again: \n")
# loading auxillary structs
print("Getting ready...")
stop_words = set(stopwords.words("english"))
info = (pickle.load( open( "info.p", 'rb' ) ))
urls = info[0] # ID --> url dict
limit = 5 # limit of number of websites shown (-1 means no limit)
fp_dict = {}
simhash | if i > 2:
os.remove("temp1.json")
f = open("temp1.json", 'w') | conditional_block |
|
SearchEngine.py | line2 = f2.readline()
while(line1 != "" or line2 != ""):
jsonObj1 = {"~~~": ""} if line1 == "" else json.loads(line1) # highest ASCII val
jsonObj2 = {"~~~": ""} if line2 == "" else json.loads(line2) # highest ASCII val
key1, key2 = list(jsonObj1.keys())[0], list(jsonObj2.keys())[0]
if key1 == key2:
jsonObj1[key1].update(jsonObj2[key2])
updated_posting = jsonObj1
line1 = f1.readline()
line2 = f2.readline()
elif key1 < key2:
updated_posting = {key1:jsonObj1[key1]}
line1 = f1.readline()
elif key1 > key2:
updated_posting = {key2:jsonObj2[key2]}
line2 = f2.readline()
else:
print(f"key1: {key1}\tkey2: {key2}")
raise Exception("Error error error")
json.dump(updated_posting, f)
f.write("\n")
f1.close()
f2.close()
f.close()
if i%2 == 1:
files[i+1] = "temp1.json"
elif i%2 == 0:
files[i+1] = "temp2.json"
'''
-makes the inverted index
-For the sake of my computer's well being, the index is partitioned off into
4 pieces and is later merged.
'''
def makeIndex():
ID = 0
ID_url_dict = dict()
doc_count = 56000
doc_counter = 0 #used for the partial indexes
titles_set = defaultdict(set)
headings_set = defaultdict(set)
bold_set = defaultdict(set)
important_sets = [titles_set, bold_set, headings_set]
partitions = 4
num_partitions = 1
ps = PorterStemmer()
tokenizer = RegexpTokenizer(r"[a-zA-Z0-9']+")
inverted_index = dict()
#simhash_dict = dict()
for directory in os.listdir(os.getcwd() + "\\DEV"):
path = os.getcwd() + "\\DEV\\" + directory
try:
os.listdir(path)
except:
continue
for file in os.listdir(path):
with open(path + '\\' + file) as f:
data = json.loads(f.read())
soup = bs(data['content'], 'lxml') # parse using bs and lxml
#simhash_file = list() # contains all the hashed words of the documentuih
try:
body = soup.body.get_text()
except:
body = ""
words = tokenizer.tokenize(body) # tokenize the body
for w in words: # stemming
word = ps.stem(w.lower().rstrip())
#simhash_file.append(sim_hashfn(word))
try:
inverted_index[word][ID] += 1
except KeyError:
try:
inverted_index[word].update({ID:1})
except KeyError:
inverted_index[word] = {ID:1}
titles = soup.find_all('title') # find all title tags
bolds = soup.find_all(re.compile(r'.*^(?:b|strong)$')) # find all bold tags
headers = soup.find_all(re.compile('^h[1-6]$')) # find all heading tags
# tokenize all the important/title words
important_words = [titles, bolds, headers]
for i in range(3):
if important_words[i] == None:
continue
for important_word in important_words[i]:
if important_word.string == None:
continue
for word in tokenizer.tokenize(important_word.string):
w = ps.stem(word.lower().rstrip())
important_sets[i][ID].add(w)
# partial index dump (STILL IN THE WORKS)
if doc_counter > doc_count/partitions:
inverted_index = {k:v for k, v in sorted(inverted_index.items())}
with open(f"P{num_partitions}.json", 'w') as f:
for k, v in inverted_index.items():
json.dump({k: v}, f)
f.write('\n')
inverted_index.clear()
num_partitions += 1
doc_counter = 0
ID_url_dict[ID] = data['url']
#simhash_dict.update({ID:simhash_file})
ID += 1
doc_counter += 1
print(ID)
inverted_index = {k:v for k, v in sorted(inverted_index.items())}
with open(f"P{num_partitions}.json", 'w') as f:
for k, v in inverted_index.items():
json.dump({k: v}, f)
f.write('\n')
inverted_index.clear()
with open("info.p", 'wb') as f: # save these informations for later
pickle.dump((ID_url_dict,important_sets) , f)
print("done serializing\n")
'''
1. goes through index line by line (we assume it's sorted here)
2. calculates the tf-idf while taking the important words into account
3. writes the {word: tf-idf} into a file and the {word: <file pointer -> tf-idf entry>} into another
4. also creates and writes simhash into a file
'''
def calculate_helpers():
with open("info.p", 'rb') as f:
info = pickle.load(f)
urls = info[0] # dict: ID --> url
important_sets = info[1] # list of dict(set)
N = len(urls)
simhash_dict = {}
f1 = open("merge_index.json")
f2 = open("tf_idf.json", 'w')
f3 = open("fp.json", 'w')
for jsonObj in f1:
obj = json.loads(jsonObj)
for word, postings in obj.items():
tf_idf = {} # will be dumping this every iteration for the sake of my poor RAM
for docID, appearances in postings.items():
# weighing based on important words
scale = 1
for i in range(3):
try:
if word in important_sets[i][docID]:
if i == 0: # is a title
scale *= 1.5
elif i == 1: # is a bold
scale *= 1.1
elif i == 2: # is a header
scale *= 1.2
else:
scale *= 1
except KeyError:
scale *= 1
tf = 1 + np.log10(appearances) # tf
idf = np.log10(N/len(postings)) # idf
score = tf*idf*scale
try:
tf_idf[word].update({docID: score})
except KeyError:
tf_idf[word] = ({docID: score})
try:
simhash_dict[docID].update({sim_hashfn(word): score})
except KeyError:
simhash_dict[docID] = {sim_hashfn(word): score}
json.dump({word:f2.tell()}, f3)
f3.write('\n')
json.dump(tf_idf, f2)
f2.write('\n')
f1.close()
f2.close()
f3.close()
# calculate simhash
f = open("simhash_scores.json", 'w')
for ID, hashed_words in simhash_dict.items():
simhash_score = ''
for i in range(64): # all hashed words are 64 bit binary strings
i_th_binary = 0 # the i-th binary value
for hashed_word, weight in hashed_words.items():
if hashed_word[i] == '0':
i_th_binary -= weight
elif hashed_word[i] == '1':
i_th_binary += weight
if i_th_binary > 0:
simhash_score += '1'
else:
simhash_score += '0'
json.dump({ID:simhash_score}, f)
f.write("\n")
f.close()
def query_prompt():
| for line in f:
fp_dict.update(json.loads(line)) # dict of the file pointers
with open("simhash_scores.json") as f:
for line in f:
simhash_scores.update(json.loads(line))
print(f"{len(fp_dict)} words in index")
f = open("tf_idf.json") # open our index
print | ps = PorterStemmer()
tokenizer = RegexpTokenizer(r"[a-zA-Z0-9']+")
# start prompt
start = input("Enter 'start'(s) to get started or 'quit'(q) to quit: \n")
start = start.lower()
while(start != "start" and start != "s"):
if start == 'q' or start == 'quit':
exit()
start = input("Unknown command, try again: \n")
# loading auxillary structs
print("Getting ready...")
stop_words = set(stopwords.words("english"))
info = (pickle.load( open( "info.p", 'rb' ) ))
urls = info[0] # ID --> url dict
limit = 5 # limit of number of websites shown (-1 means no limit)
fp_dict = {}
simhash_scores = {}
with open("fp.json") as f: | identifier_body |
SearchEngine.py | ():
for file in os.listdir(os.getcwd()):
if re.match(r'^P\d\.json$', file):
os.remove(file)
try:
os.remove("temp1.json")
except:
pass
try:
os.remove("temp2.json")
except:
pass
def merge_partitions():
files = []
for file in os.listdir(os.getcwd()):
if re.match(r'^P\d\.json$', file):
files.append(file)
for i in range(len(files)-1):
if i == len(files) - 2:
print("last file")
f = open("merge_index.json", 'w')
elif i%2 == 1:
if i > 2:
os.remove("temp1.json")
f = open("temp1.json", 'w')
elif i%2 == 0:
if i > 2:
os.remove("temp2.json")
f = open("temp2.json", 'w')
f1 = open(files[i])
f2 = open(files[i+1])
line1 = f1.readline()
line2 = f2.readline()
while(line1 != "" or line2 != ""):
jsonObj1 = {"~~~": ""} if line1 == "" else json.loads(line1) # highest ASCII val
jsonObj2 = {"~~~": ""} if line2 == "" else json.loads(line2) # highest ASCII val
key1, key2 = list(jsonObj1.keys())[0], list(jsonObj2.keys())[0]
if key1 == key2:
jsonObj1[key1].update(jsonObj2[key2])
updated_posting = jsonObj1
line1 = f1.readline()
line2 = f2.readline()
elif key1 < key2:
updated_posting = {key1:jsonObj1[key1]}
line1 = f1.readline()
elif key1 > key2:
updated_posting = {key2:jsonObj2[key2]}
line2 = f2.readline()
else:
print(f"key1: {key1}\tkey2: {key2}")
raise Exception("Error error error")
json.dump(updated_posting, f)
f.write("\n")
f1.close()
f2.close()
f.close()
if i%2 == 1:
files[i+1] = "temp1.json"
elif i%2 == 0:
files[i+1] = "temp2.json"
'''
-makes the inverted index
-For the sake of my computer's well being, the index is partitioned off into
4 pieces and is later merged.
'''
def makeIndex():
ID = 0
ID_url_dict = dict()
doc_count = 56000
doc_counter = 0 #used for the partial indexes
titles_set = defaultdict(set)
headings_set = defaultdict(set)
bold_set = defaultdict(set)
important_sets = [titles_set, bold_set, headings_set]
partitions = 4
num_partitions = 1
ps = PorterStemmer()
tokenizer = RegexpTokenizer(r"[a-zA-Z0-9']+")
inverted_index = dict()
#simhash_dict = dict()
for directory in os.listdir(os.getcwd() + "\\DEV"):
path = os.getcwd() + "\\DEV\\" + directory
try:
os.listdir(path)
except:
continue
for file in os.listdir(path):
with open(path + '\\' + file) as f:
data = json.loads(f.read())
soup = bs(data['content'], 'lxml') # parse using bs and lxml
#simhash_file = list() # contains all the hashed words of the documentuih
try:
body = soup.body.get_text()
except:
body = ""
words = tokenizer.tokenize(body) # tokenize the body
for w in words: # stemming
word = ps.stem(w.lower().rstrip())
#simhash_file.append(sim_hashfn(word))
try:
inverted_index[word][ID] += 1
except KeyError:
try:
inverted_index[word].update({ID:1})
except KeyError:
inverted_index[word] = {ID:1}
titles = soup.find_all('title') # find all title tags
bolds = soup.find_all(re.compile(r'.*^(?:b|strong)$')) # find all bold tags
headers = soup.find_all(re.compile('^h[1-6]$')) # find all heading tags
# tokenize all the important/title words
important_words = [titles, bolds, headers]
for i in range(3):
if important_words[i] == None:
continue
for important_word in important_words[i]:
if important_word.string == None:
continue
for word in tokenizer.tokenize(important_word.string):
w = ps.stem(word.lower().rstrip())
important_sets[i][ID].add(w)
# partial index dump (STILL IN THE WORKS)
if doc_counter > doc_count/partitions:
inverted_index = {k:v for k, v in sorted(inverted_index.items())}
with open(f"P{num_partitions}.json", 'w') as f:
for k, v in inverted_index.items():
json.dump({k: v}, f)
f.write('\n')
inverted_index.clear()
num_partitions += 1
doc_counter = 0
ID_url_dict[ID] = data['url']
#simhash_dict.update({ID:simhash_file})
ID += 1
doc_counter += 1
print(ID)
inverted_index = {k:v for k, v in sorted(inverted_index.items())}
with open(f"P{num_partitions}.json", 'w') as f:
for k, v in inverted_index.items():
json.dump({k: v}, f)
f.write('\n')
inverted_index.clear()
with open("info.p", 'wb') as f: # save these informations for later
pickle.dump((ID_url_dict,important_sets) , f)
print("done serializing\n")
'''
1. goes through index line by line (we assume it's sorted here)
2. calculates the tf-idf while taking the important words into account
3. writes the {word: tf-idf} into a file and the {word: <file pointer -> tf-idf entry>} into another
4. also creates and writes simhash into a file
'''
def calculate_helpers():
with open("info.p", 'rb') as f:
info = pickle.load(f)
urls = info[0] # dict: ID --> url
important_sets = info[1] # list of dict(set)
N = len(urls)
simhash_dict = {}
f1 = open("merge_index.json")
f2 = open("tf_idf.json", 'w')
f3 = open("fp.json", 'w')
for jsonObj in f1:
obj = json.loads(jsonObj)
for word, postings in obj.items():
tf_idf = {} # will be dumping this every iteration for the sake of my poor RAM
for docID, appearances in postings.items():
# weighing based on important words
scale = 1
for i in range(3):
try:
if word in important_sets[i][docID]:
if i == 0: # is a title
scale *= 1.5
elif i == 1: # is a bold
scale *= 1.1
elif i == 2: # is a header
scale *= 1.2
else:
scale *= 1
except KeyError:
scale *= 1
tf = 1 + np.log10(appearances) # tf
idf = np.log10(N/len(postings)) # idf
score = tf*idf*scale
try:
tf_idf[word].update({docID: score})
except KeyError:
tf_idf[word] = ({docID: score})
try:
simhash_dict[docID].update({sim_hashfn(word): score})
except KeyError:
simhash_dict[docID] = {sim_hashfn(word): score}
json.dump({word:f2.tell()}, f3)
f3.write('\n')
json.dump(tf_idf, f2)
f2.write('\n')
f1.close()
f2.close()
f3.close()
# calculate simhash
f = open("simhash_scores.json", 'w')
for ID, hashed_words in simhash_dict.items():
simhash_score = ''
for i in range(64): # all hashed words are 64 bit binary strings
i_th_binary = 0 # the i-th binary value
for hashed_word, weight in hashed_words.items():
if hashed_word[i] == '0':
i_th_binary -= weight
elif hashed_word[i] == '1':
i_th_binary += weight
if i_th_binary > 0:
simhash_score += '1'
else:
simhash_score += '0'
json.dump({ID:simhash_score}, f)
f.write("\n")
f.close()
def query_prompt():
ps = PorterStemmer()
tokenizer = RegexpTokenizer(r"[a-zA-Z0-9']+")
# start prompt
start = input("Enter 'start'(s | delete_partitions | identifier_name |
|
SearchEngine.py | line2 = f2.readline()
while(line1 != "" or line2 != ""):
jsonObj1 = {"~~~": ""} if line1 == "" else json.loads(line1) # highest ASCII val
jsonObj2 = {"~~~": ""} if line2 == "" else json.loads(line2) # highest ASCII val
key1, key2 = list(jsonObj1.keys())[0], list(jsonObj2.keys())[0]
if key1 == key2:
jsonObj1[key1].update(jsonObj2[key2])
updated_posting = jsonObj1
line1 = f1.readline()
line2 = f2.readline()
elif key1 < key2:
updated_posting = {key1:jsonObj1[key1]}
line1 = f1.readline()
elif key1 > key2:
updated_posting = {key2:jsonObj2[key2]}
line2 = f2.readline()
else:
print(f"key1: {key1}\tkey2: {key2}")
raise Exception("Error error error")
json.dump(updated_posting, f)
f.write("\n")
f1.close()
f2.close()
f.close()
if i%2 == 1:
files[i+1] = "temp1.json"
elif i%2 == 0:
files[i+1] = "temp2.json"
'''
-makes the inverted index
-For the sake of my computer's well being, the index is partitioned off into
4 pieces and is later merged.
'''
def makeIndex():
ID = 0
ID_url_dict = dict()
doc_count = 56000
doc_counter = 0 #used for the partial indexes
titles_set = defaultdict(set)
headings_set = defaultdict(set)
bold_set = defaultdict(set)
important_sets = [titles_set, bold_set, headings_set]
partitions = 4
num_partitions = 1
ps = PorterStemmer()
tokenizer = RegexpTokenizer(r"[a-zA-Z0-9']+")
inverted_index = dict()
#simhash_dict = dict()
for directory in os.listdir(os.getcwd() + "\\DEV"):
path = os.getcwd() + "\\DEV\\" + directory
try:
os.listdir(path)
except:
continue
for file in os.listdir(path):
with open(path + '\\' + file) as f:
data = json.loads(f.read())
soup = bs(data['content'], 'lxml') # parse using bs and lxml
#simhash_file = list() # contains all the hashed words of the documentuih
try:
body = soup.body.get_text()
except:
body = ""
words = tokenizer.tokenize(body) # tokenize the body
for w in words: # stemming
word = ps.stem(w.lower().rstrip())
#simhash_file.append(sim_hashfn(word))
try:
inverted_index[word][ID] += 1
except KeyError:
try:
inverted_index[word].update({ID:1})
except KeyError:
inverted_index[word] = {ID:1}
titles = soup.find_all('title') # find all title tags
bolds = soup.find_all(re.compile(r'.*^(?:b|strong)$')) # find all bold tags
headers = soup.find_all(re.compile('^h[1-6]$')) # find all heading tags
# tokenize all the important/title words
important_words = [titles, bolds, headers]
for i in range(3):
if important_words[i] == None:
continue
for important_word in important_words[i]:
if important_word.string == None:
continue
for word in tokenizer.tokenize(important_word.string):
w = ps.stem(word.lower().rstrip())
important_sets[i][ID].add(w)
# partial index dump (STILL IN THE WORKS)
if doc_counter > doc_count/partitions:
inverted_index = {k:v for k, v in sorted(inverted_index.items())}
with open(f"P{num_partitions}.json", 'w') as f:
for k, v in inverted_index.items():
json.dump({k: v}, f)
f.write('\n')
inverted_index.clear()
num_partitions += 1
doc_counter = 0
ID_url_dict[ID] = data['url']
#simhash_dict.update({ID:simhash_file})
ID += 1
doc_counter += 1
print(ID)
inverted_index = {k:v for k, v in sorted(inverted_index.items())}
with open(f"P{num_partitions}.json", 'w') as f:
for k, v in inverted_index.items():
json.dump({k: v}, f)
f.write('\n')
inverted_index.clear()
with open("info.p", 'wb') as f: # save these informations for later
pickle.dump((ID_url_dict,important_sets) , f)
print("done serializing\n")
'''
1. goes through index line by line (we assume it's sorted here)
2. calculates the tf-idf while taking the important words into account
3. writes the {word: tf-idf} into a file and the {word: <file pointer -> tf-idf entry>} into another
4. also creates and writes simhash into a file
'''
def calculate_helpers():
with open("info.p", 'rb') as f:
info = pickle.load(f)
urls = info[0] # dict: ID --> url
important_sets = info[1] # list of dict(set)
N = len(urls)
simhash_dict = {}
f1 = open("merge_index.json")
f2 = open("tf_idf.json", 'w')
f3 = open("fp.json", 'w')
for jsonObj in f1:
obj = json.loads(jsonObj)
for word, postings in obj.items():
tf_idf = {} # will be dumping this every iteration for the sake of my poor RAM
for docID, appearances in postings.items():
# weighing based on important words
scale = 1
for i in range(3):
try:
if word in important_sets[i][docID]:
if i == 0: # is a title
scale *= 1.5
elif i == 1: # is a bold
scale *= 1.1
elif i == 2: # is a header
scale *= 1.2
else:
scale *= 1
except KeyError:
scale *= 1
tf = 1 + np.log10(appearances) # tf
idf = np.log10(N/len(postings)) # idf
score = tf*idf*scale
try:
tf_idf[word].update({docID: score})
except KeyError:
tf_idf[word] = ({docID: score})
try:
simhash_dict[docID].update({sim_hashfn(word): score}) | json.dump({word:f2.tell()}, f3)
f3.write('\n')
json.dump(tf_idf, f2)
f2.write('\n')
f1.close()
f2.close()
f3.close()
# calculate simhash
f = open("simhash_scores.json", 'w')
for ID, hashed_words in simhash_dict.items():
simhash_score = ''
for i in range(64): # all hashed words are 64 bit binary strings
i_th_binary = 0 # the i-th binary value
for hashed_word, weight in hashed_words.items():
if hashed_word[i] == '0':
i_th_binary -= weight
elif hashed_word[i] == '1':
i_th_binary += weight
if i_th_binary > 0:
simhash_score += '1'
else:
simhash_score += '0'
json.dump({ID:simhash_score}, f)
f.write("\n")
f.close()
def query_prompt():
ps = PorterStemmer()
tokenizer = RegexpTokenizer(r"[a-zA-Z0-9']+")
# start prompt
start = input("Enter 'start'(s) to get started or 'quit'(q) to quit: \n")
start = start.lower()
while(start != "start" and start != "s"):
if start == 'q' or start == 'quit':
exit()
start = input("Unknown command, try again: \n")
# loading auxillary structs
print("Getting ready...")
stop_words = set(stopwords.words("english"))
info = (pickle.load( open( "info.p", 'rb' ) ))
urls = info[0] # ID --> url dict
limit = 5 # limit of number of websites shown (-1 means no limit)
fp_dict = {}
simhash_scores = {}
with open("fp.json") as f:
for line in f:
fp_dict.update(json.loads(line)) # dict of the file pointers
with open("simhash_scores.json") as f:
for line in f:
simhash_scores.update(json.loads(line))
print(f"{len(fp_dict)} words in index")
f = open("tf_idf.json") # open our index
print | except KeyError:
simhash_dict[docID] = {sim_hashfn(word): score}
| random_line_split |
kelliptic.go | // fields.
//
// This package operates, internally, on Jacobian coordinates. For a given
// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
// calculation can be performed within the transform (as in ScalarMult and
// ScalarBaseMult). But even for Add and Double, it's faster to apply and
// reverse the transform than to operate in affine coordinates.
package kelliptic
import (
"crypto/elliptic"
"errors"
"math/big"
"sync"
)
// A Curve represents a Koblitz Curve with a=0.
// See http://www.hyperellipticurve.org/EFD/g1p/auto-shortw.html
type Curve struct {
P *big.Int // the order of the underlying field
N *big.Int // the order of the base point
B *big.Int // the constant of the Curve equation
Gx, Gy *big.Int // (x,y) of the base point
BitSize int // the size of the underlying field
}
func (curve *Curve) Params() *elliptic.CurveParams {
return &elliptic.CurveParams{
P: curve.P,
N: curve.N,
B: curve.B,
Gx: curve.Gx,
Gy: curve.Gy,
BitSize: curve.BitSize,
}
}
// IsOnCurve returns true if the given (x,y) lies on the curve.
func (curve *Curve) IsOnCurve(x, y *big.Int) bool {
// y² = x³ + b
y2 := new(big.Int).Mul(y, y) //y²
y2.Mod(y2, curve.P) //y²%P
x3 := new(big.Int).Mul(x, x) //x²
x3.Mul(x3, x) //x³
x3.Add(x3, curve.B) //x³+B
x3.Mod(x3, curve.P) //(x³+B)%P
return x3.Cmp(y2) == 0
}
// affineFromJacobian reverses the Jacobian transform. See the comment at the
// top of the file.
//
// TODO(x): double check if the function is okay
func (curve *Curve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
zinv := new(big.Int).ModInverse(z, curve.P)
zinvsq := new(big.Int).Mul(zinv, zinv)
xOut = new(big.Int).Mul(x, zinvsq)
xOut.Mod(xOut, curve.P)
zinvsq.Mul(zinvsq, zinv)
yOut = new(big.Int).Mul(y, zinvsq)
yOut.Mod(yOut, curve.P)
return
}
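// Illustrative sketch (standalone file, separate from kelliptic.go): a tiny
// check of the Jacobian <-> affine relation used above, x = X/Z^2 and
// y = Y/Z^3 (mod p). It uses a small toy prime instead of the secp256k1 field
// purely to keep the numbers readable; the algebra is identical to what
// affineFromJacobian performs.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := big.NewInt(23) // toy prime field, stand-in for curve.P
	x := big.NewInt(17)
	y := big.NewInt(5)
	z := big.NewInt(7) // any nonzero Z gives an equivalent Jacobian triple

	// Affine -> Jacobian: X = x*Z^2, Y = y*Z^3.
	z2 := new(big.Int).Mod(new(big.Int).Mul(z, z), p)
	z3 := new(big.Int).Mod(new(big.Int).Mul(z2, z), p)
	X := new(big.Int).Mod(new(big.Int).Mul(x, z2), p)
	Y := new(big.Int).Mod(new(big.Int).Mul(y, z3), p)

	// Jacobian -> affine, mirroring affineFromJacobian: multiply by Z^-2, Z^-3.
	zinv := new(big.Int).ModInverse(z, p)
	zinv2 := new(big.Int).Mod(new(big.Int).Mul(zinv, zinv), p)
	zinv3 := new(big.Int).Mod(new(big.Int).Mul(zinv2, zinv), p)
	xBack := new(big.Int).Mod(new(big.Int).Mul(X, zinv2), p)
	yBack := new(big.Int).Mod(new(big.Int).Mul(Y, zinv3), p)

	fmt.Println(xBack.Cmp(x) == 0, yBack.Cmp(y) == 0) // expect: true true
}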
// Add returns the sum of (x1,y1) and (x2,y2)
func (curve *Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
z := new(big.Int).SetInt64(1)
return curve.affineFromJacobian(curve.addJacobian(x1, y1, z, x2, y2, z))
}
// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
// (x2, y2, z2) and returns their sum, also in Jacobian form.
func (curve *Curve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
// See http://hyperellipticurve.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
z1z1 := new(big.Int).Mul(z1, z1)
z1z1.Mod(z1z1, curve.P)
z2z2 := new(big.Int).Mul(z2, z2)
z2z2.Mod(z2z2, curve.P)
u1 := new(big.Int).Mul(x1, z2z2)
u1.Mod(u1, curve.P)
u2 := new(big.Int).Mul(x2, z1z1)
u2.Mod(u2, curve.P)
h := new(big.Int).Sub(u2, u1)
if h.Sign() == -1 {
h.Add(h, curve.P)
}
i := new(big.Int).Lsh(h, 1)
i.Mul(i, i)
j := new(big.Int).Mul(h, i)
s1 := new(big.Int).Mul(y1, z2)
s1.Mul(s1, z2z2)
s1.Mod(s1, curve.P)
s2 := new(big.Int).Mul(y2, z1)
s2.Mul(s2, z1z1)
s2.Mod(s2, curve.P)
r := new(big.Int).Sub(s2, s1)
if r.Sign() == -1 {
r.Add(r, curve.P)
}
r.Lsh(r, 1)
v := new(big.Int).Mul(u1, i)
x3 := new(big.Int).Set(r)
x3.Mul(x3, x3)
x3.Sub(x3, j)
x3.Sub(x3, v)
x3.Sub(x3, v)
x3.Mod(x3, curve.P)
y3 := new(big.Int).Set(r)
v.Sub(v, x3)
y3.Mul(y3, v)
s1.Mul(s1, j)
s1.Lsh(s1, 1)
y3.Sub(y3, s1)
y3.Mod(y3, curve.P)
z3 := new(big.Int).Add(z1, z2)
z3.Mul(z3, z3)
z3.Sub(z3, z1z1)
if z3.Sign() == -1 {
z3.Add(z3, curve.P)
}
z3.Sub(z3, z2z2)
if z3.Sign() == -1 {
z3.Add(z3, curve.P)
}
z3.Mul(z3, h)
z3.Mod(z3, curve.P)
return x3, y3, z3
}
// Double returns 2*(x,y)
func (curve *Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
z1 := new(big.Int).SetInt64(1)
return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
}
// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
// returns its double, also in Jacobian form.
//
// See http://hyperellipticurve.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
func (curve *Curve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
a := new(big.Int).Mul(x, x) //X1²
b := new(big.Int).Mul(y, y) //Y1²
c := new(big.Int).Mul(b, b) //B²
d := new(big.Int).Add(x, b) //X1+B
d.Mul(d, d) //(X1+B)²
d.Sub(d, a) //(X1+B)²-A
d.Sub(d, c) //(X1+B)²-A-C
d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C)
e := new(big.Int).Mul(big.NewInt(3), a) //3*A
f := new(big.Int).Mul(e, e) //E²
x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D
x3.Sub(f, x3) //F-2*D
x3.Mod(x3, curve.P)
y3 := new(big.Int).Sub(d, x3) //D-X3
y3.Mul(e, y3) //E*(D-X3)
y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C
y3.Mod(y3, curve.P)
z3 := new(big.Int).Mul(y, z) //Y1*Z1
z3.Mul(big.NewInt(2), z3) //2*Y1*Z1
z3.Mod(z3, curve.P)
return x3, y3, z3
}
// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
//
// TODO(x): double check if it is okay
func (curve *Curve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
// We have a slight problem in that the identity of the group (the
// point at infinity) cannot be represented in (x, y) form on a finite
// machine. Thus the standard add/double algorithm has to be tweaked
// slightly | // license that can be found in the LICENSE file.
// Package bitelliptic implements several Koblitz elliptic curves over prime | random_line_split |
|
kelliptic.go | the first true bit in |k|. If we don't find any true bits in
// |k|, then we return nil, nil, because we cannot return the identity
// element.
Bz := new(big.Int).SetInt64(1)
x := Bx
y := By
z := Bz
seenFirstTrue := false
for _, byte := range k {
for bitNum := 0; bitNum < 8; bitNum++ {
if seenFirstTrue {
x, y, z = curve.doubleJacobian(x, y, z)
}
if byte&0x80 == 0x80 {
if !seenFirstTrue {
seenFirstTrue = true
} else {
x, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)
}
}
byte <<= 1
}
}
if !seenFirstTrue {
return nil, nil
}
return curve.affineFromJacobian(x, y, z)
}
// ScalarBaseMult returns k*G, where G is the base point of the group and k is
// an integer in big-endian form.
func (curve *Curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
return curve.ScalarMult(curve.Gx, curve.Gy, k)
}
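// Illustrative sketch (standalone file): minimal use of the exported API
// above — derive a public point from a random scalar with ScalarBaseMult and
// confirm it lies on secp256k1. The import path below is a placeholder, not
// the package's real location; adjust it to wherever this package lives.
package main

import (
	"crypto/rand"
	"fmt"

	"example.com/kelliptic" // placeholder path for the package in this file
)

func main() {
	curve := kelliptic.S256()

	// A private key is just a big-endian scalar; 32 random bytes is enough for
	// a demo (a production key should also be checked against the group order N).
	k := make([]byte, 32)
	if _, err := rand.Read(k); err != nil {
		panic(err)
	}

	x, y := curve.ScalarBaseMult(k) // public point = k*G
	fmt.Println("on curve:", curve.IsOnCurve(x, y))
}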
//curve parameters taken from:
//http://www.secg.org/collateral/sec2_final.pdf
var initonce sync.Once
var secp160k1 *Curve
var secp192k1 *Curve
var secp224k1 *Curve
var secp256k1 *Curve
func initAll() {
initS160()
initS192()
initS224()
initS256()
}
func initS160() {
// See SEC 2 section 2.4.1
secp160k1 = new(Curve)
secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16)
secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16)
secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16)
secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16)
secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16)
secp160k1.BitSize = 160
}
func initS192() {
// See SEC 2 section 2.5.1
secp192k1 = new(Curve)
secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16)
secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16)
secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16)
secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16)
secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16)
secp192k1.BitSize = 192
}
func initS224() {
// See SEC 2 section 2.6.1
secp224k1 = new(Curve)
secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16)
secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16)
secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16)
secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16)
secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16)
secp224k1.BitSize = 224
}
func initS256() {
// See SEC 2 section 2.7.1
secp256k1 = new(Curve)
secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
secp256k1.BitSize = 256
}
// S160 returns a Curve which implements secp160k1 (see SEC 2 section 2.4.1)
func S160() *Curve {
initonce.Do(initAll)
return secp160k1
}
// S192 returns a Curve which implements secp192k1 (see SEC 2 section 2.5.1)
func S192() *Curve {
initonce.Do(initAll)
return secp192k1
}
// S224 returns a Curve which implements secp224k1 (see SEC 2 section 2.6.1)
func S224() *Curve {
initonce.Do(initAll)
return secp224k1
}
// S256 returns a Curve which implements secp256k1 (see SEC 2 section 2.7.1)
func S256() *Curve {
i | nito | identifier_name |
|
kelliptic.go | cobian takes two points in Jacobian coordinates, (x1, y1, z1) and
// (x2, y2, z2) and returns their sum, also in Jacobian form.
func (curve *Curve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
// See http://hyperellipticurve.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
z1z1 := new(big.Int).Mul(z1, z1)
z1z1.Mod(z1z1, curve.P)
z2z2 := new(big.Int).Mul(z2, z2)
z2z2.Mod(z2z2, curve.P)
u1 := new(big.Int).Mul(x1, z2z2)
u1.Mod(u1, curve.P)
u2 := new(big.Int).Mul(x2, z1z1)
u2.Mod(u2, curve.P)
h := new(big.Int).Sub(u2, u1)
if h.Sign() == -1 {
h.Add(h, curve.P)
}
i := new(big.Int).Lsh(h, 1)
i.Mul(i, i)
j := new(big.Int).Mul(h, i)
s1 := new(big.Int).Mul(y1, z2)
s1.Mul(s1, z2z2)
s1.Mod(s1, curve.P)
s2 := new(big.Int).Mul(y2, z1)
s2.Mul(s2, z1z1)
s2.Mod(s2, curve.P)
r := new(big.Int).Sub(s2, s1)
if r.Sign() == -1 {
r.Add(r, curve.P)
}
r.Lsh(r, 1)
v := new(big.Int).Mul(u1, i)
x3 := new(big.Int).Set(r)
x3.Mul(x3, x3)
x3.Sub(x3, j)
x3.Sub(x3, v)
x3.Sub(x3, v)
x3.Mod(x3, curve.P)
y3 := new(big.Int).Set(r)
v.Sub(v, x3)
y3.Mul(y3, v)
s1.Mul(s1, j)
s1.Lsh(s1, 1)
y3.Sub(y3, s1)
y3.Mod(y3, curve.P)
z3 := new(big.Int).Add(z1, z2)
z3.Mul(z3, z3)
z3.Sub(z3, z1z1)
if z3.Sign() == -1 {
z3.Add(z3, curve.P)
}
z3.Sub(z3, z2z2)
if z3.Sign() == -1 {
z3.Add(z3, curve.P)
}
z3.Mul(z3, h)
z3.Mod(z3, curve.P)
return x3, y3, z3
}
// Double returns 2*(x,y)
func (curve *Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
z1 := new(big.Int).SetInt64(1)
return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
}
// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
// returns its double, also in Jacobian form.
//
// See http://hyperellipticurve.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
func (curve *Curve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
a := new(big.Int).Mul(x, x) //X1²
b := new(big.Int).Mul(y, y) //Y1²
c := new(big.Int).Mul(b, b) //B²
d := new(big.Int).Add(x, b) //X1+B
d.Mul(d, d) //(X1+B)²
d.Sub(d, a) //(X1+B)²-A
d.Sub(d, c) //(X1+B)²-A-C
d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C)
e := new(big.Int).Mul(big.NewInt(3), a) //3*A
f := new(big.Int).Mul(e, e) //E²
x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D
x3.Sub(f, x3) //F-2*D
x3.Mod(x3, curve.P)
y3 := new(big.Int).Sub(d, x3) //D-X3
y3.Mul(e, y3) //E*(D-X3)
y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C
y3.Mod(y3, curve.P)
z3 := new(big.Int).Mul(y, z) //Y1*Z1
z3.Mul(big.NewInt(2), z3) //2*Y1*Z1
z3.Mod(z3, curve.P)
return x3, y3, z3
}
// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
//
// TODO(x): double check if it is okay
func (curve *Curve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
// We have a slight problem in that the identity of the group (the
// point at infinity) cannot be represented in (x, y) form on a finite
// machine. Thus the standard add/double algorithm has to be tweaked
// slightly: our initial state is not the identity, but x, and we
// ignore the first true bit in |k|. If we don't find any true bits in
// |k|, then we return nil, nil, because we cannot return the identity
// element.
Bz := new(big.Int).SetInt64(1)
x := Bx
y := By
z := Bz
seenFirstTrue := false
for _, byte := range k {
for bitNum := 0; bitNum < 8; bitNum++ {
if seenFirstTrue {
x, y, z = curve.doubleJacobian(x, y, z)
}
if byte&0x80 == 0x80 {
if !seenFirstTrue {
seenFirstTrue = true
} else {
x, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)
}
}
byte <<= 1
}
}
if !seenFirstTrue {
return nil, nil
}
return curve.affineFromJacobian(x, y, z)
}
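// Usage sketch (illustrative, not from the original source): for a hypothetical
// one-byte scalar k = 0x05 (binary 101) the double-and-add loop above takes the
// first set bit as the initial state G, then doubles to 2G, then doubles and
// adds to reach 5G.
//
//	curve := S256()
//	x, y := curve.ScalarBaseMult([]byte{0x05})     // 5*G
//	x2, y2 := curve.ScalarMult(x, y, []byte{0x02}) // 2*(5*G) = 10*G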
// ScalarBaseMult returns k*G, where G is the base point of the group and k is
// an integer in big-endian form.
func (curve *Curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
return curve.ScalarMult(curve.Gx, curve.Gy, k)
}
//curve parameters taken from:
//http://www.secg.org/collateral/sec2_final.pdf
var initonce sync.Once
var secp160k1 *Curve
var secp192k1 *Curve
var secp224k1 *Curve
var secp256k1 *Curve
func initAll() {
initS160()
initS192()
initS224()
initS256()
}
func initS160() {
// See SEC 2 section 2.4.1
secp160k1 = new(Curve)
secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16)
secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16)
secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16)
secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16)
secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F | w(big.Int).SetInt64(1)
return curve.affineFromJacobian(curve.addJacobian(x1, y1, z, x2, y2, z))
}
// addJa | identifier_body |
|
kelliptic.go | 26F2FC170F69466A74DEFD8D", 16)
secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16)
secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16)
secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16)
secp192k1.BitSize = 192
}
func initS224() {
// See SEC 2 section 2.6.1
secp224k1 = new(Curve)
secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16)
secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16)
secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16)
secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16)
secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16)
secp224k1.BitSize = 224
}
func initS256() {
// See SEC 2 section 2.7.1
secp256k1 = new(Curve)
secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
secp256k1.BitSize = 256
}
// S160 returns a Curve which implements secp160k1 (see SEC 2 section 2.4.1)
func S160() *Curve {
initonce.Do(initAll)
return secp160k1
}
// S192 returns a Curve which implements secp192k1 (see SEC 2 section 2.5.1)
func S192() *Curve {
initonce.Do(initAll)
return secp192k1
}
// S224 returns a Curve which implements secp224k1 (see SEC 2 section 2.6.1)
func S224() *Curve {
initonce.Do(initAll)
return secp224k1
}
// S256 returns a Curve which implements secp256k1 (see SEC 2 section 2.7.1)
func S256() *Curve {
initonce.Do(initAll)
return secp256k1
}
func CompressPoint(curve *Curve, X, Y *big.Int) (cp []byte) {
return curve.CompressPoint(X, Y)
}
// Point Compression Routines. These could use a lot of testing.
func (curve *Curve) CompressPoint(X, Y *big.Int) (cp []byte) {
by := new(big.Int).And(Y, big.NewInt(1)).Int64()
bx := X.Bytes()
cp = make([]byte, len(bx)+1)
if by == 1 {
cp[0] = byte(3)
} else {
cp[0] = byte(2)
}
copy(cp[1:], bx)
return
}
func (curve *Curve) DecompressPoint(cp []byte) (X, Y *big.Int, err error) {
var c int64
switch cp[0] { // c = 2 most significant bits of S
case byte(0x03):
c = 1
break
case byte(0x02):
c = 0
break
case byte(0x04): // This is an uncompressed point. Use base Unmarshal.
X, Y = elliptic.Unmarshal(curve, cp)
return
default:
return nil, nil, errors.New("Not a compressed point. (Invalid Header)")
}
byteLen := (curve.Params().BitSize + 7) >> 3
if len(cp) != 1+byteLen {
return nil, nil, errors.New("Not a compressed point. (Require 1 + key size)")
}
X = new(big.Int).SetBytes(cp[1:])
Y = new(big.Int)
Y.Mod(Y.Mul(X, X), curve.P) // solve for y in y**2 = x**3 + x*a + b (mod p)
Y.Mod(Y.Mul(Y, X), curve.P) // assume a = 0
Y.Mod(Y.Add(Y, curve.B), curve.P)
Y = curve.Sqrt(Y)
if Y.Cmp(big.NewInt(0)) == 0 {
return nil, nil, errors.New("Not a compressed point. (Not on curve)")
}
if c != new(big.Int).And(Y, big.NewInt(1)).Int64() {
Y.Sub(curve.P, Y)
}
return
}
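// Round-trip sketch for the compression helpers above (illustrative only, not
// part of the original tests): compressing and then decompressing a point
// should give back the same coordinates.
//
//	curve := S256()
//	x, y := curve.ScalarBaseMult([]byte{0x01})
//	cp := curve.CompressPoint(x, y)          // 33 bytes: 0x02/0x03 header + X
//	x2, y2, err := curve.DecompressPoint(cp) // expect err == nil and x2, y2 == x, y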
// Sqrt returns the modular square root.
//
// Modulus must be prime. Some non-prime values will loop indefinitely.
// Modulo Square root involves deep magic. You have been warned!
// Uses the Shanks-Tonelli algorithm:
// http://en.wikipedia.org/wiki/Shanks-Tonelli_algorithm
// Translated from a python implementation found here:
// http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
func (curve *Curve) Sqrt(a *big.Int) *big.Int {
ZERO := big.NewInt(0)
ONE := big.NewInt(1)
TWO := big.NewInt(2)
THREE := big.NewInt(3)
FOUR := big.NewInt(4)
p := curve.P
c := new(big.Int)
// Simple Cases
//
if a.Cmp(ZERO) == 0 {
return ZERO
} else if p.Cmp(TWO) == 0 {
return a.Mod(a, p)
} else if LegendreSymbol(a, p) != 1 {
return ZERO
} else if c.Mod(p, FOUR).Cmp(THREE) == 0 {
c.Add(p, ONE)
| c.Div(c, FOUR)
c.Exp(a, c, p)
return c
}
// Partition p-1 | conditional_block |
|
xmlpipe2.py | .server, slave_okay=True)
try:
# look up the main entity
main_ntt_id = int(afile["se"]["_id"])
ntt = gconn.ontology.ontology.find_one({"_id":main_ntt_id})
ntts1_info = set()
ntts2_info = set()
if ntt:
afile["se"]["info"] = ntt
# look up first- and second-level entities
if "r" in ntt and ntt["r"]:
# build the list of first-level entities and relation types
ntts1_info = {(ntt_id, relation[:3])
for relation, relation_ids in ntt["r"].iteritems()
for ntt_id in relation_ids if ntt_id!=main_ntt_id}
# if there are first-level entities...
if ntts1_info:
# fetch the first-level entities
ntts1_ids = [ntt_id for ntt_id, relation in ntts1_info]
ntts1 = list(gconn.ontology.ontology.find({"_id":{"$in":ntts1_ids}}))
# build the list of second-level entities and relation types
ntts1_ids.append(main_ntt_id) # add the relation id so the list can be used as a filter
ntts2_info = {(ntt_id, relation[:3])
for ntt2 in ntts1 if "r" in ntt2
for relation, relation_ids in ntt2["r"].iteritems()
for ntt_id in relation_ids if ntt_id not in ntts1_ids}
afile["se"]["rel"] = (ntts1_info, ntts2_info)
else:
not_found_ntts.write(str(afile["_id"])+"\n")
not_found_count += 1
del afile["se"]["_id"]
except BaseException:
ntt_id = str(afile["se"]["_id"]) if "_id" in afile["se"] else "???"
del afile["se"]["_id"]
gconn.close()
gconn = None
logging.exception("Error obtaining entities for file %s: %s."%(str(afile["_id"]), ntt_id))
self.results.put(afile)
self.requests.task_done()
if not_found_count:
logging.warn("Entities not found for some files. Check file nf_ntts.csv.")
class FilesFetcher(Thread):
def __init__(self, server, entities_server, filter, batch_size, stop_set, stop_set_len, last_count, processes):
super(FilesFetcher, self).__init__()
self.daemon = True
self.server = server
self.batch_size = batch_size
self.results = Queue.Queue(batch_size*processes)
self.filter = filter
self.complete = False
self.entities = EntitiesFetcher(entities_server, self.results)
self.stop_set = stop_set
self.stop_set_len = stop_set_len
self.total_count = self.last_count = last_count
def run(self):
self.complete = False
gconn = pymongo.Connection(self.server, slave_okay=True)
gdb = gconn.foofind
gfoo = gdb.foo
self.entities.start()
cursor = gfoo.find(self.filter, timeout=False).batch_size(self.batch_size)
if self.stop_set_len:
cursor = cursor.sort([("$natural",pymongo.DESCENDING)])
new_stop_set = set()
must_stop = add_to_stop_set = self.stop_set_len
self.total_count = gfoo.count()
count_limit = max(0,self.total_count-self.last_count)
hard_limit = -100 - int(count_limit/1000.) # hard limit: 1 deletion per thousand files plus a fixed 100
for f in cursor:
if not 's' in f:
f['s'] = 9
if self.stop_set_len:
# build the new stop set
if add_to_stop_set:
new_stop_set.add(str(f["_id"]))
add_to_stop_set -= 1
# check the current stop set
if str(f["_id"]) in self.stop_set:
must_stop-=1
if must_stop==0:
break
else:
continue
# limit by number of files
count_limit += 1
# stop if it has already walked the expected number of files and has seen one from the stop set
# or if it has seen more than the limit number of files
if count_limit<0 and must_stop<self.stop_set_len or count_limit<hard_limit:
if add_to_stop_set and self.stop_set:
new_stop_set.update(self.stop_set)
break
if "se" in f and f["se"]:
self.entities.requests.put(f)
else:
self.results.put(f)
self.entities.requests.put(None)
self.entities.requests.join()
# store the new stop set
if self.stop_set_len:
self.stop_set = new_stop_set
self.complete = True
def __iter__(self):
return self
def next(self):
while True:
if self.results.empty() and self.complete:
raise StopIteration
try:
return self.results.get(True, 3)
except:
pass
space_join = " ".join
XML_ILLEGAL_CHARS_RE = re.compile(u'[\x00-\x08<>\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
def tag(_name, _children=None, separator="", children_type=None, **kwargs):
if _children is False:
return u""
else:
attr = (" " + space_join('%s="%s"' % (key, u(val)) for key, val in kwargs.iteritems() if val)) if kwargs else ""
if _children:
if children_type is list:
return u"<%s%s>%s</%s>" % (_name, attr, separator.join(_children), _name)
elif children_type is unicode:
escaped_children = space_join(XML_ILLEGAL_CHARS_RE.split(u(_children)))
if "&" in escaped_children:
return u"<%s%s><![CDATA[%s]]></%s>" % (_name, attr, escaped_children, _name)
else:
return u"<%s%s>%s</%s>" % (_name, attr, escaped_children, _name)
elif children_type is str:
return u"<%s%s>%s</%s>" % (_name, attr, _children, _name)
elif children_type is float:
return u"<%s%s>%.8f</%s>" % (_name, attr, _children, _name)
else:
return u"<%s%s>%s</%s>" % (_name, attr, unicode(_children), _name)
else:
return u"<%s%s/>" % (_name, attr)
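# Illustrative examples of tag() (hypothetical values, not from the original
# source): tag("size", 123) -> u"<size>123</size>"; tag("sphinx:field",
# name="fn") -> u'<sphinx:field name="fn"/>'; text passed with
# children_type=unicode is wrapped in CDATA whenever it contains "&".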
def set_globals(fields, attrs, init_file, stats_file):
setattr(sys.modules[__name__], "init_file", init_file)
setattr(sys.modules[__name__], "stats_file", stats_file)
setattr(sys.modules[__name__], "items", [(item["name"], item["field"], item["field_type"]) for item in fields+attrs])
def generate_file(args):
file_id, afile = args
try:
if not init_file(afile): return None, None
doc = [tag(n, afile[f] if f and f in afile and afile[f] else False, children_type=t, separator=",") for n,f,t in items]
return tag("sphinx:document", doc, id=file_id, children_type=list), afile
except BaseException as e:
logging.exception("Error processing file %s.\n"%str(afile["_id"]))
return None, e
outwrite = None
generate_id = None
class XmlPipe2:
def __init__(self, processes, fields, attrs, stats, gen_id):
global outwrite, generate_id
outwrite = codecs.getwriter("utf-8")(sys.stdout).write
self.processes = processes
self.fields = fields
self.attrs = attrs
self.stats = stats
self.pool = Pool(processes=processes) if processes>1 else None
self.count = 0
generate_id = gen_id
def generate_header(self):
outwrite(u"<?xml version=\"1.0\" encoding=\"utf-8\"?><sphinx:docset><sphinx:schema>")
outwrite(u"".join(tag("sphinx:field", name=f["name"]) for f in self.fields))
outwrite(u"".join(tag("sphinx:attr", name=a["name"], type=a["type"], bits=a.get("bits"), default=a.get("default")) for a in self.attrs))
outwrite(u"</sphinx:schema>")
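# The emitted header looks roughly like this (field/attr names are hypothetical):
# <?xml version="1.0" encoding="utf-8"?><sphinx:docset><sphinx:schema>
# <sphinx:field name="fn"/><sphinx:attr name="fs" type="int"/>
# </sphinx:schema>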
def generate_footer(self):
outwrite(u"</sphinx:docset>")
def generate(self, server, entities_server, part, afilter, batch_size, stop_set=None, stop_set_len=0, last_count=None, headers=True):
ff = FilesFetcher(server, entities_server, afilter, batch_size, stop_set, stop_set_len, last_count, self.processes)
ff.start() | if headers: self.generate_header() | random_line_split |
|
xmlpipe2.py | iene entidades de primer nivel
ntts1_ids = [ntt_id for ntt_id, relation in ntts1_info]
ntts1 = list(gconn.ontology.ontology.find({"_id":{"$in":ntts1_ids}}))
# build the list of second-level entities and relation types
ntts1_ids.append(main_ntt_id) # add the relation id so the list can be used as a filter
ntts2_info = {(ntt_id, relation[:3])
for ntt2 in ntts1 if "r" in ntt2
for relation, relation_ids in ntt2["r"].iteritems()
for ntt_id in relation_ids if ntt_id not in ntts1_ids}
afile["se"]["rel"] = (ntts1_info, ntts2_info)
else:
not_found_ntts.write(str(afile["_id"])+"\n")
not_found_count += 1
del afile["se"]["_id"]
except BaseException:
ntt_id = str(afile["se"]["_id"]) if "_id" in afile["se"] else "???"
del afile["se"]["_id"]
gconn.close()
gconn = None
logging.exception("Error obtaining entities for file %s: %s."%(str(afile["_id"]), ntt_id))
self.results.put(afile)
self.requests.task_done()
if not_found_count:
logging.warn("Entities not found for some files. Check file nf_ntts.csv.")
class FilesFetcher(Thread):
def __init__(self, server, entities_server, filter, batch_size, stop_set, stop_set_len, last_count, processes):
super(FilesFetcher, self).__init__()
self.daemon = True
self.server = server
self.batch_size = batch_size
self.results = Queue.Queue(batch_size*processes)
self.filter = filter
self.complete = False
self.entities = EntitiesFetcher(entities_server, self.results)
self.stop_set = stop_set
self.stop_set_len = stop_set_len
self.total_count = self.last_count = last_count
def run(self):
self.complete = False
gconn = pymongo.Connection(self.server, slave_okay=True)
gdb = gconn.foofind
gfoo = gdb.foo
self.entities.start()
cursor = gfoo.find(self.filter, timeout=False).batch_size(self.batch_size)
if self.stop_set_len:
cursor = cursor.sort([("$natural",pymongo.DESCENDING)])
new_stop_set = set()
must_stop = add_to_stop_set = self.stop_set_len
self.total_count = gfoo.count()
count_limit = max(0,self.total_count-self.last_count)
hard_limit = -100 - int(count_limit/1000.) # hard limit: 1 deletion per thousand files plus a fixed 100
for f in cursor:
if not 's' in f:
f['s'] = 9
if self.stop_set_len:
# build the new stop set
if add_to_stop_set:
new_stop_set.add(str(f["_id"]))
add_to_stop_set -= 1
# check the current stop set
if str(f["_id"]) in self.stop_set:
must_stop-=1
if must_stop==0:
break
else:
continue
# limit by number of files
count_limit += 1
# stop if it has already walked the expected number of files and has seen one from the stop set
# or if it has seen more than the limit number of files
if count_limit<0 and must_stop<self.stop_set_len or count_limit<hard_limit:
if add_to_stop_set and self.stop_set:
new_stop_set.update(self.stop_set)
break
if "se" in f and f["se"]:
self.entities.requests.put(f)
else:
self.results.put(f)
self.entities.requests.put(None)
self.entities.requests.join()
# store the new stop set
if self.stop_set_len:
self.stop_set = new_stop_set
self.complete = True
def __iter__(self):
return self
def next(self):
while True:
if self.results.empty() and self.complete:
raise StopIteration
try:
return self.results.get(True, 3)
except:
pass
space_join = " ".join
XML_ILLEGAL_CHARS_RE = re.compile(u'[\x00-\x08<>\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
def tag(_name, _children=None, separator="", children_type=None, **kwargs):
if _children is False:
return u""
else:
attr = (" " + space_join('%s="%s"' % (key, u(val)) for key, val in kwargs.iteritems() if val)) if kwargs else ""
if _children:
if children_type is list:
return u"<%s%s>%s</%s>" % (_name, attr, separator.join(_children), _name)
elif children_type is unicode:
escaped_children = space_join(XML_ILLEGAL_CHARS_RE.split(u(_children)))
if "&" in escaped_children:
return u"<%s%s><![CDATA[%s]]></%s>" % (_name, attr, escaped_children, _name)
else:
return u"<%s%s>%s</%s>" % (_name, attr, escaped_children, _name)
elif children_type is str:
return u"<%s%s>%s</%s>" % (_name, attr, _children, _name)
elif children_type is float:
return u"<%s%s>%.8f</%s>" % (_name, attr, _children, _name)
else:
return u"<%s%s>%s</%s>" % (_name, attr, unicode(_children), _name)
else:
return u"<%s%s/>" % (_name, attr)
def set_globals(fields, attrs, init_file, stats_file):
setattr(sys.modules[__name__], "init_file", init_file)
setattr(sys.modules[__name__], "stats_file", stats_file)
setattr(sys.modules[__name__], "items", [(item["name"], item["field"], item["field_type"]) for item in fields+attrs])
def generate_file(args):
file_id, afile = args
try:
if not init_file(afile): return None, None
doc = [tag(n, afile[f] if f and f in afile and afile[f] else False, children_type=t, separator=",") for n,f,t in items]
return tag("sphinx:document", doc, id=file_id, children_type=list), afile
except BaseException as e:
logging.exception("Error processing file %s.\n"%str(afile["_id"]))
return None, e
outwrite = None
generate_id = None
class XmlPipe2:
def __init__(self, processes, fields, attrs, stats, gen_id):
global outwrite, generate_id
outwrite = codecs.getwriter("utf-8")(sys.stdout).write
self.processes = processes
self.fields = fields
self.attrs = attrs
self.stats = stats
self.pool = Pool(processes=processes) if processes>1 else None
self.count = 0
generate_id = gen_id
def generate_header(self):
outwrite(u"<?xml version=\"1.0\" encoding=\"utf-8\"?><sphinx:docset><sphinx:schema>")
outwrite(u"".join(tag("sphinx:field", name=f["name"]) for f in self.fields))
outwrite(u"".join(tag("sphinx:attr", name=a["name"], type=a["type"], bits=a.get("bits"), default=a.get("default")) for a in self.attrs))
outwrite(u"</sphinx:schema>")
def generate_footer(self):
outwrite(u"</sphinx:docset>")
def generate(self, server, entities_server, part, afilter, batch_size, stop_set=None, stop_set_len=0, last_count=None, headers=True):
ff = | FilesFetcher(server, entities_server, afilter, batch_size, stop_set, stop_set_len, last_count, self.processes)
ff.start()
if headers: self.generate_header()
count = error_count = 0
logging.warn("Comienza indexado en servidor %s."%server)
if self.pool:
for doc, extra in self.pool.imap(generate_file, (generate_id(afile, part) for afile in ff)):
count+=1
if doc:
outwrite(doc)
stats_file(extra, self.stats)
elif extra:
error_count += 1
if error_count>100: raise extra # ante mas de 100 errores, detiene la indexacion con error
if count%1000000==0:
outwrite("\n")
logging.warn("Progreso de indexado del servidor %s."%(server), extra={"count":count, "error_count":error_count})
else:
for afile in ff:
doc, extra = generate_file(generate_id(afile, part)) | identifier_body |
|
xmlpipe2.py | (self):
gconn = None
not_found_count = 0
with open("nf_ntts.csv", "w") as not_found_ntts:
while True:
# get entity-lookup requests
afile = self.requests.get(True)
if afile is None:
self.requests.task_done()
break
if not gconn:
gconn = pymongo.Connection(self.server, slave_okay=True)
try:
# look up the main entity
main_ntt_id = int(afile["se"]["_id"])
ntt = gconn.ontology.ontology.find_one({"_id":main_ntt_id})
ntts1_info = set()
ntts2_info = set()
if ntt:
afile["se"]["info"] = ntt
# look up first- and second-level entities
if "r" in ntt and ntt["r"]:
# build the list of first-level entities and relation types
ntts1_info = {(ntt_id, relation[:3])
for relation, relation_ids in ntt["r"].iteritems()
for ntt_id in relation_ids if ntt_id!=main_ntt_id}
# if there are first-level entities...
if ntts1_info:
# fetch the first-level entities
ntts1_ids = [ntt_id for ntt_id, relation in ntts1_info]
ntts1 = list(gconn.ontology.ontology.find({"_id":{"$in":ntts1_ids}}))
# build the list of second-level entities and relation types
ntts1_ids.append(main_ntt_id) # add the relation id so the list can be used as a filter
ntts2_info = {(ntt_id, relation[:3])
for ntt2 in ntts1 if "r" in ntt2
for relation, relation_ids in ntt2["r"].iteritems()
for ntt_id in relation_ids if ntt_id not in ntts1_ids}
afile["se"]["rel"] = (ntts1_info, ntts2_info)
else:
not_found_ntts.write(str(afile["_id"])+"\n")
not_found_count += 1
del afile["se"]["_id"]
except BaseException:
ntt_id = str(afile["se"]["_id"]) if "_id" in afile["se"] else "???"
del afile["se"]["_id"]
gconn.close()
gconn = None
logging.exception("Error obtaining entities for file %s: %s."%(str(afile["_id"]), ntt_id))
self.results.put(afile)
self.requests.task_done()
if not_found_count:
logging.warn("Entities not found for some files. Check file nf_ntts.csv.")
class FilesFetcher(Thread):
def __init__(self, server, entities_server, filter, batch_size, stop_set, stop_set_len, last_count, processes):
super(FilesFetcher, self).__init__()
self.daemon = True
self.server = server
self.batch_size = batch_size
self.results = Queue.Queue(batch_size*processes)
self.filter = filter
self.complete = False
self.entities = EntitiesFetcher(entities_server, self.results)
self.stop_set = stop_set
self.stop_set_len = stop_set_len
self.total_count = self.last_count = last_count
def run(self):
self.complete = False
gconn = pymongo.Connection(self.server, slave_okay=True)
gdb = gconn.foofind
gfoo = gdb.foo
self.entities.start()
cursor = gfoo.find(self.filter, timeout=False).batch_size(self.batch_size)
if self.stop_set_len:
cursor = cursor.sort([("$natural",pymongo.DESCENDING)])
new_stop_set = set()
must_stop = add_to_stop_set = self.stop_set_len
self.total_count = gfoo.count()
count_limit = max(0,self.total_count-self.last_count)
hard_limit = -100 - int(count_limit/1000.) # hard limit: 1 deletion per thousand files plus a fixed 100
for f in cursor:
if not 's' in f:
f['s'] = 9
if self.stop_set_len:
# build the new stop set
if add_to_stop_set:
new_stop_set.add(str(f["_id"]))
add_to_stop_set -= 1
# check the current stop set
if str(f["_id"]) in self.stop_set:
must_stop-=1
if must_stop==0:
break
else:
continue
# limit by number of files
count_limit += 1
# stop if it has already walked the expected number of files and has seen one from the stop set
# or if it has seen more than the limit number of files
if count_limit<0 and must_stop<self.stop_set_len or count_limit<hard_limit:
if add_to_stop_set and self.stop_set:
new_stop_set.update(self.stop_set)
break
if "se" in f and f["se"]:
self.entities.requests.put(f)
else:
self.results.put(f)
self.entities.requests.put(None)
self.entities.requests.join()
# store the new stop set
if self.stop_set_len:
self.stop_set = new_stop_set
self.complete = True
def __iter__(self):
return self
def next(self):
while True:
if self.results.empty() and self.complete:
raise StopIteration
try:
return self.results.get(True, 3)
except:
pass
space_join = " ".join
XML_ILLEGAL_CHARS_RE = re.compile(u'[\x00-\x08<>\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
def tag(_name, _children=None, separator="", children_type=None, **kwargs):
if _children is False:
return u""
else:
attr = (" " + space_join('%s="%s"' % (key, u(val)) for key, val in kwargs.iteritems() if val)) if kwargs else ""
if _children:
if children_type is list:
return u"<%s%s>%s</%s>" % (_name, attr, separator.join(_children), _name)
elif children_type is unicode:
escaped_children = space_join(XML_ILLEGAL_CHARS_RE.split(u(_children)))
if "&" in escaped_children:
return u"<%s%s><![CDATA[%s]]></%s>" % (_name, attr, escaped_children, _name)
else:
return u"<%s%s>%s</%s>" % (_name, attr, escaped_children, _name)
elif children_type is str:
return u"<%s%s>%s</%s>" % (_name, attr, _children, _name)
elif children_type is float:
return u"<%s%s>%.8f</%s>" % (_name, attr, _children, _name)
else:
return u"<%s%s>%s</%s>" % (_name, attr, unicode(_children), _name)
else:
return u"<%s%s/>" % (_name, attr)
def set_globals(fields, attrs, init_file, stats_file):
setattr(sys.modules[__name__], "init_file", init_file)
setattr(sys.modules[__name__], "stats_file", stats_file)
setattr(sys.modules[__name__], "items", [(item["name"], item["field"], item["field_type"]) for item in fields+attrs])
def generate_file(args):
file_id, afile = args
try:
if not init_file(afile): return None, None
doc = [tag(n, afile[f] if f and f in afile and afile[f] else False, children_type=t, separator=",") for n,f,t in items]
return tag("sphinx:document", doc, id=file_id, children_type=list), afile
except BaseException as e:
logging.exception("Error processing file %s.\n"%str(afile["_id"]))
return None, e
outwrite = None
generate_id = None
class XmlPipe2:
def __init__(self, processes, fields, attrs, stats, gen_id):
global outwrite, generate_id
outwrite = codecs.getwriter("utf-8")(sys.stdout).write
self.processes = processes
self.fields = fields
self.attrs = attrs
self.stats = stats
self.pool = Pool(processes=processes) if processes>1 else None
self.count = 0
generate_id = gen_id
def generate_header(self):
outwrite(u"<?xml version=\"1.0\" encoding=\"utf-8\"?><sphinx:docset><sphinx:schema>")
outwrite(u"".join(tag("sphinx:field", name=f["name"]) for f in self.fields))
outwrite(u"".join(tag("sphinx:attr", name=a["name"], type=a["type"], bits=a.get("bits"), default=a.get("default")) for a in self.attrs))
outwrite(u"</sphinx:schema>")
def generate_footer(self):
| run | identifier_name |
|
xmlpipe2.py | .requests.get(True)
if afile is None:
self.requests.task_done()
break
if not gconn:
gconn = pymongo.Connection(self.server, slave_okay=True)
try:
# look up the main entity
main_ntt_id = int(afile["se"]["_id"])
ntt = gconn.ontology.ontology.find_one({"_id":main_ntt_id})
ntts1_info = set()
ntts2_info = set()
if ntt:
afile["se"]["info"] = ntt
# look up first- and second-level entities
if "r" in ntt and ntt["r"]:
# build the list of first-level entities and relation types
ntts1_info = {(ntt_id, relation[:3])
for relation, relation_ids in ntt["r"].iteritems()
for ntt_id in relation_ids if ntt_id!=main_ntt_id}
# if there are first-level entities...
if ntts1_info:
# fetch the first-level entities
ntts1_ids = [ntt_id for ntt_id, relation in ntts1_info]
ntts1 = list(gconn.ontology.ontology.find({"_id":{"$in":ntts1_ids}}))
# build the list of second-level entities and relation types
ntts1_ids.append(main_ntt_id) # add the relation id so the list can be used as a filter
ntts2_info = {(ntt_id, relation[:3])
for ntt2 in ntts1 if "r" in ntt2
for relation, relation_ids in ntt2["r"].iteritems()
for ntt_id in relation_ids if ntt_id not in ntts1_ids}
afile["se"]["rel"] = (ntts1_info, ntts2_info)
else:
not_found_ntts.write(str(afile["_id"])+"\n")
not_found_count += 1
del afile["se"]["_id"]
except BaseException:
ntt_id = str(afile["se"]["_id"]) if "_id" in afile["se"] else "???"
del afile["se"]["_id"]
gconn.close()
gconn = None
logging.exception("Error obtaining entities for file %s: %s."%(str(afile["_id"]), ntt_id))
self.results.put(afile)
self.requests.task_done()
if not_found_count:
logging.warn("Entities not found for some files. Check file nf_ntts.csv.")
class FilesFetcher(Thread):
def __init__(self, server, entities_server, filter, batch_size, stop_set, stop_set_len, last_count, processes):
super(FilesFetcher, self).__init__()
self.daemon = True
self.server = server
self.batch_size = batch_size
self.results = Queue.Queue(batch_size*processes)
self.filter = filter
self.complete = False
self.entities = EntitiesFetcher(entities_server, self.results)
self.stop_set = stop_set
self.stop_set_len = stop_set_len
self.total_count = self.last_count = last_count
def run(self):
self.complete = False
gconn = pymongo.Connection(self.server, slave_okay=True)
gdb = gconn.foofind
gfoo = gdb.foo
self.entities.start()
cursor = gfoo.find(self.filter, timeout=False).batch_size(self.batch_size)
if self.stop_set_len:
cursor = cursor.sort([("$natural",pymongo.DESCENDING)])
new_stop_set = set()
must_stop = add_to_stop_set = self.stop_set_len
self.total_count = gfoo.count()
count_limit = max(0,self.total_count-self.last_count)
hard_limit = -100 - int(count_limit/1000.) # hard limit: 1 deletion per thousand files plus a fixed 100
for f in cursor:
if not 's' in f:
f['s'] = 9
if self.stop_set_len:
# build the new stop set
if add_to_stop_set:
new_stop_set.add(str(f["_id"]))
add_to_stop_set -= 1
# check the current stop set
if str(f["_id"]) in self.stop_set:
must_stop-=1
if must_stop==0:
break
else:
continue
# limit by number of files
count_limit += 1
# stop if it has already walked the expected number of files and has seen one from the stop set
# or if it has seen more than the limit number of files
if count_limit<0 and must_stop<self.stop_set_len or count_limit<hard_limit:
if a | if "se" in f and f["se"]:
self.entities.requests.put(f)
else:
self.results.put(f)
self.entities.requests.put(None)
self.entities.requests.join()
# store the new stop set
if self.stop_set_len:
self.stop_set = new_stop_set
self.complete = True
def __iter__(self):
return self
def next(self):
while True:
if self.results.empty() and self.complete:
raise StopIteration
try:
return self.results.get(True, 3)
except:
pass
space_join = " ".join
XML_ILLEGAL_CHARS_RE = re.compile(u'[\x00-\x08<>\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
def tag(_name, _children=None, separator="", children_type=None, **kwargs):
if _children is False:
return u""
else:
attr = (" " + space_join('%s="%s"' % (key, u(val)) for key, val in kwargs.iteritems() if val)) if kwargs else ""
if _children:
if children_type is list:
return u"<%s%s>%s</%s>" % (_name, attr, separator.join(_children), _name)
elif children_type is unicode:
escaped_children = space_join(XML_ILLEGAL_CHARS_RE.split(u(_children)))
if "&" in escaped_children:
return u"<%s%s><![CDATA[%s]]></%s>" % (_name, attr, escaped_children, _name)
else:
return u"<%s%s>%s</%s>" % (_name, attr, escaped_children, _name)
elif children_type is str:
return u"<%s%s>%s</%s>" % (_name, attr, _children, _name)
elif children_type is float:
return u"<%s%s>%.8f</%s>" % (_name, attr, _children, _name)
else:
return u"<%s%s>%s</%s>" % (_name, attr, unicode(_children), _name)
else:
return u"<%s%s/>" % (_name, attr)
def set_globals(fields, attrs, init_file, stats_file):
setattr(sys.modules[__name__], "init_file", init_file)
setattr(sys.modules[__name__], "stats_file", stats_file)
setattr(sys.modules[__name__], "items", [(item["name"], item["field"], item["field_type"]) for item in fields+attrs])
def generate_file(args):
file_id, afile = args
try:
if not init_file(afile): return None, None
doc = [tag(n, afile[f] if f and f in afile and afile[f] else False, children_type=t, separator=",") for n,f,t in items]
return tag("sphinx:document", doc, id=file_id, children_type=list), afile
except BaseException as e:
logging.exception("Error processing file %s.\n"%str(afile["_id"]))
return None, e
outwrite = None
generate_id = None
class XmlPipe2:
def __init__(self, processes, fields, attrs, stats, gen_id):
global outwrite, generate_id
outwrite = codecs.getwriter("utf-8")(sys.stdout).write
self.processes = processes
self.fields = fields
self.attrs = attrs
self.stats = stats
self.pool = Pool(processes=processes) if processes>1 else None
self.count = 0
generate_id = gen_id
def generate_header(self):
outwrite(u"<?xml version=\"1.0\" encoding=\"utf-8\"?><sphinx:docset><sphinx:schema>")
outwrite(u"".join(tag("sphinx:field", name=f["name"]) for f in self.fields))
outwrite(u"".join(tag("sphinx:attr", name=a["name"], type=a["type"], bits=a.get("bits"), default=a.get("default")) for a in self.attrs))
outwrite(u"</sphinx:schema>")
def generate_footer(self):
outwrite(u"</sphinx:docset>")
def generate(self, server, entities_server, part, afilter, batch_size, stop_set=None, stop_set_len=0, last_count=None, headers=True):
ff = FilesFetcher(server, | dd_to_stop_set and self.stop_set:
new_stop_set.update(self.stop_set)
break
| conditional_block |
script.js | let next_b2 = {x: balls[b2].x + balls[b2].vx * time_delta_s,
y: balls[b2].y + balls[b2].vy * time_delta_s,
r: balls[b2].r};
if (overlap(next_b1, next_b2)){
//assigning deltaX and deltaY for the positions of the balls
let deltaX = balls[b2].x - balls[b1].x;
let deltaY = balls[b2].y - balls[b1].y;
//initialising the current normal and tangential velocities to the collision for each ball
let normVel1 = normal_vel(deltaX, deltaY, b1);
let normVel2 = normal_vel(deltaX, deltaY, b2);
let tangVel1 = tangent_vel(deltaX, deltaY, b1);
let tangVel2 = tangent_vel(deltaX, deltaY, b2);
//applying the 'momentum' function to these velocities to work out the post collision velocities
let xNormVels = momentum(normVel1.x, normVel2.x, Math.pow(balls[b1].r, 2), Math.pow(balls[b2].r, 2));
let yNormVels = momentum(normVel1.y, normVel2.y, Math.pow(balls[b1].r, 2), Math.pow(balls[b2].r, 2));
//reassigning the post collision velocities
normVel1.x = xNormVels[0] * cor;
normVel2.x = xNormVels[1] * cor;
normVel1.y = yNormVels[0] * cor;
normVel2.y = yNormVels[1] * cor;
//setting the actual velocities of the balls to the sum of the normal and tangential velocities
balls[b1].vx = normVel1.x + tangVel1.x;
balls[b1].vy = normVel1.y + tangVel1.y;
balls[b2].vx = normVel2.x + tangVel2.x;
balls[b2].vy = normVel2.y + tangVel2.y;
}
}
}
}
function normal_vel(deltaX, deltaY, b){
let k = (-1 / (deltaY * deltaY + deltaX * deltaX)) * ((-1 * deltaX * balls[b].vx) - (deltaY * balls[b].vy));
let nX = k * deltaX;
let nY = k * deltaY;
return {x: nX, y: nY};
}
function tangent_vel(deltaX, deltaY, b){
let k = (-1 / (deltaY * deltaY + deltaX * deltaX)) * ((deltaY * balls[b].vx) - (deltaX * balls[b].vy));
let tX = k * -1 * deltaY;
let tY = k * deltaX;
return {x: tX, y: tY};
}
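//Sanity note (illustrative): normal_vel and tangent_vel split a ball's velocity
//into the component along the centre line (deltaX, deltaY) and the component
//perpendicular to it, so for any ball b the two parts sum back to the original
//(vx, vy); a head-on impact along the x-axis puts everything in the normal part.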
function bollard_collisions(){
//SPLIT THIS FUNCTION TO AN INDIVIDUAL bollard_collision(ball_index) function
for (let i = 0; i < balls.length; i++){
let collided = false;
//nxtb is next ball
let nxtb = {x: balls[i].x + balls[i].vx * time_delta_s,
y: balls[i].y + balls[i].vy * time_delta_s,
r: balls[i].r}
for (let bollard of bollards){
switch (bollard.type){
case 'rect':
//array of [sidex, sidey, opp. sidex, opp. sidey]
let sides = [[0,1,3,2],[1,2,0,3],[2,3,1,0],[0,3,1,2]];
for (let side of sides){
let cs = side.map(c=>bollard.points[c]);
if (Math.abs(point_line_dist(nxtb, cs[0], cs[1])) < nxtb.r && //if hitting this side...
point_line_dist(nxtb, cs[0], cs[2]) * //and in between edge sides
point_line_dist(nxtb, cs[1], cs[3]) < 0) {//== dist(cs[0], cs[1])){
//see papers for calculations (var meanings) ~done on 19/1/19
let dx = cs[1].x - cs[0].x;
let dy = cs[1].y - cs[0].y;
let a = (dx*balls[i].vx+dy*balls[i].vy)/(dx*dx+dy*dy);
let b = (dy*balls[i].vx-dx*balls[i].vy)/(dx*dx+dy*dy);
let vel_tang = {x: dx * a, y: dy * a};
let vel_norm = {x: dy * b, y: -dx * b};
//reverse normal component
vel_norm.x *= -1;
vel_norm.y *= -1;
//reassign components
balls[i].vx = vel_tang.x + vel_norm.x;
balls[i].vy = vel_tang.y + vel_norm.y;
collided = true;
}
}
break;
case 'circle':
break;
}
if (collided) break;
}
if (collided) break;
}
return;
//////////////
for (let bol = 0; bol < bollards.length; bol++){
for (let bal = 0; bal < balls.length; bal++){
let bollard = bollards[bol];
let ball = balls[bal];
let next_ball = {x: ball.x + ball.vx * time_delta_s,
y: ball.y + ball.vy * time_delta_s,
r: ball.r}
if (overlap(ball, bollard)){
//assigning deltaX and deltaY for the positions of the balls
let deltaX = ball.x - bollard.x;
let deltaY = ball.y - bollard.y;
//initialising the current normal and tangential velocities to the collision for each ball
let normVel2 = normal_vel(deltaX, deltaY, bal);
let tangVel2 = tangent_vel(deltaX, deltaY, bal);
//applying the 'momentum' function to these velocities to work out the post collision velocities
let xNormVels = momentum(0, normVel2.x, 100000000000000, ball.r);
let yNormVels = momentum(0, normVel2.y, 100000000000000, ball.r);
//reassigning the post collision velocities
normVel2.x = xNormVels[1] * cor;
normVel2.y = yNormVels[1] * cor;
//setting the actual velocities of the balls to the sum of the normal and tangential velocities
ball.vx = normVel2.x + tangVel2.x;
ball.vy = normVel2.y + tangVel2.y;
}
}
}
}
function point_line_dist(p, c1, c2){
//returns distance between p and the line with c1 and c2 on it
//see papers for calculations
return (-p.x*(c2.y-c1.y)+p.y*(c2.x-c1.x)+c1.x*c2.y-c2.x*c1.y)/dist(c1,c2);
}
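//Worked example (illustrative): for the horizontal line through c1={x:0,y:0}
//and c2={x:10,y:0}, the point p={x:3,y:4} gives (0 + 4*10 + 0 - 0)/10 = 4,
//i.e. the signed distance is the perpendicular offset from the line, and the
//sign flips for points on the other side.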
//distance between two objects with x,y attrs.
function dist(c1, c2){
return ((c2.x - c1.x) ** 2 + (c2.y - c1.y) ** 2) ** 0.5;
}
//random float (incl. min, excl. max)
function rand_num(min, max){
return Math.random() * (max - min) + min;
}
//takes two objects with centres and radii and returns if the circles overlap
function overlap(b1, b2){
if (dist(b1, b2) < b1.r + b2.r){
return true;
}
return false;
}
//returns final velocities of masses after a 1d collision
function momentum(u1, u2, m1, m2){
//http://farside.ph.utexas.edu/teaching/301/lectures/node76.html
let v1 = (u1 * (m1 - m2) + 2 * m2 * u2) / (m1 + m2);
let v2 = (u2 * (m2 - m1) + 2 * m1 * u1) / (m1 + m2);
return [v1, v2];
}
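//Quick check (illustrative): with equal masses momentum(u1, u2, m, m) returns
//[u2, u1] - the balls simply swap velocities - e.g. momentum(2, -1, 5, 5)
//gives [-1, 2].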
//draws a circle on the main canvas
function draw_circle(x, y, radius, color){
//color = ('0' + (x / cnvs.width) * 0xff).toString(16).substr(-2).repeat(3);
ctx.beginPath();
ctx.arc(x, y, radius, 0, Math.PI * 2);
ctx.fillStyle = color;
ctx.fill();
}
//blanks the canvas for next update
function clear_screen(){
if (tracing) return;
ctx.clearRect(0, 0, cnvs.width, cnvs.height);
}
function | fill_polygon | identifier_name |
|
script.js | 2','#45fe3a','#4efd31','#58fc2a','#61f923','#6bf61c','#75f216'];
function fit_to_screen(){
cnvs.height = cnvs.width = innerHeight; //update to change dynamically
}
//initialising arrays
let balls = [];
let bollards = [];
//variables to be initialised by url params
let gravity;
let cor;
let collisions;
let tracing;
let time_delta_s;
let last_time_ms;
/* the simulation is done in the coordinate system of the builder canvas,
* but then in the draw_screen() function, coordinates are mapped back to
* this canvas's coordinate system
* TODO: ^ change this to a 512x512 canvas and scale with CSS
*/
let builder_size;
/******************
MAIN LOOP
*******************/
function update(time_ms){
time_delta_s = last_time_ms ? (time_ms - last_time_ms) / 1000 : 0;
last_time_ms = time_ms;
//start by introducing gravity
apply_gravity();
//check and modify velocities if any collisions
wall_collisions();
ball_collisions();
bollard_collisions();
//update the balls' positions based on their velocities
update_ball_positions();
//re-draw
clear_screen();
draw_screen();
requestAnimationFrame(update);
}
fit_to_screen();
load_url_params();
requestAnimationFrame(update);
/******************
UTILITY FUNCS
******************/
function load_url_params(){
let data = JSON.parse(decodeURIComponent(location.href.split('?data=')[1]));
let spawn_areas = data.spawn_areas;
bollards = data.bollards;
tracing = data.tracing; collisions = data.collisions;
gravity = data.gravity; cor = data.cor;
builder_size = data.size;
for (let spawn of spawn_areas){
for (let i = 0; i < spawn.no_balls; i++){
let vel_components = get_vel_components(
rand_num(spawn.min_vel, spawn.max_vel),
spawn.vel_angle == -1 ? rand_num(0, 360) : 360-spawn.vel_angle
);
balls.push({
x: rand_num(spawn.x, spawn.x + spawn.w),
y: rand_num(spawn.y, spawn.y + spawn.h),
vx: vel_components.x,
vy: vel_components.y,
r: rand_num(spawn.min_radius, spawn.max_radius),
c: colors[parseInt(rand_num(0, colors.length))],
});
}
}
}
function get_vel_components(r, theta){
//r is the magnitude, theta is the angle in degrees
return {x: r * Math.cos(theta * (Math.PI / 180)),
y: r * Math.sin(theta * (Math.PI / 180))};
}
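//For example (illustrative): get_vel_components(100, 0) is {x: 100, y: 0} and
//get_vel_components(100, 90) is roughly {x: 0, y: 100}, so theta is measured
//from the positive x-axis of the canvas.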
function populate_balls(){
for (let b = 0; b < no_balls; b++){
balls.push({x: rand_num(spawn.x, spawn.x + spawn.w),
y: rand_num(spawn.y, spawn.y + spawn.h),
vx: rand_num(velocity * -1, velocity),
vy: rand_num(velocity * -1, velocity),
r: rand_num(radius.min, radius.max),
c: colors[parseInt(rand_num(0, colors.length))]});
}
}
function draw_screen(){
//draw the balls
for (let ball of balls){
draw_circle(ball.x/builder_size*cnvs.width,
ball.y/builder_size*cnvs.width,
ball.r/builder_size*cnvs.width, ball.c);
}
//draw bollards
for (let bollard of bollards){
switch (bollard.type){
case 'rect':
fill_polygon(bollard.points.map(p=>({x:p.x/builder_size*cnvs.width,
y:p.y/builder_size*cnvs.width})),
'#4efd');
break;
case 'circle':
//not implemented yet
break;
}
}
}
//adds the ball's velocities to their coordinates and applies gravity
function update_ball_positions(){
for (let b = 0; b < balls.length; b++){
let ball = balls[b];
ball.x += time_delta_s * ball.vx;
ball.y += time_delta_s * ball.vy;
}
}
//adds the gravity acceleration to the ball's vertical velocities
| balls[b].vy += gravity * time_delta_s;
}
}
function wall_collisions(){
for (let i = 0; i < balls.length; i++){
let ball = balls[i];
let nx = ball.x + ball.vx * time_delta_s;
let ny = ball.y + ball.vy * time_delta_s;
//set ball's position to over the wall so comes back in same position
if (nx - ball.r < 0 || nx + ball.r > builder_size){
ball.vx *= -1 * cor;
ball.x = ball.x + -ball.vx * time_delta_s;
}
if (ny - ball.r < 0 || ny + ball.r > builder_size){
ball.vy *= -1 * cor;
ball.y = ball.y + -ball.vy * time_delta_s;
}
}
}
function ball_collisions(){
if (!collisions) return
for (let b1 = 0; b1 < balls.length; b1++ ){
for (let b2 = b1 + 1; b2 < balls.length; b2++){
let next_b1 = {x: balls[b1].x + balls[b1].vx * time_delta_s,
y: balls[b1].y + balls[b1].vy * time_delta_s,
r: balls[b1].r};
let next_b2 = {x: balls[b2].x + balls[b2].vx * time_delta_s,
y: balls[b2].y + balls[b2].vy * time_delta_s,
r: balls[b2].r};
if (overlap(next_b1, next_b2)){
//assigning deltaX and deltaY for the positions of the balls
let deltaX = balls[b2].x - balls[b1].x;
let deltaY = balls[b2].y - balls[b1].y;
//initialising the current normal and tangential velocities to the collision for each ball
let normVel1 = normal_vel(deltaX, deltaY, b1);
let normVel2 = normal_vel(deltaX, deltaY, b2);
let tangVel1 = tangent_vel(deltaX, deltaY, b1);
let tangVel2 = tangent_vel(deltaX, deltaY, b2);
//applying the 'momentum' function to these velocities to work out the post collision velocities
let xNormVels = momentum(normVel1.x, normVel2.x, Math.pow(balls[b1].r, 2), Math.pow(balls[b2].r, 2));
let yNormVels = momentum(normVel1.y, normVel2.y, Math.pow(balls[b1].r, 2), Math.pow(balls[b2].r, 2));
//reassigning the post collision velocities
normVel1.x = xNormVels[0] * cor;
normVel2.x = xNormVels[1] * cor;
normVel1.y = yNormVels[0] * cor;
normVel2.y = yNormVels[1] * cor;
//setting the actual velocities of the balls to the sum of the normal and tangential velocities
balls[b1].vx = normVel1.x + tangVel1.x;
balls[b1].vy = normVel1.y + tangVel1.y;
balls[b2].vx = normVel2.x + tangVel2.x;
balls[b2].vy = normVel2.y + tangVel2.y;
}
}
}
}
function normal_vel(deltaX, deltaY, b){
let k = (-1 / (deltaY * deltaY + deltaX * deltaX)) * ((-1 * deltaX * balls[b].vx) - (deltaY * balls[b].vy));
let nX = k * deltaX;
let nY = k * deltaY;
return {x: nX, y: nY};
}
function tangent_vel(deltaX, deltaY, b){
let k = (-1 / (deltaY * deltaY + deltaX * deltaX)) * ((deltaY * balls[b].vx) - (deltaX * balls[b].vy));
let tX = k * -1 * deltaY;
let tY = k * deltaX;
return {x: tX, y: tY};
}
function bollard_collisions(){
//SPLIT THIS FUNCTION TO AN INDIVIDUAL bollard_collision(ball_index) function
for (let i = 0; i < balls.length; i++){
let collided = false;
//nxtb is next ball
let nxtb = {x: balls[i].x + balls[i].vx * time_delta_s,
y: balls[i].y + balls[i].vy * time_delta_s,
r: balls[i].r}
for (let bollard of bollards){
switch (bollard.type){
case 'rect':
//array of | function apply_gravity(){
for (let b = 0; b < balls.length; b++){
| random_line_split |
script.js | deltaX and deltaY for the positions of the balls
let deltaX = balls[b2].x - balls[b1].x;
let deltaY = balls[b2].y - balls[b1].y;
//initialising the current normal and tangential velocities to the collision for each ball
let normVel1 = normal_vel(deltaX, deltaY, b1);
let normVel2 = normal_vel(deltaX, deltaY, b2);
let tangVel1 = tangent_vel(deltaX, deltaY, b1);
let tangVel2 = tangent_vel(deltaX, deltaY, b2);
//applying the 'momentum' function to these velocities to work out the post collision velocities
let xNormVels = momentum(normVel1.x, normVel2.x, Math.pow(balls[b1].r, 2), Math.pow(balls[b2].r, 2));
let yNormVels = momentum(normVel1.y, normVel2.y, Math.pow(balls[b1].r, 2), Math.pow(balls[b2].r, 2));
//reassigning the post collision velocities
normVel1.x = xNormVels[0] * cor;
normVel2.x = xNormVels[1] * cor;
normVel1.y = yNormVels[0] * cor;
normVel2.y = yNormVels[1] * cor;
//setting the actual velocities of the balls to the sum of the normal and tangential velocities
balls[b1].vx = normVel1.x + tangVel1.x;
balls[b1].vy = normVel1.y + tangVel1.y;
balls[b2].vx = normVel2.x + tangVel2.x;
balls[b2].vy = normVel2.y + tangVel2.y;
}
}
}
}
function normal_vel(deltaX, deltaY, b){
let k = (-1 / (deltaY * deltaY + deltaX * deltaX)) * ((-1 * deltaX * balls[b].vx) - (deltaY * balls[b].vy));
let nX = k * deltaX;
let nY = k * deltaY;
return {x: nX, y: nY};
}
function tangent_vel(deltaX, deltaY, b){
let k = (-1 / (deltaY * deltaY + deltaX * deltaX)) * ((deltaY * balls[b].vx) - (deltaX * balls[b].vy));
let tX = k * -1 * deltaY;
let tY = k * deltaX;
return {x: tX, y: tY};
}
function bollard_collisions(){
//SPLIT THIS FUNCTION TO AN INDIVIDUAL bollard_collision(ball_index) function
for (let i = 0; i < balls.length; i++){
let collided = false;
//nxtb is next ball
let nxtb = {x: balls[i].x + balls[i].vx * time_delta_s,
y: balls[i].y + balls[i].vy * time_delta_s,
r: balls[i].r}
for (let bollard of bollards){
switch (bollard.type){
case 'rect':
//array of [sidex, sidey, opp. sidex, opp. sidey]
let sides = [[0,1,3,2],[1,2,0,3],[2,3,1,0],[0,3,1,2]];
for (let side of sides){
let cs = side.map(c=>bollard.points[c]);
if (Math.abs(point_line_dist(nxtb, cs[0], cs[1])) < nxtb.r && //if hitting this side...
point_line_dist(nxtb, cs[0], cs[2]) * //and in between edge sides
point_line_dist(nxtb, cs[1], cs[3]) < 0) {//== dist(cs[0], cs[1])){
//see papers for calculations (var meanings) ~done on 19/1/19
let dx = cs[1].x - cs[0].x;
let dy = cs[1].y - cs[0].y;
let a = (dx*balls[i].vx+dy*balls[i].vy)/(dx*dx+dy*dy);
let b = (dy*balls[i].vx-dx*balls[i].vy)/(dx*dx+dy*dy);
let vel_tang = {x: dx * a, y: dy * a};
let vel_norm = {x: dy * b, y: -dx * b};
//reverse normal component
vel_norm.x *= -1;
vel_norm.y *= -1;
//reassign components
balls[i].vx = vel_tang.x + vel_norm.x;
balls[i].vy = vel_tang.y + vel_norm.y;
collided = true;
}
}
break;
case 'circle':
break;
}
if (collided) break;
}
if (collided) break;
}
return;
//////////////
for (let bol = 0; bol < bollards.length; bol++){
for (let bal = 0; bal < balls.length; bal++){
let bollard = bollards[bol];
let ball = balls[bal];
let next_ball = {x: ball.x + ball.vx * time_delta_s,
y: ball.y + ball.vy * time_delta_s,
r: ball.r}
if (overlap(ball, bollard)){
//assigning deltaX and deltaY for the positions of the balls
let deltaX = ball.x - bollard.x;
let deltaY = ball.y - bollard.y;
//initialising the current normal and tangential velocities to the collision for each ball
let normVel2 = normal_vel(deltaX, deltaY, bal);
let tangVel2 = tangent_vel(deltaX, deltaY, bal);
//applying the 'momentum' function to these velocities to work out the post collision velocities
let xNormVels = momentum(0, normVel2.x, 100000000000000, ball.r);
let yNormVels = momentum(0, normVel2.y, 100000000000000, ball.r);
//reassigning the post collision velocities
normVel2.x = xNormVels[1] * cor;
normVel2.y = yNormVels[1] * cor;
//setting the actual velocities of the balls to the sum of the normal and tangential velocities
ball.vx = normVel2.x + tangVel2.x;
ball.vy = normVel2.y + tangVel2.y;
}
}
}
}
function point_line_dist(p, c1, c2){
//returns distance between p and the line with c1 and c2 on it
//see papers for calculations
return (-p.x*(c2.y-c1.y)+p.y*(c2.x-c1.x)+c1.x*c2.y-c2.x*c1.y)/dist(c1,c2);
}
//distance between two objects with x,y attrs.
function dist(c1, c2){
return ((c2.x - c1.x) ** 2 + (c2.y - c1.y) ** 2) ** 0.5;
}
//random float (incl. min, excl. max)
function rand_num(min, max){
return Math.random() * (max - min) + min;
}
//takes two objects with centres and radii and returns if the circles overlap
function overlap(b1, b2){
if (dist(b1, b2) < b1.r + b2.r){
return true;
}
return false;
}
//returns final velocities of masses after a 1d collision
function momentum(u1, u2, m1, m2){
//http://farside.ph.utexas.edu/teaching/301/lectures/node76.html
let v1 = (u1 * (m1 - m2) + 2 * m2 * u2) / (m1 + m2);
let v2 = (u2 * (m2 - m1) + 2 * m1 * u1) / (m1 + m2);
return [v1, v2];
}
//draws a circle on the main canvas
function draw_circle(x, y, radius, color){
//color = ('0' + (x / cnvs.width) * 0xff).toString(16).substr(-2).repeat(3);
ctx.beginPath();
ctx.arc(x, y, radius, 0, Math.PI * 2);
ctx.fillStyle = color;
ctx.fill();
}
//blanks the canvas for next update
function clear_screen(){
if (tracing) return;
ctx.clearRect(0, 0, cnvs.width, cnvs.height);
}
function fill_polygon(points, color) | {
ctx.beginPath();
ctx.moveTo(points[0].x, points[0].y);
for (let i = 1; i < points.length; i++){
ctx.lineTo(points[i].x, points[i].y);
}
ctx.stroke();
ctx.fillStyle = color;
ctx.fill();
} | identifier_body |
|
freelist.rs | 0 is returned.
pub fn allocate(&mut self, n: usize) -> pgid_t {
if self.ids.len() == 0 {
return 0;
}
let mut initial: pgid_t = 0;
let mut previd: pgid_t = 0;
let mut found_index: Option<usize> = None;
for i in 0..self.ids.len() {
let id = self.ids[i];
if id <= 1 {
panic!("invalid page allocation: {}", id);
}
// Reset initial page if this is not contiguous.
if previd == 0 || id - previd != 1 {
initial = id;
}
// If we found a contiguous block then remove it and return it.
if (id - initial) + 1 == n as pgid_t {
found_index = Some(i);
break;
}
previd = id
}
match found_index {
None => 0,
Some(idx) => {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if idx + 1 == n {
self.ids.drain(..idx+1);
} else {
self.ids.drain(idx-n+1..idx+1);
}
// Remove from the free cache
for i in 0 as pgid_t .. n as pgid_t {
self.cache.remove(&(initial + i));
}
initial
}
}
}
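// Illustration of the scan above (hypothetical values, not from the tests
// below): with ids = [3, 4, 5, 9], allocate(2) finds the contiguous run
// starting at 3, leaves ids = [5, 9] and returns 3, while a follow-up
// allocate(3) finds no run of three consecutive ids and returns 0.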
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
pub fn free(&mut self, txid: txid_t, p: Rc<RefCell<Page>>) {
let pgid = p.borrow().id;
if pgid <= 1 {
panic!("cannot free page 0 or 1: {}", pgid);
}
// Free page and all its overflow pages.
if !self.pending.contains_key(&txid) {
self.pending.insert(txid, Vec::new());
}
let ids_option = self.pending.get_mut(&txid);
match ids_option {
None => panic!("pending should not be None"),
Some(ids) => {
for id in pgid..pgid + 1 + p.borrow().overflow as pgid_t {
// Verify that page is not already free.
if self.cache.contains(&id) {
panic!("page {} already freed", id)
}
// Add to the freelist and cache.
ids.push(id);
self.cache.insert(id);
}
},
}
}
// release moves all page ids for a transaction id (or older) to the freelist.
pub fn release(&mut self, txid: txid_t) {
let mut m: Vec<pgid_t> = Vec::new();
self.pending.retain(|tid, ids| {
if *tid <= txid {
m.append(&mut ids.to_vec());
return false;
}
true
});
m.sort();
let mut new_ids: Vec<pgid_t> = Vec::with_capacity(self.ids.len() + m.len());
merge_pgids(&mut new_ids, &self.ids, &m);
self.ids = new_ids;
}
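// Sketch of the merge (hypothetical values): with ids = [4, 9] and pages
// [3, 7] pending under an old enough txid, release() leaves the freelist as
// the sorted union [3, 4, 7, 9]; pages pending for newer transactions stay
// in `pending`.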
// rollback removes the pages from a given pending tx.
pub fn rollback(&mut self, txid: txid_t) {
// Remove page ids from cache.
for id in &self.pending[&txid] {
self.cache.remove(id);
}
// Remove pages from pending list
self.pending.remove(&txid);
}
// freed returns whether a given page is in the free list
pub fn freed(&self, pgid: pgid_t) -> bool {
self.cache.contains(&pgid)
}
// read initializes the freelist from a freelist page.
pub fn read(&mut self, p: &Page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
let mut idx: usize = 0;
let mut count: usize = p.count as usize;
if count == 0xFFFF {
idx = 1;
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
count = unsafe { (*pgid_ptr) as usize };
}
// Copy the list of page ids from the freelist
if count == 0 | else {
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
self.ids.reserve(count - idx);
let mut pgids_slice = unsafe {
slice::from_raw_parts(pgid_ptr.offset(idx as isize), count - idx)
};
self.ids.append(&mut pgids_slice.to_vec());
// Make sure they're sorted.
self.ids.sort();
}
// Rebuild the page cache.
self.reindex();
}
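// Assumed on-page layout implied by the logic above (not normative): a small
// freelist is stored as [count][id0][id1]...; once the length no longer fits
// in the u16 count field the header holds 0xFFFF and the real length moves
// into the first pgid slot: [0xFFFF][len][id0][id1]...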
// writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
pub fn write(&self, p: &mut Page) {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= FREELIST_PAGE_FLAG;
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
let lenids = self.count();
if lenids == 0 {
p.count = lenids as u16;
} else if lenids < 0xFFFF {
p.count = lenids as u16;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr, 0, lenids)
};
*/
self.copyall(pgid_ptr);
} else {
p.count = 0xFFFF;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
unsafe {*pgid_ptr = lenids as u64;}
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr.offset(1), 0, lenids)
};
*/
self.copyall(unsafe {pgid_ptr.offset(1)});
}
}
// reload reads the freelist from a page and filters out pending items.
pub fn reload(&mut self, p: &Page) {
self.read(p);
// Build a cache of only pending pages.
let mut pcache: HashSet<pgid_t> = HashSet::new();
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
pcache.insert(*pending_id);
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
let mut a: Vec<pgid_t> = Vec::new();
for id in &self.ids {
if !pcache.contains(id) {
a.push(*id);
}
}
self.ids = a;
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
self.reindex();
}
// reindex rebuilds the free cache based on available and pending free lists.
pub fn reindex(&mut self) {
self.cache.clear();
self.cache.reserve(self.ids.len());
for id in &self.ids {
self.cache.insert(*id);
}
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
self.cache.insert(*pending_id);
}
}
}
}
#[cfg(test)]
mod tests {
use freelist::FreeList;
use std::rc::Rc;
use std::cell::RefCell;
use page::{Page, FREELIST_PAGE_FLAG};
use std::collections::{HashMap, HashSet};
use types::pgid_t;
use test::Bencher;
extern crate rand;
#[test]
fn freelist_free() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12]);
}
#[test]
fn freelist_free_overflow() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 3,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12,13,14,15]);
}
#[test]
fn freelist_release() {
let mut f | {
self.ids.clear();
} | conditional_block |
freelist.rs | 0 is returned.
pub fn allocate(&mut self, n: usize) -> pgid_t {
if self.ids.len() == 0 {
return 0;
}
let mut initial: pgid_t = 0;
let mut previd: pgid_t = 0;
let mut found_index: Option<usize> = None;
for i in 0..self.ids.len() {
let id = self.ids[i];
if id <= 1 {
panic!("invalid page allocation: {}", id);
}
// Reset initial page if this is not contiguous.
if previd == 0 || id - previd != 1 {
initial = id;
}
// If we found a contiguous block then remove it and return it.
if (id - initial) + 1 == n as pgid_t {
found_index = Some(i);
break;
}
previd = id
}
match found_index {
None => 0,
Some(idx) => {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if idx + 1 == n {
self.ids.drain(..idx+1);
} else {
self.ids.drain(idx-n+1..idx+1);
}
// Remove from the free cache
for i in 0 as pgid_t .. n as pgid_t {
self.cache.remove(&(initial + i));
}
initial
}
}
}
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
pub fn free(&mut self, txid: txid_t, p: Rc<RefCell<Page>>) {
let pgid = p.borrow().id;
if pgid <= 1 {
panic!("cannot free page 0 or 1: {}", pgid);
}
// Free page and all its overflow pages.
if !self.pending.contains_key(&txid) {
self.pending.insert(txid, Vec::new());
}
let ids_option = self.pending.get_mut(&txid);
match ids_option {
None => panic!("pending should not be None"),
Some(ids) => {
for id in pgid..pgid + 1 + p.borrow().overflow as pgid_t {
// Verify that page is not already free.
if self.cache.contains(&id) {
panic!("page {} already freed")
}
// Add to the freelist and cache.
ids.push(id);
self.cache.insert(id);
}
},
}
}
// release moves all page ids for a transaction id (or older) to the freelist.
pub fn release(&mut self, txid: txid_t) {
let mut m: Vec<pgid_t> = Vec::new();
self.pending.retain(|tid, ids| {
if *tid <= txid {
m.append(&mut ids.to_vec());
return false;
}
true
});
m.sort();
let mut new_ids: Vec<pgid_t> = Vec::with_capacity(self.ids.len() + m.len());
merge_pgids(&mut new_ids, &self.ids, &m);
self.ids = new_ids;
}
// rollback removes the pages from a given pending tx.
pub fn rollback(&mut self, txid: txid_t) {
// Remove page ids from cache.
for id in &self.pending[&txid] {
self.cache.remove(id);
}
// Remove pages from pending list
self.pending.remove(&txid);
}
// freed returns whether a given page is in the free list
pub fn freed(&self, pgid: pgid_t) -> bool {
self.cache.contains(&pgid)
}
// read initializes the freelist from a freelist page.
pub fn read(&mut self, p: &Page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
let mut idx: usize = 0;
let mut count: usize = p.count as usize;
if count == 0xFFFF {
idx = 1;
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
count = unsafe { (*pgid_ptr) as usize };
}
// Copy the list of page ids from the freelist
if count == 0 {
self.ids.clear();
} else {
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
self.ids.reserve(count - idx);
let mut pgids_slice = unsafe {
slice::from_raw_parts(pgid_ptr.offset(idx as isize), count)
};
self.ids.append(&mut pgids_slice.to_vec());
// Make sure they're sorted.
self.ids.sort();
}
// Rebuild the page cache.
self.reindex();
}
// writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
pub fn write(&self, p: &mut Page) | } else {
p.count = 0xFFFF;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
unsafe {*pgid_ptr = lenids as u64;}
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr.offset(1), 0, lenids)
};
*/
self.copyall(unsafe {pgid_ptr.offset(1)});
}
}
// reload reads the freelist from a page and filters out pending items.
pub fn reload(&mut self, p: &Page) {
self.read(p);
// Build a cache of only pending pages.
let mut pcache: HashSet<pgid_t> = HashSet::new();
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
pcache.insert(*pending_id);
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
let mut a: Vec<pgid_t> = Vec::new();
for id in &self.ids {
if !pcache.contains(id) {
a.push(*id);
}
}
self.ids = a;
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
self.reindex();
}
// reindex rebuilds the free cache based on available and pending free lists.
pub fn reindex(&mut self) {
self.cache.clear();
self.cache.reserve(self.ids.len());
for id in &self.ids {
self.cache.insert(*id);
}
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
self.cache.insert(*pending_id);
}
}
}
}
#[cfg(test)]
mod tests {
use freelist::FreeList;
use std::rc::Rc;
use std::cell::RefCell;
use page::{Page, FREELIST_PAGE_FLAG};
use std::collections::{HashMap, HashSet};
use types::pgid_t;
use test::Bencher;
extern crate rand;
#[test]
fn freelist_free() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12]);
}
#[test]
fn freelist_free_overflow() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 3,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12,13,14,15]);
}
#[test]
fn freelist_release() {
let mut f | {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= FREELIST_PAGE_FLAG;
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
let lenids = self.count();
if lenids == 0 {
p.count = lenids as u16;
} else if lenids < 0xFFFF {
p.count = lenids as u16;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr, 0, lenids)
};
*/
self.copyall(pgid_ptr); | identifier_body |
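allocate above scans the sorted id list for a run of n consecutive page ids, restarting the candidate run whenever the sequence breaks, and only then splices the run out of the list and drops it from the cache. The same scan in a short Python sketch (illustrative only, not the crate's code):

# Illustrative model of the contiguous-run scan in allocate; ids are sorted and all > 1.
def find_contiguous_run(ids, n):
    initial = previd = 0
    for i, pid in enumerate(ids):
        if previd == 0 or pid - previd != 1:
            initial = pid          # sequence broke, restart the candidate run here
        if pid - initial + 1 == n:
            return initial, i      # starting id and index of the run's last element
        previd = pid
    return 0, None                 # no block of n contiguous pages available

# Mirrors the freelist_allocate test: in [3,4,5,6,7,9,12,13,18] a run of 3 starts at 3.
assert find_contiguous_run([3, 4, 5, 6, 7, 9, 12, 13, 18], 3) == (3, 2)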
freelist.rs | let ids_option = self.pending.get_mut(&txid);
match ids_option {
None => panic!("pending should not be None"),
Some(ids) => {
for id in pgid..pgid + 1 + p.borrow().overflow as pgid_t {
// Verify that page is not already free.
if self.cache.contains(&id) {
panic!("page {} already freed")
}
// Add to the freelist and cache.
ids.push(id);
self.cache.insert(id);
}
},
}
}
// release moves all page ids for a transaction id (or older) to the freelist.
pub fn release(&mut self, txid: txid_t) {
let mut m: Vec<pgid_t> = Vec::new();
self.pending.retain(|tid, ids| {
if *tid <= txid {
m.append(&mut ids.to_vec());
return false;
}
true
});
m.sort();
let mut new_ids: Vec<pgid_t> = Vec::with_capacity(self.ids.len() + m.len());
merge_pgids(&mut new_ids, &self.ids, &m);
self.ids = new_ids;
}
// rollback removes the pages from a given pending tx.
pub fn rollback(&mut self, txid: txid_t) {
// Remove page ids from cache.
for id in &self.pending[&txid] {
self.cache.remove(id);
}
// Remove pages from pending list
self.pending.remove(&txid);
}
// freed returns whether a given page is in the free list
pub fn freed(&self, pgid: pgid_t) -> bool {
self.cache.contains(&pgid)
}
// read initializes the freelist from a freelist page.
pub fn read(&mut self, p: &Page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
let mut idx: usize = 0;
let mut count: usize = p.count as usize;
if count == 0xFFFF {
idx = 1;
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
count = unsafe { (*pgid_ptr) as usize };
}
// Copy the list of page ids from the freelist
if count == 0 {
self.ids.clear();
} else {
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
self.ids.reserve(count - idx);
let mut pgids_slice = unsafe {
slice::from_raw_parts(pgid_ptr.offset(idx as isize), count)
};
self.ids.append(&mut pgids_slice.to_vec());
// Make sure they're sorted.
self.ids.sort();
}
// Rebuild the page cache.
self.reindex();
}
// writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
pub fn write(&self, p: &mut Page) {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= FREELIST_PAGE_FLAG;
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
let lenids = self.count();
if lenids == 0 {
p.count = lenids as u16;
} else if lenids < 0xFFFF {
p.count = lenids as u16;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr, 0, lenids)
};
*/
self.copyall(pgid_ptr);
} else {
p.count = 0xFFFF;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
unsafe {*pgid_ptr = lenids as u64;}
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr.offset(1), 0, lenids)
};
*/
self.copyall(unsafe {pgid_ptr.offset(1)});
}
}
// reload reads the freelist from a page and filters out pending items.
pub fn reload(&mut self, p: &Page) {
self.read(p);
// Build a cache of only pending pages.
let mut pcache: HashSet<pgid_t> = HashSet::new();
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
pcache.insert(*pending_id);
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
let mut a: Vec<pgid_t> = Vec::new();
for id in &self.ids {
if !pcache.contains(id) {
a.push(*id);
}
}
self.ids = a;
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
self.reindex();
}
// reindex rebuilds the free cache based on available and pending free lists.
pub fn reindex(&mut self) {
self.cache.clear();
self.cache.reserve(self.ids.len());
for id in &self.ids {
self.cache.insert(*id);
}
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
self.cache.insert(*pending_id);
}
}
}
}
#[cfg(test)]
mod tests {
use freelist::FreeList;
use std::rc::Rc;
use std::cell::RefCell;
use page::{Page, FREELIST_PAGE_FLAG};
use std::collections::{HashMap, HashSet};
use types::pgid_t;
use test::Bencher;
extern crate rand;
#[test]
fn freelist_free() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12]);
}
#[test]
fn freelist_free_overflow() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 3,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12,13,14,15]);
}
#[test]
fn freelist_release() {
let mut f = FreeList::new();
let page1 = Rc::new(RefCell::new(Page {
id: 12,
flags: 0,
count: 0,
overflow: 1,
ptr: 0,
}));
f.free(100, Rc::clone(&page1));
let page2 = Rc::new(RefCell::new(Page {
id: 9,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page2));
let page3 = Rc::new(RefCell::new(Page {
id: 39,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(102, Rc::clone(&page3));
f.release(100);
f.release(101);
assert_eq!(f.ids, vec![9,12,13]);
f.release(102);
assert_eq!(f.ids, vec![9,12,13, 39]);
}
#[test]
fn freelist_allocate() {
let mut f = FreeList {
ids: vec![3,4,5,6,7,9,12,13,18],
pending: HashMap::new(),
cache: HashSet::new(),
};
assert_eq!(f.allocate(3), 3);
assert_eq!(f.allocate(1), 6);
assert_eq!(f.allocate(3), 0);
assert_eq!(f.allocate(2), 12);
assert_eq!(f.allocate(1), 7);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.ids, vec![9,18]);
assert_eq!(f.allocate(1), 9);
assert_eq!(f.allocate(1), 18);
assert_eq!(f.allocate(1), 0);
assert_eq!(f.ids, vec![]);
}
#[test]
fn | freelist_read | identifier_name |
|
freelist.rs | 0 is returned.
pub fn allocate(&mut self, n: usize) -> pgid_t {
if self.ids.len() == 0 {
return 0;
}
let mut initial: pgid_t = 0;
let mut previd: pgid_t = 0;
let mut found_index: Option<usize> = None;
for i in 0..self.ids.len() {
let id = self.ids[i];
if id <= 1 {
panic!("invalid page allocation: {}", id);
}
// Reset initial page if this is not contiguous.
if previd == 0 || id - previd != 1 {
initial = id;
}
// If we found a contiguous block then remove it and return it.
if (id - initial) + 1 == n as pgid_t {
found_index = Some(i);
break;
}
previd = id
}
match found_index {
None => 0,
Some(idx) => {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if idx + 1 == n {
self.ids.drain(..idx+1);
} else {
self.ids.drain(idx-n+1..idx+1);
}
// Remove from the free cache
for i in 0 as pgid_t .. n as pgid_t {
self.cache.remove(&(initial + i));
}
initial
}
}
}
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
pub fn free(&mut self, txid: txid_t, p: Rc<RefCell<Page>>) {
let pgid = p.borrow().id;
if pgid <= 1 {
panic!("cannot free page 0 or 1: {}", pgid);
}
// Free page and all its overflow pages.
if !self.pending.contains_key(&txid) {
self.pending.insert(txid, Vec::new());
}
let ids_option = self.pending.get_mut(&txid);
match ids_option {
None => panic!("pending should not be None"),
Some(ids) => {
for id in pgid..pgid + 1 + p.borrow().overflow as pgid_t {
// Verify that page is not already free.
if self.cache.contains(&id) {
panic!("page {} already freed")
}
// Add to the freelist and cache.
ids.push(id);
self.cache.insert(id);
}
},
}
}
// release moves all page ids for a transaction id (or older) to the freelist.
pub fn release(&mut self, txid: txid_t) {
let mut m: Vec<pgid_t> = Vec::new();
self.pending.retain(|tid, ids| {
if *tid <= txid {
m.append(&mut ids.to_vec());
return false;
}
true
});
m.sort();
let mut new_ids: Vec<pgid_t> = Vec::with_capacity(self.ids.len() + m.len());
merge_pgids(&mut new_ids, &self.ids, &m);
self.ids = new_ids;
}
// rollback removes the pages from a given pending tx.
pub fn rollback(&mut self, txid: txid_t) {
// Remove page ids from cache.
for id in &self.pending[&txid] {
self.cache.remove(id);
}
// Remove pages from pending list
self.pending.remove(&txid);
}
// freed returns whether a given page is in the free list
pub fn freed(&self, pgid: pgid_t) -> bool {
self.cache.contains(&pgid)
}
// read initializes the freelist from a freelist page.
pub fn read(&mut self, p: &Page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
let mut idx: usize = 0;
let mut count: usize = p.count as usize;
if count == 0xFFFF {
idx = 1;
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
count = unsafe { (*pgid_ptr) as usize };
}
// Copy the list of page ids from the freelist
if count == 0 {
self.ids.clear();
} else {
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
self.ids.reserve(count - idx);
let mut pgids_slice = unsafe {
slice::from_raw_parts(pgid_ptr.offset(idx as isize), count)
};
self.ids.append(&mut pgids_slice.to_vec());
// Make sure they're sorted.
self.ids.sort();
}
// Rebuild the page cache.
self.reindex(); | // writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
pub fn write(&self, p: &mut Page) {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= FREELIST_PAGE_FLAG;
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
let lenids = self.count();
if lenids == 0 {
p.count = lenids as u16;
} else if lenids < 0xFFFF {
p.count = lenids as u16;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr, 0, lenids)
};
*/
self.copyall(pgid_ptr);
} else {
p.count = 0xFFFF;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
unsafe {*pgid_ptr = lenids as u64;}
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr.offset(1), 0, lenids)
};
*/
self.copyall(unsafe {pgid_ptr.offset(1)});
}
}
// reload reads the freelist from a page and filters out pending items.
pub fn reload(&mut self, p: &Page) {
self.read(p);
// Build a cache of only pending pages.
let mut pcache: HashSet<pgid_t> = HashSet::new();
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
pcache.insert(*pending_id);
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
let mut a: Vec<pgid_t> = Vec::new();
for id in &self.ids {
if !pcache.contains(id) {
a.push(*id);
}
}
self.ids = a;
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
self.reindex();
}
// reindex rebuilds the free cache based on available and pending free lists.
pub fn reindex(&mut self) {
self.cache.clear();
self.cache.reserve(self.ids.len());
for id in &self.ids {
self.cache.insert(*id);
}
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
self.cache.insert(*pending_id);
}
}
}
}
#[cfg(test)]
mod tests {
use freelist::FreeList;
use std::rc::Rc;
use std::cell::RefCell;
use page::{Page, FREELIST_PAGE_FLAG};
use std::collections::{HashMap, HashSet};
use types::pgid_t;
use test::Bencher;
extern crate rand;
#[test]
fn freelist_free() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12]);
}
#[test]
fn freelist_free_overflow() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 3,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12,13,14,15]);
}
#[test]
fn freelist_release() {
let mut f = | }
| random_line_split |
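release above drains every pending list whose transaction id is at or below the given txid and merges those ids into the already-sorted free list; merge_pgids, defined elsewhere in the crate, is essentially the merge step of merge sort over two sorted id vectors. A Python sketch of the same behaviour (the names here are illustrative):

import heapq

# Illustrative model of release(): move pending ids for tx <= txid into the sorted free list.
def release(free_ids, pending, txid):
    drained = []
    for tid in list(pending):
        if tid <= txid:
            drained.extend(pending.pop(tid))
    drained.sort()
    return list(heapq.merge(free_ids, drained))   # merge of two sorted lists, like merge_pgids

# Mirrors the freelist_release test: pages 12,13 and 9 freed under tx 100, page 39 under tx 102.
pending = {100: [12, 13, 9], 102: [39]}
ids = release([], pending, 101)      # -> [9, 12, 13]
ids = release(ids, pending, 102)     # -> [9, 12, 13, 39]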
basic_agent.py | a copy, see <https://opensource.org/licenses/MIT>.
""" This module implements an agent that roams around a track following random
waypoints and avoiding other vehicles.
The agent also responds to traffic lights. """
import math
from typing import List
import carla
from carla.libcarla import ActorList, Actor
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.tools.misc import get_nearest_traffic_light, get_speed
class BasicAgent(Agent):
"""
BasicAgent implements a basic agent that navigates scenes to reach a given
target destination. This agent respects traffic lights and other vehicles.
"""
def __init__(self, vehicle, target_speed=20):
| self._target_speed = target_speed
self._grp = None
self.drawn_lights = False
self.is_affected_by_traffic_light = False
def set_destination(self, location):
"""
This method creates a list of waypoints from agent's position to destination location
based on the route returned by the global router
"""
start_waypoint = self._map.get_waypoint(self._vehicle.get_location())
end_waypoint = self._map.get_waypoint(
carla.Location(location[0], location[1], location[2]))
route_trace = self._trace_route(start_waypoint, end_waypoint)
assert route_trace
self._local_planner.set_global_plan(route_trace)
def _trace_route(self, start_waypoint, end_waypoint):
"""
This method sets up a global router and returns the optimal route
from start_waypoint to end_waypoint
"""
# Setting up global router
if self._grp is None:
dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(), self._hop_resolution)
grp = GlobalRoutePlanner(dao)
grp.setup()
self._grp = grp
# Obtain route plan
route = self._grp.trace_route(
start_waypoint.transform.location,
end_waypoint.transform.location)
return route
def run_step(self, debug=False):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights
# and other vehicles
actor_list = self._world.get_actors() # type: ActorList
vehicle_list = actor_list.filter("*vehicle*") # type: List[Actor]
pedestrians_list = actor_list.filter("*walker.pedestrian*")
lights_list = actor_list.filter("*traffic_light*") # type: List[carla.TrafficLight]
if not self.drawn_lights and debug:
for light in lights_list:
self._world.debug.draw_box(
carla.BoundingBox(light.trigger_volume.location + light.get_transform().location,
light.trigger_volume.extent * 2),
carla.Rotation(0, 0, 0), 0.05, carla.Color(255, 128, 0, 0), 0)
self.drawn_lights = True
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
if debug:
print('!!! VEHICLE BLOCKING AHEAD [{}])'.format(vehicle.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# Check for pedestrians
pedestrian_state, pedestrian = self._is_pedestrian_hazard(pedestrians_list)
if pedestrian_state:
if debug:
print('!!! PEDESTRIAN BLOCKING AHEAD [{}])'.format(pedestrian.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
light_state, traffic_light = self._is_light_red(lights_list)
if light_state:
if debug:
print('=== RED LIGHT AHEAD [{}])'.format(traffic_light.id))
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
new_target_speed = self._update_target_speed(hazard_detected, debug)
# if hazard_detected:
# control = self.emergency_stop()
# else:
# self._state = AgentState.NAVIGATING
# self.braking_intial_speed = None
# # standard local planner behavior
# control = self._local_planner.run_step(debug=debug)
# if self.stopping_for_traffic_light:
# control.steer = 0.0
self._state = AgentState.NAVIGATING
self.braking_intial_speed = None
# standard local planner behavior
control = self._local_planner.run_step(debug=debug)
if self.stopping_for_traffic_light:
control.steer = 0.0
# Prevent from steering randomly when stopped
if math.fabs(get_speed(self._vehicle)) < 0.1:
control.steer = 0
return control
def done(self):
"""
Check whether the agent has reached its destination.
:return bool
"""
return self._local_planner.done()
def _update_target_speed(self, hazard_detected, debug):
if hazard_detected:
self._set_target_speed(0)
return 0
MAX_PERCENTAGE_OF_SPEED_LIMIT = 0.75
speed_limit = self._vehicle.get_speed_limit() # km/h
current_speed = get_speed(self._vehicle)
new_target_speed = speed_limit * MAX_PERCENTAGE_OF_SPEED_LIMIT
use_custom_traffic_light_speed = False
if use_custom_traffic_light_speed:
TRAFFIC_LIGHT_SECONDS_AWAY = 3
METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT = 8
get_traffic_light = self._vehicle.get_traffic_light() # type: carla.TrafficLight
nearest_traffic_light, distance = get_nearest_traffic_light(self._vehicle) # type: carla.TrafficLight, float
distance_to_light = distance
distance -= METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT
if nearest_traffic_light is None:
nearest_traffic_light = get_traffic_light
# Draw debug info
if debug and nearest_traffic_light is not None:
self._world.debug.draw_point(
nearest_traffic_light.get_transform().location,
size=1,
life_time=0.1,
color=carla.Color(255, 15, 15))
"""
if get_traffic_light is not None:
print("get_traffic_light: ", get_traffic_light.get_location() if get_traffic_light is not None else "None", " ", get_traffic_light.state if get_traffic_light is not None else "None")
if nearest_traffic_light is not None:
print("nearest_traffic_light: ", nearest_traffic_light.get_location() if nearest_traffic_light is not None else "None", " ", nearest_traffic_light.state if nearest_traffic_light is not None else "None")
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
self.is_affected_by_traffic_light = False
self.stopping_for_traffic_light = False
if ego_vehicle_waypoint.is_junction:
# It is too late. Do not block the intersection! Keep going!
pass
# Check if we should start braking
elif distance_to_light <= TRAFFIC_LIGHT_SECONDS_AWAY * new_target_speed / 3.6 and nearest_traffic_light is not None and nearest_traffic_light.state != carla.TrafficLightState.Green:
self.is_affected_by_traffic_light = True
brake_distance = current_speed / 3.6 * TRAFFIC_LIGHT_SECONDS_AWAY
print("TL distance: ", distance_to_light, ", distance (to stop): ", distance, ", distance travel 4 secs: ", brake_distance)
new_target_speed = self._target_speed
if distance <= 0:
new_target_speed = 0
self.stopping_for_traffic_light = True
print("Stopping before traffic light, distance ", distance, "m")
elif brake_distance >= distance and brake_distance != 0:
percent_before_light = (brake_distance - distance) / brake_distance
new_target_speed = speed_limit | """
:param vehicle: actor to apply to local planner logic onto
"""
super(BasicAgent, self).__init__(vehicle)
self.stopping_for_traffic_light = False
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
args_lateral_dict = {
'K_P': 0.75,
'K_D': 0.001,
'K_I': 1,
'dt': 1.0 / 20.0}
self._local_planner = LocalPlanner(
self._vehicle, opt_dict={'target_speed': target_speed,
'lateral_control_dict': args_lateral_dict})
self._hop_resolution = 2.0
self._path_seperation_hop = 2
self._path_seperation_threshold = 0.5 | identifier_body |
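The constructor above wires the vehicle to a LocalPlanner with a lateral PID configuration and a 2 m global-route resolution; in use, a script sets a destination once and then calls run_step every simulation tick, applying the returned control to the vehicle. A minimal driving loop under those assumptions (world, vehicle and the destination coordinates are placeholders for objects the caller already has):

# Minimal usage sketch; assumes a connected CARLA client with `world` and a spawned `vehicle`.
from agents.navigation.basic_agent import BasicAgent

agent = BasicAgent(vehicle, target_speed=20)
agent.set_destination((dest_x, dest_y, dest_z))   # any indexable x/y/z triple works

while not agent.done():
    world.tick()                                  # advance the simulation one step
    control = agent.run_step(debug=False)
    vehicle.apply_control(control)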
basic_agent.py | a copy, see <https://opensource.org/licenses/MIT>.
""" This module implements an agent that roams around a track following random
waypoints and avoiding other vehicles.
The agent also responds to traffic lights. """
import math
from typing import List
import carla
from carla.libcarla import ActorList, Actor
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.tools.misc import get_nearest_traffic_light, get_speed
class BasicAgent(Agent):
"""
BasicAgent implements a basic agent that navigates scenes to reach a given
target destination. This agent respects traffic lights and other vehicles.
"""
def __init__(self, vehicle, target_speed=20):
"""
:param vehicle: actor to apply to local planner logic onto
"""
super(BasicAgent, self).__init__(vehicle)
self.stopping_for_traffic_light = False
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
args_lateral_dict = {
'K_P': 0.75,
'K_D': 0.001,
'K_I': 1,
'dt': 1.0 / 20.0}
self._local_planner = LocalPlanner(
self._vehicle, opt_dict={'target_speed': target_speed,
'lateral_control_dict': args_lateral_dict})
self._hop_resolution = 2.0
self._path_seperation_hop = 2
self._path_seperation_threshold = 0.5
self._target_speed = target_speed
self._grp = None
self.drawn_lights = False
self.is_affected_by_traffic_light = False
def set_destination(self, location):
"""
This method creates a list of waypoints from agent's position to destination location
based on the route returned by the global router
"""
start_waypoint = self._map.get_waypoint(self._vehicle.get_location())
end_waypoint = self._map.get_waypoint(
carla.Location(location[0], location[1], location[2]))
route_trace = self._trace_route(start_waypoint, end_waypoint)
assert route_trace
self._local_planner.set_global_plan(route_trace)
def _trace_route(self, start_waypoint, end_waypoint):
"""
This method sets up a global router and returns the optimal route
from start_waypoint to end_waypoint
"""
# Setting up global router
if self._grp is None:
dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(), self._hop_resolution)
grp = GlobalRoutePlanner(dao)
grp.setup()
self._grp = grp
# Obtain route plan
route = self._grp.trace_route(
start_waypoint.transform.location,
end_waypoint.transform.location)
return route
def run_step(self, debug=False):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights
# and other vehicles
actor_list = self._world.get_actors() # type: ActorList
vehicle_list = actor_list.filter("*vehicle*") # type: List[Actor]
pedestrians_list = actor_list.filter("*walker.pedestrian*")
lights_list = actor_list.filter("*traffic_light*") # type: List[carla.TrafficLight]
if not self.drawn_lights and debug:
for light in lights_list:
self._world.debug.draw_box(
carla.BoundingBox(light.trigger_volume.location + light.get_transform().location,
light.trigger_volume.extent * 2),
carla.Rotation(0, 0, 0), 0.05, carla.Color(255, 128, 0, 0), 0)
self.drawn_lights = True
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
if debug:
print('!!! VEHICLE BLOCKING AHEAD [{}])'.format(vehicle.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# Check for pedestrians
pedestrian_state, pedestrian = self._is_pedestrian_hazard(pedestrians_list)
if pedestrian_state:
if debug:
print('!!! PEDESTRIAN BLOCKING AHEAD [{}])'.format(pedestrian.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
light_state, traffic_light = self._is_light_red(lights_list)
if light_state:
if debug:
print('=== RED LIGHT AHEAD [{}])'.format(traffic_light.id))
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
new_target_speed = self._update_target_speed(hazard_detected, debug)
# if hazard_detected:
# control = self.emergency_stop()
# else:
# self._state = AgentState.NAVIGATING
# self.braking_intial_speed = None
# # standard local planner behavior
# control = self._local_planner.run_step(debug=debug)
# if self.stopping_for_traffic_light:
# control.steer = 0.0 |
self._state = AgentState.NAVIGATING
self.braking_intial_speed = None
# standard local planner behavior
control = self._local_planner.run_step(debug=debug)
if self.stopping_for_traffic_light:
control.steer = 0.0
# Prevent from steering randomly when stopped
if math.fabs(get_speed(self._vehicle)) < 0.1:
control.steer = 0
return control
def done(self):
"""
Check whether the agent has reached its destination.
:return bool
"""
return self._local_planner.done()
def _update_target_speed(self, hazard_detected, debug):
if hazard_detected:
self._set_target_speed(0)
return 0
MAX_PERCENTAGE_OF_SPEED_LIMIT = 0.75
speed_limit = self._vehicle.get_speed_limit() # km/h
current_speed = get_speed(self._vehicle)
new_target_speed = speed_limit * MAX_PERCENTAGE_OF_SPEED_LIMIT
use_custom_traffic_light_speed = False
if use_custom_traffic_light_speed:
TRAFFIC_LIGHT_SECONDS_AWAY = 3
METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT = 8
get_traffic_light = self._vehicle.get_traffic_light() # type: carla.TrafficLight
nearest_traffic_light, distance = get_nearest_traffic_light(self._vehicle) # type: carla.TrafficLight, float
distance_to_light = distance
distance -= METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT
if nearest_traffic_light is None:
nearest_traffic_light = get_traffic_light
# Draw debug info
if debug and nearest_traffic_light is not None:
self._world.debug.draw_point(
nearest_traffic_light.get_transform().location,
size=1,
life_time=0.1,
color=carla.Color(255, 15, 15))
"""
if get_traffic_light is not None:
print("get_traffic_light: ", get_traffic_light.get_location() if get_traffic_light is not None else "None", " ", get_traffic_light.state if get_traffic_light is not None else "None")
if nearest_traffic_light is not None:
print("nearest_traffic_light: ", nearest_traffic_light.get_location() if nearest_traffic_light is not None else "None", " ", nearest_traffic_light.state if nearest_traffic_light is not None else "None")
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
self.is_affected_by_traffic_light = False
self.stopping_for_traffic_light = False
if ego_vehicle_waypoint.is_junction:
# It is too late. Do not block the intersection! Keep going!
pass
# Check if we should start braking
elif distance_to_light <= TRAFFIC_LIGHT_SECONDS_AWAY * new_target_speed / 3.6 and nearest_traffic_light is not None and nearest_traffic_light.state != carla.TrafficLightState.Green:
self.is_affected_by_traffic_light = True
brake_distance = current_speed / 3.6 * TRAFFIC_LIGHT_SECONDS_AWAY
print("TL distance: ", distance_to_light, ", distance (to stop): ", distance, ", distance travel 4 secs: ", brake_distance)
new_target_speed = self._target_speed
if distance <= 0:
new_target_speed = 0
self.stopping_for_traffic_light = True
print("Stopping before traffic light, distance ", distance, "m")
elif brake_distance >= distance and brake_distance != 0:
percent_before_light = (brake_distance - distance) / brake_distance
new_target_speed = speed | random_line_split |
|
basic_agent.py | a copy, see <https://opensource.org/licenses/MIT>.
""" This module implements an agent that roams around a track following random
waypoints and avoiding other vehicles.
The agent also responds to traffic lights. """
import math
from typing import List
import carla
from carla.libcarla import ActorList, Actor
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.tools.misc import get_nearest_traffic_light, get_speed
class BasicAgent(Agent):
"""
BasicAgent implements a basic agent that navigates scenes to reach a given
target destination. This agent respects traffic lights and other vehicles.
"""
def __init__(self, vehicle, target_speed=20):
"""
:param vehicle: actor to apply to local planner logic onto
"""
super(BasicAgent, self).__init__(vehicle)
self.stopping_for_traffic_light = False
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
args_lateral_dict = {
'K_P': 0.75,
'K_D': 0.001,
'K_I': 1,
'dt': 1.0 / 20.0}
self._local_planner = LocalPlanner(
self._vehicle, opt_dict={'target_speed': target_speed,
'lateral_control_dict': args_lateral_dict})
self._hop_resolution = 2.0
self._path_seperation_hop = 2
self._path_seperation_threshold = 0.5
self._target_speed = target_speed
self._grp = None
self.drawn_lights = False
self.is_affected_by_traffic_light = False
def set_destination(self, location):
"""
This method creates a list of waypoints from agent's position to destination location
based on the route returned by the global router
"""
start_waypoint = self._map.get_waypoint(self._vehicle.get_location())
end_waypoint = self._map.get_waypoint(
carla.Location(location[0], location[1], location[2]))
route_trace = self._trace_route(start_waypoint, end_waypoint)
assert route_trace
self._local_planner.set_global_plan(route_trace)
def _trace_route(self, start_waypoint, end_waypoint):
"""
This method sets up a global router and returns the optimal route
from start_waypoint to end_waypoint
"""
# Setting up global router
if self._grp is None:
dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(), self._hop_resolution)
grp = GlobalRoutePlanner(dao)
grp.setup()
self._grp = grp
# Obtain route plan
route = self._grp.trace_route(
start_waypoint.transform.location,
end_waypoint.transform.location)
return route
def run_step(self, debug=False):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights
# and other vehicles
actor_list = self._world.get_actors() # type: ActorList
vehicle_list = actor_list.filter("*vehicle*") # type: List[Actor]
pedestrians_list = actor_list.filter("*walker.pedestrian*")
lights_list = actor_list.filter("*traffic_light*") # type: List[carla.TrafficLight]
if not self.drawn_lights and debug:
for light in lights_list:
self._world.debug.draw_box(
carla.BoundingBox(light.trigger_volume.location + light.get_transform().location,
light.trigger_volume.extent * 2),
carla.Rotation(0, 0, 0), 0.05, carla.Color(255, 128, 0, 0), 0)
self.drawn_lights = True
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
if debug:
print('!!! VEHICLE BLOCKING AHEAD [{}])'.format(vehicle.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# Check for pedestrians
pedestrian_state, pedestrian = self._is_pedestrian_hazard(pedestrians_list)
if pedestrian_state:
if debug:
print('!!! PEDESTRIAN BLOCKING AHEAD [{}])'.format(pedestrian.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
light_state, traffic_light = self._is_light_red(lights_list)
if light_state:
if debug:
print('=== RED LIGHT AHEAD [{}])'.format(traffic_light.id))
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
new_target_speed = self._update_target_speed(hazard_detected, debug)
# if hazard_detected:
# control = self.emergency_stop()
# else:
# self._state = AgentState.NAVIGATING
# self.braking_intial_speed = None
# # standard local planner behavior
# control = self._local_planner.run_step(debug=debug)
# if self.stopping_for_traffic_light:
# control.steer = 0.0
self._state = AgentState.NAVIGATING
self.braking_intial_speed = None
# standard local planner behavior
control = self._local_planner.run_step(debug=debug)
if self.stopping_for_traffic_light:
control.steer = 0.0
# Prevent from steering randomly when stopped
if math.fabs(get_speed(self._vehicle)) < 0.1:
control.steer = 0
return control
def done(self):
"""
Check whether the agent has reached its destination.
:return bool
"""
return self._local_planner.done()
def _update_target_speed(self, hazard_detected, debug):
if hazard_detected:
self._set_target_speed(0)
return 0
MAX_PERCENTAGE_OF_SPEED_LIMIT = 0.75
speed_limit = self._vehicle.get_speed_limit() # km/h
current_speed = get_speed(self._vehicle)
new_target_speed = speed_limit * MAX_PERCENTAGE_OF_SPEED_LIMIT
use_custom_traffic_light_speed = False
if use_custom_traffic_light_speed:
|
if nearest_traffic_light is not None:
print("nearest_traffic_light: ", nearest_traffic_light.get_location() if nearest_traffic_light is not None else "None", " ", nearest_traffic_light.state if nearest_traffic_light is not None else "None")
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
self.is_affected_by_traffic_light = False
self.stopping_for_traffic_light = False
if ego_vehicle_waypoint.is_junction:
# It is too late. Do not block the intersection! Keep going!
pass
# Check if we should start braking
elif distance_to_light <= TRAFFIC_LIGHT_SECONDS_AWAY * new_target_speed / 3.6 and nearest_traffic_light is not None and nearest_traffic_light.state != carla.TrafficLightState.Green:
self.is_affected_by_traffic_light = True
brake_distance = current_speed / 3.6 * TRAFFIC_LIGHT_SECONDS_AWAY
print("TL distance: ", distance_to_light, ", distance (to stop): ", distance, ", distance travel 4 secs: ", brake_distance)
new_target_speed = self._target_speed
if distance <= 0:
new_target_speed = 0
self.stopping_for_traffic_light = True
print("Stopping before traffic light, distance ", distance, "m")
elif brake_distance >= distance and brake_distance != 0:
percent_before_light = (brake_distance - distance) / brake_distance
new_target_speed = speed | TRAFFIC_LIGHT_SECONDS_AWAY = 3
METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT = 8
get_traffic_light = self._vehicle.get_traffic_light() # type: carla.TrafficLight
nearest_traffic_light, distance = get_nearest_traffic_light(self._vehicle) # type: carla.TrafficLight, float
distance_to_light = distance
distance -= METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT
if nearest_traffic_light is None:
nearest_traffic_light = get_traffic_light
# Draw debug info
if debug and nearest_traffic_light is not None:
self._world.debug.draw_point(
nearest_traffic_light.get_transform().location,
size=1,
life_time=0.1,
color=carla.Color(255, 15, 15))
"""
if get_traffic_light is not None:
print("get_traffic_light: ", get_traffic_light.get_location() if get_traffic_light is not None else "None", " ", get_traffic_light.state if get_traffic_light is not None else "None") | conditional_block |
basic_agent.py | copy, see <https://opensource.org/licenses/MIT>.
""" This module implements an agent that roams around a track following random
waypoints and avoiding other vehicles.
The agent also responds to traffic lights. """
import math
from typing import List
import carla
from carla.libcarla import ActorList, Actor
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.tools.misc import get_nearest_traffic_light, get_speed
class | (Agent):
"""
BasicAgent implements a basic agent that navigates scenes to reach a given
target destination. This agent respects traffic lights and other vehicles.
"""
def __init__(self, vehicle, target_speed=20):
"""
:param vehicle: actor to apply to local planner logic onto
"""
super(BasicAgent, self).__init__(vehicle)
self.stopping_for_traffic_light = False
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
args_lateral_dict = {
'K_P': 0.75,
'K_D': 0.001,
'K_I': 1,
'dt': 1.0 / 20.0}
self._local_planner = LocalPlanner(
self._vehicle, opt_dict={'target_speed': target_speed,
'lateral_control_dict': args_lateral_dict})
self._hop_resolution = 2.0
self._path_seperation_hop = 2
self._path_seperation_threshold = 0.5
self._target_speed = target_speed
self._grp = None
self.drawn_lights = False
self.is_affected_by_traffic_light = False
def set_destination(self, location):
"""
This method creates a list of waypoints from agent's position to destination location
based on the route returned by the global router
"""
start_waypoint = self._map.get_waypoint(self._vehicle.get_location())
end_waypoint = self._map.get_waypoint(
carla.Location(location[0], location[1], location[2]))
route_trace = self._trace_route(start_waypoint, end_waypoint)
assert route_trace
self._local_planner.set_global_plan(route_trace)
def _trace_route(self, start_waypoint, end_waypoint):
"""
This method sets up a global router and returns the optimal route
from start_waypoint to end_waypoint
"""
# Setting up global router
if self._grp is None:
dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(), self._hop_resolution)
grp = GlobalRoutePlanner(dao)
grp.setup()
self._grp = grp
# Obtain route plan
route = self._grp.trace_route(
start_waypoint.transform.location,
end_waypoint.transform.location)
return route
def run_step(self, debug=False):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights
# and other vehicles
actor_list = self._world.get_actors() # type: ActorList
vehicle_list = actor_list.filter("*vehicle*") # type: List[Actor]
pedestrians_list = actor_list.filter("*walker.pedestrian*")
lights_list = actor_list.filter("*traffic_light*") # type: List[carla.TrafficLight]
if not self.drawn_lights and debug:
for light in lights_list:
self._world.debug.draw_box(
carla.BoundingBox(light.trigger_volume.location + light.get_transform().location,
light.trigger_volume.extent * 2),
carla.Rotation(0, 0, 0), 0.05, carla.Color(255, 128, 0, 0), 0)
self.drawn_lights = True
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
if debug:
print('!!! VEHICLE BLOCKING AHEAD [{}])'.format(vehicle.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# Check for pedestrians
pedestrian_state, pedestrian = self._is_pedestrian_hazard(pedestrians_list)
if pedestrian_state:
if debug:
print('!!! PEDESTRIAN BLOCKING AHEAD [{}])'.format(pedestrian.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
light_state, traffic_light = self._is_light_red(lights_list)
if light_state:
if debug:
print('=== RED LIGHT AHEAD [{}])'.format(traffic_light.id))
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
new_target_speed = self._update_target_speed(hazard_detected, debug)
# if hazard_detected:
# control = self.emergency_stop()
# else:
# self._state = AgentState.NAVIGATING
# self.braking_intial_speed = None
# # standard local planner behavior
# control = self._local_planner.run_step(debug=debug)
# if self.stopping_for_traffic_light:
# control.steer = 0.0
self._state = AgentState.NAVIGATING
self.braking_intial_speed = None
# standard local planner behavior
control = self._local_planner.run_step(debug=debug)
if self.stopping_for_traffic_light:
control.steer = 0.0
# Prevent from steering randomly when stopped
if math.fabs(get_speed(self._vehicle)) < 0.1:
control.steer = 0
return control
def done(self):
"""
Check whether the agent has reached its destination.
:return bool
"""
return self._local_planner.done()
def _update_target_speed(self, hazard_detected, debug):
if hazard_detected:
self._set_target_speed(0)
return 0
MAX_PERCENTAGE_OF_SPEED_LIMIT = 0.75
speed_limit = self._vehicle.get_speed_limit() # km/h
current_speed = get_speed(self._vehicle)
new_target_speed = speed_limit * MAX_PERCENTAGE_OF_SPEED_LIMIT
use_custom_traffic_light_speed = False
if use_custom_traffic_light_speed:
TRAFFIC_LIGHT_SECONDS_AWAY = 3
METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT = 8
get_traffic_light = self._vehicle.get_traffic_light() # type: carla.TrafficLight
nearest_traffic_light, distance = get_nearest_traffic_light(self._vehicle) # type: carla.TrafficLight, float
distance_to_light = distance
distance -= METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT
if nearest_traffic_light is None:
nearest_traffic_light = get_traffic_light
# Draw debug info
if debug and nearest_traffic_light is not None:
self._world.debug.draw_point(
nearest_traffic_light.get_transform().location,
size=1,
life_time=0.1,
color=carla.Color(255, 15, 15))
"""
if get_traffic_light is not None:
print("get_traffic_light: ", get_traffic_light.get_location() if get_traffic_light is not None else "None", " ", get_traffic_light.state if get_traffic_light is not None else "None")
if nearest_traffic_light is not None:
print("nearest_traffic_light: ", nearest_traffic_light.get_location() if nearest_traffic_light is not None else "None", " ", nearest_traffic_light.state if nearest_traffic_light is not None else "None")
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
self.is_affected_by_traffic_light = False
self.stopping_for_traffic_light = False
if ego_vehicle_waypoint.is_junction:
# It is too late. Do not block the intersection! Keep going!
pass
# Check if we should start braking
elif distance_to_light <= TRAFFIC_LIGHT_SECONDS_AWAY * new_target_speed / 3.6 and nearest_traffic_light is not None and nearest_traffic_light.state != carla.TrafficLightState.Green:
self.is_affected_by_traffic_light = True
brake_distance = current_speed / 3.6 * TRAFFIC_LIGHT_SECONDS_AWAY
print("TL distance: ", distance_to_light, ", distance (to stop): ", distance, ", distance travel 4 secs: ", brake_distance)
new_target_speed = self._target_speed
if distance <= 0:
new_target_speed = 0
self.stopping_for_traffic_light = True
print("Stopping before traffic light, distance ", distance, "m")
elif brake_distance >= distance and brake_distance != 0:
percent_before_light = (brake_distance - distance) / brake_distance
new_target_speed = speed | BasicAgent | identifier_name |
APISchedFetch.js | /postgame) data
loading:true,
records:[],
mobileActive:'list' // keeps track of view state for mobile (list => display the list; gameView => display the game data)
};
this.sideBarClick = this.sideBarClick.bind(this);
this.refreshData = this.refreshData.bind(this);
this.handleDateChange = this.handleDateChange.bind(this);
}
refreshData(date = this.state.date) {
// let dateTest = '?date=2019-03-09';
// let fetchDate = date || this.state.date
let dateTest = '';
fetch('https://statsapi.web.nhl.com/api/v1/schedule?date='+date) // fetching the scheduled games from the NHL API
.then(schedResults => {
return schedResults.json();
}).then(data => {
// if there are no games, stop loading, leave the function, and hide the sidebar
if (data.dates.length === 0) {
this.setState({
loading: false
// mobileActive: 'gameView'
})
return
};
let liveGames = [];
let scheduledGames = [];
let finalGames = [];
let allGames = [];
let allGamesJSON = [];
let gamesContentData = [];
let records = [];
var fetches = [];
// looping through the scheduled games for the day
for (let i = 0, j = data.dates[0].games.length; i < j; i++) {
let iterGame = data.dates[0].games[i];
let homeTeam = iterGame.teams.home;
let awayTeam = iterGame.teams.away;
let gameState = iterGame.status.detailedState;
gameState = gameState.toLowerCase().replace(/\s/g, '');
let gamePk = iterGame.gamePk;
let gameRecords = {
gamePk: gamePk,
home: homeTeam.leagueRecord,
away: awayTeam.leagueRecord
}
records.push(gameRecords);
let apiString = 'https://statsapi.web.nhl.com//api/v1/game/' + gamePk + '/feed/live';
let apiContentString = 'https://statsapi.web.nhl.com/api/v1/game/' + gamePk + '/content'
// fetch live game data for each game; each fetch is added to an array of Promises to be sure all data is fetched before parsing
fetches.push(
fetch(apiString)
.then(gameResults => {
return gameResults.json();
}).then(gameData => {
if (gameState.search('progress') !== -1) {
liveGames = liveGames.concat(gameData);
} else if ((gameState.search('scheduled') !== -1) || (gameState.search('pre-game') !== -1)) {
scheduledGames = scheduledGames.concat(gameData);
} else if (gameState.search('final') !== -1) {
finalGames = finalGames.concat(gameData);
}
allGames = [liveGames,scheduledGames,finalGames,data];
allGamesJSON = allGamesJSON.concat(gameData);
})
);
// content data is available at a different endpoint and needs its own fetch
fetches.push(
fetch(apiContentString)
.then(contentRaw => {
return contentRaw.json();
}).then(content => {
gamesContentData = gamesContentData.concat(content);
})
)
};
Promise.all(fetches).then(() => {
// all promises resolved so now we can update the data in the app (but we need to sort it as Promises are not necessarily resolved in order)
sortByKey(liveGames,'gamePk');
sortByKey(scheduledGames,'gamePk');
sortByKey(finalGames,'gamePk');
sortByKey(allGamesJSON,'gamePk');
// if no main game is set, default to the first one
if (this.state.mainGamePk === "") {
let firstGamePk = allGames[3].dates[0].games[0].gamePk;
this.setState({
loading:false,
liveGames: allGames[0],
scheduledGames:allGames[1],
finalGames:allGames[2],
mainGamePk:firstGamePk, | } else {
this.setState({
loading:false,
liveGames: allGames[0],
scheduledGames:allGames[1],
finalGames:allGames[2],
gamesData:allGamesJSON,
gamesContentData:gamesContentData,
records:records
});
}
})
.catch(err => {return console.log(err);});
})
}
componentDidMount() {
this.refreshData();
this._interval = window.setInterval(this.refreshData,10000); // set refresh interval to 10s
}
componentWillUnmount() {
this._interval && window.clearInterval(this._interval); // remove refresh interval when unmounted
}
sideBarClick(gameFID) {
this.setState({mainGamePk: gameFID, mobileActive:'gameView'}); // handles sidebar game click
}
backButtonClick() {
this.setState({mobileActive:'list'}); // used for mobile to provide back button functionality
}
handleDateChange(date) {
this.setState({
date:date,
liveGames: [], // array of game data for live games
scheduledGames: [], // array of game data for scheduled games
finalGames:[], // array of game data for ended games
mainGamePk: "", // gamePk (id) of the game to be displayed in the main game area
gamesData:[], // array of all game data
gamesContentData:[], // array of all content (pregame/postgame) data
loading:true,
records:[],
// mobileActive:'list' // keeps track of view state for mobile (list => display the list; gameView => display the game data)
})
this.refreshData(date)
// this.setState({date:date})
}
render() {
let activeGameVar = this.state.mainGamePk;
// grab the game data to be displayed for the selected sidebar game
let tg = this.state.gamesData;
let activeGameData = tg.find(obj => {
return obj.gamePk == activeGameVar
});
// grab the preview/postgame data to be displayed for the selected sidebar game
let tc = this.state.gamesContentData;
let activeGameContent = tc.find(obj => {
return obj.link === '/api/v1/game/' + activeGameVar + '/content'
});
let r = this.state.records;
let activeRecords = r.find(obj => {
return obj.gamePk === activeGameVar
});
// if content is loading display the bounce loader in the main game area
let mainGameArea = ''
let sideBarArea = ''
if (this.state.loading) {
sideBarArea = (
<div className={'gamesSideBar ' + (this.state.mobileActive === 'list' ? 'mobileActive' : '')}>
<div className="gamesScroll">
<MyDatePicker date={this.state.date} updateDate={this.handleDateChange}/>
<p>Loading...</p>
</div>
<div className="disclaimer">
<p>This website is not in any way affiliated with the National Hockey League (NHL) or any of its respective teams. The NHL logo, team logos, team names, and other trademarks/copyrighted images are the property of their respective owners.</p>
<p>If you are the owner of a trademark/copyrighted material that is used on this website and would like it removed, please <a href="mailto:[email protected]?Subject=Trademark/Copyright%20Issue">contact me</a>.</p>
</div>
</div>
);
mainGameArea = (
<BounceLoader
sizeUnit={"px"}
size={50}
color={'#262626'}
loading={this.state.loading}
/>
);
} else {
if (this.state.gamesData.length !== 0) {
mainGameArea = <ActiveGameArea backButtonClick={() => this.backButtonClick()} gameID={activeGameVar} data={activeGameData} content={activeGameContent} records={activeRecords} mobileActive={this.state.mobileActive}/>
sideBarArea = (
<div className={'gamesSideBar ' + (this.state.mobileActive === 'list' ? 'mobileActive' : '')}>
<div className="gamesScroll">
<MyDatePicker date={this.state.date} updateDate={this.handleDateChange}/>
<div className="gamesContainer live">
{
this.state.liveGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} />)
}
</div>
<div className="gamesContainer sched">
{
this.state.scheduledGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} />)
}
</div>
<div className="gamesContainer final">
{
this.state.finalGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} />)
| gamesData:allGamesJSON,
gamesContentData:gamesContentData,
records:records
}); | random_line_split |
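refreshData above fans out from the schedule endpoint to one feed/live request (and one content request) per game, buckets the results into live, scheduled and final lists by detailedState, and sorts each bucket by gamePk once every promise has settled. The same flow as a small Python sketch (the endpoints are the ones used in the code above; the function name and the use of requests are illustrative):

import requests

# Illustrative Python version of the refreshData flow; not part of the React app.
def fetch_day(date):
    sched = requests.get("https://statsapi.web.nhl.com/api/v1/schedule", params={"date": date}).json()
    buckets = {"live": [], "scheduled": [], "final": []}
    if not sched["dates"]:
        return buckets
    for game in sched["dates"][0]["games"]:
        pk = game["gamePk"]
        state = game["status"]["detailedState"].lower().replace(" ", "")
        feed = requests.get(f"https://statsapi.web.nhl.com/api/v1/game/{pk}/feed/live").json()
        if "progress" in state:
            buckets["live"].append(feed)
        elif "scheduled" in state or "pre-game" in state:
            buckets["scheduled"].append(feed)
        elif "final" in state:
            buckets["final"].append(feed)
    for games in buckets.values():
        games.sort(key=lambda g: g["gamePk"])
    return buckets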
APISchedFetch.js | game) data
loading:true,
records:[],
mobileActive:'list' // keeps track of view state for mobile (list => display the list; gameView => display the game data)
};
this.sideBarClick = this.sideBarClick.bind(this);
this.refreshData = this.refreshData.bind(this);
this.handleDateChange = this.handleDateChange.bind(this);
}
refreshData(date = this.state.date) {
// let dateTest = '?date=2019-03-09';
// let fetchDate = date || this.state.date
let dateTest = '';
fetch('https://statsapi.web.nhl.com/api/v1/schedule?date='+date) // fetching the scheduled games from the NHL API
.then(schedResults => {
return schedResults.json();
}).then(data => {
// if there are no games, stop loading, leave the function, and hide the sidebar
if (data.dates.length === 0) {
this.setState({
loading: false
// mobileActive: 'gameView'
})
return
};
let liveGames = [];
let scheduledGames = [];
let finalGames = [];
let allGames = [];
let allGamesJSON = [];
let gamesContentData = [];
let records = [];
var fetches = [];
// looping through the scheduled games for the day
for (let i = 0, j = data.dates[0].games.length; i < j; i++) {
let iterGame = data.dates[0].games[i];
let homeTeam = iterGame.teams.home;
let awayTeam = iterGame.teams.away;
let gameState = iterGame.status.detailedState;
gameState = gameState.toLowerCase().replace(/\s/g, '');
let gamePk = iterGame.gamePk;
let gameRecords = {
gamePk: gamePk,
home: homeTeam.leagueRecord,
away: awayTeam.leagueRecord
}
records.push(gameRecords);
let apiString = 'https://statsapi.web.nhl.com//api/v1/game/' + gamePk + '/feed/live';
let apiContentString = 'https://statsapi.web.nhl.com/api/v1/game/' + gamePk + '/content'
// fetch live game data for each game; each fetch is added to an array of Promises to be sure all data is fetched before parsing
fetches.push(
fetch(apiString)
.then(gameResults => {
return gameResults.json();
}).then(gameData => {
if (gameState.search('progress') !== -1) {
liveGames = liveGames.concat(gameData);
} else if ((gameState.search('scheduled') !== -1) || (gameState.search('pre-game') !== -1)) {
scheduledGames = scheduledGames.concat(gameData);
} else if (gameState.search('final') !== -1) {
finalGames = finalGames.concat(gameData);
}
allGames = [liveGames,scheduledGames,finalGames,data];
allGamesJSON = allGamesJSON.concat(gameData);
})
);
// content data is available at a different endpoint and needs its own fetch
fetches.push(
fetch(apiContentString)
.then(contentRaw => {
return contentRaw.json();
}).then(content => {
gamesContentData = gamesContentData.concat(content);
})
)
};
Promise.all(fetches).then(() => {
// all fetches have resolved, so update the app state (games were concatenated in completion order, so sort them by gamePk before use)
sortByKey(liveGames,'gamePk');
sortByKey(scheduledGames,'gamePk');
sortByKey(finalGames,'gamePk');
sortByKey(allGamesJSON,'gamePk');
// if no main game is set, default to the first one
if (this.state.mainGamePk === "") {
let firstGamePk = allGames[3].dates[0].games[0].gamePk;
this.setState({
loading:false,
liveGames: allGames[0],
scheduledGames:allGames[1],
finalGames:allGames[2],
mainGamePk:firstGamePk,
gamesData:allGamesJSON,
gamesContentData:gamesContentData,
records:records
});
} else {
this.setState({
loading:false,
liveGames: allGames[0],
scheduledGames:allGames[1],
finalGames:allGames[2],
gamesData:allGamesJSON,
gamesContentData:gamesContentData,
records:records
});
}
})
.catch(err => {return console.log(err);});
})
}
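// Note: sortByKey (used in refreshData above) is not defined in this fragment and is
// presumably declared elsewhere in this file. A minimal assumed version, shown here
// only as an illustrative sketch, would be:
// const sortByKey = (arr, key) =>
//   arr.sort((a, b) => (a[key] > b[key] ? 1 : a[key] < b[key] ? -1 : 0));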
| () {
this.refreshData();
this._interval = window.setInterval(this.refreshData,10000); // set refresh interval to 10s
}
componentWillUnmount() {
this._interval && window.clearInterval(this._interval); // remove refresh interval when unmounted
}
sideBarClick(gameFID) {
this.setState({mainGamePk: gameFID, mobileActive:'gameView'}); // handles sidebar game click
}
backButtonClick() {
this.setState({mobileActive:'list'}); // used for mobile to provide back button functionality
}
handleDateChange(date) {
this.setState({
date:date,
liveGames: [], // array of game data for live games
scheduledGames: [], // array of game data for scheduled games
finalGames:[], // array of game data for ended games
mainGamePk: "", // gamePk (id) of the game to be displayed in the main game area
gamesData:[], // array of all game data
gamesContentData:[], // array of all content (pregame/postgame) data
loading:true,
records:[],
// mobileActive:'list' // keeps track of view state for mobile (list => display the list; gameView => display the game data)
})
this.refreshData(date)
// this.setState({date:date})
}
render() {
let activeGameVar = this.state.mainGamePk;
// grab the game data to be displayed for the selected sidebar game
let tg = this.state.gamesData;
let activeGameData = tg.find(obj => {
return obj.gamePk == activeGameVar
});
// grab the preview/postgame data to be displayed for the selected sidebar game
let tc = this.state.gamesContentData;
let activeGameContent = tc.find(obj => {
return obj.link === '/api/v1/game/' + activeGameVar + '/content'
});
let r = this.state.records;
let activeRecords = r.find(obj => {
return obj.gamePk === activeGameVar
});
// if content is loading display the bounce loader in the main game area
let mainGameArea = ''
let sideBarArea = ''
if (this.state.loading) {
sideBarArea = (
<div className={'gamesSideBar ' + (this.state.mobileActive === 'list' ? 'mobileActive' : '')}>
<div className="gamesScroll">
<MyDatePicker date={this.state.date} updateDate={this.handleDateChange}/>
<p>Loading...</p>
</div>
<div className="disclaimer">
<p>This website is not in any way affiliated with the National Hockey League (NHL) or any of its respective teams. The NHL logo, team logos, team names, and other trademarks/copyrighted images are the property of their respective owners.</p>
<p>If you are the owner of a trademark/copyrighted material that is used on this website and would like it removed, please <a href="mailto:[email protected]?Subject=Trademark/Copyright%20Issue">contact me</a>.</p>
</div>
</div>
);
mainGameArea = (
<BounceLoader
sizeUnit={"px"}
size={50}
color={'#262626'}
loading={this.state.loading}
/>
);
} else {
if (this.state.gamesData.length !== 0) {
mainGameArea = <ActiveGameArea backButtonClick={() => this.backButtonClick()} gameID={activeGameVar} data={activeGameData} content={activeGameContent} records={activeRecords} mobileActive={this.state.mobileActive}/>
sideBarArea = (
<div className={'gamesSideBar ' + (this.state.mobileActive === 'list' ? 'mobileActive' : '')}>
<div className="gamesScroll">
<MyDatePicker date={this.state.date} updateDate={this.handleDateChange}/>
<div className="gamesContainer live">
{
this.state.liveGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} />)
}
</div>
<div className="gamesContainer sched">
{
this.state.scheduledGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} />)
}
</div>
<div className="gamesContainer final">
{
this.state.finalGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} | componentDidMount | identifier_name |
APISchedFetch.js | game) data
loading:true,
records:[],
mobileActive:'list' // keeps track of view state for mobile (list => display the list; gameView => display the game data)
};
this.sideBarClick = this.sideBarClick.bind(this);
this.refreshData = this.refreshData.bind(this);
this.handleDateChange = this.handleDateChange.bind(this);
}
refreshData(date = this.state.date) {
// let dateTest = '?date=2019-03-09';
// let fetchDate = date || this.state.date
let dateTest = '';
fetch('https://statsapi.web.nhl.com/api/v1/schedule?date='+date) // fetching the scheduled games from the NHL API
.then(schedResults => {
return schedResults.json();
}).then(data => {
// if there are no games, stop loading, leave the function, and hide the sidebar
if (data.dates.length === 0) {
this.setState({
loading: false
// mobileActive: 'gameView'
})
return
};
let liveGames = [];
let scheduledGames = [];
let finalGames = [];
let allGames = [];
let allGamesJSON = [];
let gamesContentData = [];
let records = [];
var fetches = [];
// looping through the scheduled games for the day
for (let i = 0, j = data.dates[0].games.length; i < j; i++) {
let iterGame = data.dates[0].games[i];
let homeTeam = iterGame.teams.home;
let awayTeam = iterGame.teams.away;
let gameState = iterGame.status.detailedState;
gameState = gameState.toLowerCase().replace(/\s/g, '');
let gamePk = iterGame.gamePk;
let gameRecords = {
gamePk: gamePk,
home: homeTeam.leagueRecord,
away: awayTeam.leagueRecord
}
records.push(gameRecords);
let apiString = 'https://statsapi.web.nhl.com/api/v1/game/' + gamePk + '/feed/live';
let apiContentString = 'https://statsapi.web.nhl.com/api/v1/game/' + gamePk + '/content'
// fetch live game data for each game; each fetch is added to an array of Promises to be sure all data is fetched before parsing
fetches.push(
fetch(apiString)
.then(gameResults => {
return gameResults.json();
}).then(gameData => {
if (gameState.search('progress') !== -1) {
liveGames = liveGames.concat(gameData);
} else if ((gameState.search('scheduled') !== -1) || (gameState.search('pre-game') !== -1)) {
scheduledGames = scheduledGames.concat(gameData);
} else if (gameState.search('final') !== -1) |
allGames = [liveGames,scheduledGames,finalGames,data];
allGamesJSON = allGamesJSON.concat(gameData);
})
);
// content data is available at a different endpoint and needs its own fetch
fetches.push(
fetch(apiContentString)
.then(contentRaw => {
return contentRaw.json();
}).then(content => {
gamesContentData = gamesContentData.concat(content);
})
)
};
Promise.all(fetches).then(() => {
// all fetches have resolved, so update the app state (games were concatenated in completion order, so sort them by gamePk before use)
sortByKey(liveGames,'gamePk');
sortByKey(scheduledGames,'gamePk');
sortByKey(finalGames,'gamePk');
sortByKey(allGamesJSON,'gamePk');
// if no main game is set, default to the first one
if (this.state.mainGamePk === "") {
let firstGamePk = allGames[3].dates[0].games[0].gamePk;
this.setState({
loading:false,
liveGames: allGames[0],
scheduledGames:allGames[1],
finalGames:allGames[2],
mainGamePk:firstGamePk,
gamesData:allGamesJSON,
gamesContentData:gamesContentData,
records:records
});
} else {
this.setState({
loading:false,
liveGames: allGames[0],
scheduledGames:allGames[1],
finalGames:allGames[2],
gamesData:allGamesJSON,
gamesContentData:gamesContentData,
records:records
});
}
})
.catch(err => {return console.log(err);});
})
}
componentDidMount() {
this.refreshData();
this._interval = window.setInterval(this.refreshData,10000); // set refresh interval to 10s
}
componentWillUnmount() {
this._interval && window.clearInterval(this._interval); // remove refresh interval when unmounted
}
sideBarClick(gameFID) {
this.setState({mainGamePk: gameFID, mobileActive:'gameView'}); // handles sidebar game click
}
backButtonClick() {
this.setState({mobileActive:'list'}); // used for mobile to provide back button functionality
}
handleDateChange(date) {
this.setState({
date:date,
liveGames: [], // array of game data for live games
scheduledGames: [], // array of game data for scheduled games
finalGames:[], // array of game data for ended games
mainGamePk: "", // gamePk (id) of the game to be displayed in the main game area
gamesData:[], // array of all game data
gamesContentData:[], // array of all content (pregame/postgame) data
loading:true,
records:[],
// mobileActive:'list' // keeps track of view state for mobile (list => display the list; gameView => display the game data)
})
this.refreshData(date)
// this.setState({date:date})
}
render() {
let activeGameVar = this.state.mainGamePk;
// grab the game data to be displayed for the selected sidebar game
let tg = this.state.gamesData;
let activeGameData = tg.find(obj => {
return obj.gamePk == activeGameVar
});
// grab the preview/postgame data to be displayed for the selected sidebar game
let tc = this.state.gamesContentData;
let activeGameContent = tc.find(obj => {
return obj.link === '/api/v1/game/' + activeGameVar + '/content'
});
let r = this.state.records;
let activeRecords = r.find(obj => {
return obj.gamePk === activeGameVar
});
// if content is loading display the bounce loader in the main game area
let mainGameArea = ''
let sideBarArea = ''
if (this.state.loading) {
sideBarArea = (
<div className={'gamesSideBar ' + (this.state.mobileActive === 'list' ? 'mobileActive' : '')}>
<div className="gamesScroll">
<MyDatePicker date={this.state.date} updateDate={this.handleDateChange}/>
<p>Loading...</p>
</div>
<div className="disclaimer">
<p>This website is not in any way affiliated with the National Hockey League (NHL) or any of its respective teams. The NHL logo, team logos, team names, and other trademarks/copyrighted images are the property of their respective owners.</p>
<p>If you are the owner of a trademark/copyrighted material that is used on this website and would like it removed, please <a href="mailto:[email protected]?Subject=Trademark/Copyright%20Issue">contact me</a>.</p>
</div>
</div>
);
mainGameArea = (
<BounceLoader
sizeUnit={"px"}
size={50}
color={'#262626'}
loading={this.state.loading}
/>
);
} else {
if (this.state.gamesData.length !== 0) {
mainGameArea = <ActiveGameArea backButtonClick={() => this.backButtonClick()} gameID={activeGameVar} data={activeGameData} content={activeGameContent} records={activeRecords} mobileActive={this.state.mobileActive}/>
sideBarArea = (
<div className={'gamesSideBar ' + (this.state.mobileActive === 'list' ? 'mobileActive' : '')}>
<div className="gamesScroll">
<MyDatePicker date={this.state.date} updateDate={this.handleDateChange}/>
<div className="gamesContainer live">
{
this.state.liveGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} />)
}
</div>
<div className="gamesContainer sched">
{
this.state.scheduledGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} />)
}
</div>
<div className="gamesContainer final">
{
this.state.finalGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} | {
finalGames = finalGames.concat(gameData);
} | conditional_block |
APISchedFetch.js | game) data
loading:true,
records:[],
mobileActive:'list' // keeps track of view state for mobile (list => display the list; gameView => display the game data)
};
this.sideBarClick = this.sideBarClick.bind(this);
this.refreshData = this.refreshData.bind(this);
this.handleDateChange = this.handleDateChange.bind(this);
}
refreshData(date = this.state.date) {
// let dateTest = '?date=2019-03-09';
// let fetchDate = date || this.state.date
let dateTest = '';
fetch('https://statsapi.web.nhl.com/api/v1/schedule?date='+date) // fetching the scheduled games from the NHL API
.then(schedResults => {
return schedResults.json();
}).then(data => {
// if there are no games, stop loading, leave the function, and hide the sidebar
if (data.dates.length === 0) {
this.setState({
loading: false
// mobileActive: 'gameView'
})
return
};
let liveGames = [];
let scheduledGames = [];
let finalGames = [];
let allGames = [];
let allGamesJSON = [];
let gamesContentData = [];
let records = [];
var fetches = [];
// looping through the scheduled games for the day
for (let i = 0, j = data.dates[0].games.length; i < j; i++) {
let iterGame = data.dates[0].games[i];
let homeTeam = iterGame.teams.home;
let awayTeam = iterGame.teams.away;
let gameState = iterGame.status.detailedState;
gameState = gameState.toLowerCase().replace(/\s/g, '');
let gamePk = iterGame.gamePk;
let gameRecords = {
gamePk: gamePk,
home: homeTeam.leagueRecord,
away: awayTeam.leagueRecord
}
records.push(gameRecords);
let apiString = 'https://statsapi.web.nhl.com/api/v1/game/' + gamePk + '/feed/live';
let apiContentString = 'https://statsapi.web.nhl.com/api/v1/game/' + gamePk + '/content'
// fetch live game data for each game; each fetch is added to an array of Promises to be sure all data is fetched before parsing
fetches.push(
fetch(apiString)
.then(gameResults => {
return gameResults.json();
}).then(gameData => {
if (gameState.search('progress') !== -1) {
liveGames = liveGames.concat(gameData);
} else if ((gameState.search('scheduled') !== -1) || (gameState.search('pre-game') !== -1)) {
scheduledGames = scheduledGames.concat(gameData);
} else if (gameState.search('final') !== -1) {
finalGames = finalGames.concat(gameData);
}
allGames = [liveGames,scheduledGames,finalGames,data];
allGamesJSON = allGamesJSON.concat(gameData);
})
);
// content data is available at a different endpoint and needs its own fetch
fetches.push(
fetch(apiContentString)
.then(contentRaw => {
return contentRaw.json();
}).then(content => {
gamesContentData = gamesContentData.concat(content);
})
)
};
Promise.all(fetches).then(() => {
// all fetches have resolved, so update the app state (games were concatenated in completion order, so sort them by gamePk before use)
sortByKey(liveGames,'gamePk');
sortByKey(scheduledGames,'gamePk');
sortByKey(finalGames,'gamePk');
sortByKey(allGamesJSON,'gamePk');
// if no main game is set, default to the first one
if (this.state.mainGamePk === "") {
let firstGamePk = allGames[3].dates[0].games[0].gamePk;
this.setState({
loading:false,
liveGames: allGames[0],
scheduledGames:allGames[1],
finalGames:allGames[2],
mainGamePk:firstGamePk,
gamesData:allGamesJSON,
gamesContentData:gamesContentData,
records:records
});
} else {
this.setState({
loading:false,
liveGames: allGames[0],
scheduledGames:allGames[1],
finalGames:allGames[2],
gamesData:allGamesJSON,
gamesContentData:gamesContentData,
records:records
});
}
})
.catch(err => {return console.log(err);});
})
}
componentDidMount() {
this.refreshData();
this._interval = window.setInterval(this.refreshData,10000); // set refresh interval to 10s
}
componentWillUnmount() {
this._interval && window.clearInterval(this._interval); // remove refresh interval when unmounted
}
sideBarClick(gameFID) |
backButtonClick() {
this.setState({mobileActive:'list'}); // used for mobile to provide back button functionality
}
handleDateChange(date) {
this.setState({
date:date,
liveGames: [], // array of game data for live games
scheduledGames: [], // array of game data for scheduled games
finalGames:[], // array of game data for ended games
mainGamePk: "", // gamePk (id) of the game to be displayed in the main game area
gamesData:[], // array of all game data
gamesContentData:[], // array of all content (pregame/postgame) data
loading:true,
records:[],
// mobileActive:'list' // keeps track of view state for mobile (list => display the list; gameView => display the game data)
})
this.refreshData(date)
// this.setState({date:date})
}
render() {
let activeGameVar = this.state.mainGamePk;
// grab the game data to be displayed for the selected sidebar game
let tg = this.state.gamesData;
let activeGameData = tg.find(obj => {
return obj.gamePk == activeGameVar
});
// grab the preview/postgame data to be displayed for the selected sidebar game
let tc = this.state.gamesContentData;
let activeGameContent = tc.find(obj => {
return obj.link === '/api/v1/game/' + activeGameVar + '/content'
});
let r = this.state.records;
let activeRecords = r.find(obj => {
return obj.gamePk === activeGameVar
});
// if content is loading display the bounce loader in the main game area
let mainGameArea = ''
let sideBarArea = ''
if (this.state.loading) {
sideBarArea = (
<div className={'gamesSideBar ' + (this.state.mobileActive === 'list' ? 'mobileActive' : '')}>
<div className="gamesScroll">
<MyDatePicker date={this.state.date} updateDate={this.handleDateChange}/>
<p>Loading...</p>
</div>
<div className="disclaimer">
<p>This website is not in any way affiliated with the National Hockey League (NHL) or any of its respective teams. The NHL logo, team logos, team names, and other trademarks/copyrighted images are the property of their respective owners.</p>
<p>If you are the owner of a trademark/copyrighted material that is used on this website and would like it removed, please <a href="mailto:[email protected]?Subject=Trademark/Copyright%20Issue">contact me</a>.</p>
</div>
</div>
);
mainGameArea = (
<BounceLoader
sizeUnit={"px"}
size={50}
color={'#262626'}
loading={this.state.loading}
/>
);
} else {
if (this.state.gamesData.length !== 0) {
mainGameArea = <ActiveGameArea backButtonClick={() => this.backButtonClick()} gameID={activeGameVar} data={activeGameData} content={activeGameContent} records={activeRecords} mobileActive={this.state.mobileActive}/>
sideBarArea = (
<div className={'gamesSideBar ' + (this.state.mobileActive === 'list' ? 'mobileActive' : '')}>
<div className="gamesScroll">
<MyDatePicker date={this.state.date} updateDate={this.handleDateChange}/>
<div className="gamesContainer live">
{
this.state.liveGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} />)
}
</div>
<div className="gamesContainer sched">
{
this.state.scheduledGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} />)
}
</div>
<div className="gamesContainer final">
{
this.state.finalGames.map((game) => <SideBarGame key={game.gameData.game.pk} sideClick={this.sideBarClick} data={game} activeID={this.state.mainGamePk} | {
this.setState({mainGamePk: gameFID, mobileActive:'gameView'}); // handles sidebar game click
} | identifier_body |
13.1.rs | [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 2:
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// Picosecond 3:
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// Your plan is to hitch a ride on a packet about to move through the firewall. The packet will travel along the top of each layer, and it moves at one layer per picosecond. Each picosecond, the packet moves one layer forward (its first move takes it into layer 0), and then the scanners move one step. If there is a scanner at the top of the layer as your packet enters it, you are caught. (If a scanner moves into the top of its layer while you are there, you are not caught: it doesn't have time to notice you before you leave.) If you were to do this in the configuration above, marking your current position with parentheses, your passage through the firewall would look like this:
// Initial state:
// 0 1 2 3 4 5 6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ] [ ] [ ]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 0:
// 0 1 2 3 4 5 6
// (S) [S] ... ... [S] ... [S]
// [ ] [ ] [ ] [ ]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// ( ) [ ] ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 1:
// 0 1 2 3 4 5 6
// [ ] ( ) ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] (S) ... ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// Picosecond 2:
// 0 1 2 3 4 5 6
// [ ] [S] (.) ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] (.) ... [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// Picosecond 3:
// 0 1 2 3 4 5 6
// [ ] [ ] ... (.) [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// 0 1 2 3 4 5 6
// [S] [S] ... (.) [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [ ] [S] [S]
// [ ] [ ]
// Picosecond 4:
// 0 1 2 3 4 5 6
// [S] [S] ... ... ( ) ... [ ]
// [ ] [ ] [ ] [ ]
// [ ] [S] [S]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... ( ) ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 5:
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] (.) [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [S] (.) [S]
// [ ] [ ] [ ] [ ]
// [S] [ ] [ ]
// [ ] [ ]
// Picosecond 6:
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [S] ... (S)
// [ ] [ ] [ ] [ ]
// [S] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] ... ( )
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// In this situation, you are caught in layers 0 and 6, because your packet entered the layer when its scanner was at the top when you entered it. You are not caught in layer 1, since the scanner moved into the top of the layer once you were already there.
// The severity of getting caught on a layer is equal to its depth multiplied by its range. (Ignore layers in which you do not get caught.) The severity of the whole trip is the sum of these values. In the example above, the trip severity is 0*3 + 6*4 = 24.
// Given the details of the firewall you've recorded, if you leave immediately, what is the severity of your whole trip?
// Now, you need to pass through the firewall without being caught - easier said than done.
// You can't control the speed of the packet, but you can delay it any number of picoseconds. For each picosecond you delay the packet before beginning your trip, all security scanners move one step. You're not in the firewall during this time; you don't enter layer 0 until you stop delaying the packet.
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashMap;
#[derive(Debug)]
struct Scanners {
max_depth: i32,
max_range: i32,
scanners: HashMap<i32, i32>,
}
impl Scanners {
// The position of the scanner at depth d after it has run for time t.
// None if there no scanner at depth d
fn pos(&self, d: &i32, t: &i32) -> Option<i32> {
if let Some(r) = self.range(&d) {
if *t < 0 {
return Some(0);
}
let mi = r - 1; /* max index of the scanner range */
let unique_positions = r * 2 - 2; /* how many different states the scanner can be in. Whenever the scanner is at an end, it can only be turning around. Whenever a scanner is in a middle position, it could be going back or forward*/
let pos = *t % unique_positions;
if pos < *r {
return Some(pos);
} else {
return Some(mi - (pos % mi));
}
}
None
}
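// Worked example (illustrative, not part of the original solution): a scanner
// with range r = 4 has r * 2 - 2 = 6 unique states and visits positions
// 0,1,2,3,2,1,0,1,2,3,... as t grows. For t = 4 the formula above gives
// mi - (pos % mi) = 3 - (4 % 3) = 2, and the scanner is back at the top
// (position 0) exactly when t % 6 == 0, which is the shortcut `collides` uses.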
// Does a packet at depth d collide with a scanner at d that has been running for time t?
fn collides(&self, d: &i32, t: &i32) -> bool {
if let Some(r) = self.range(d) {
// There is a collision iff t % (r * 2 - 2) == 0
return *t % (*r * 2 - 2) == 0;
}
false
}
fn range(&self, d: &i32) -> Option<&i32> {
self.scanners.get(d)
}
}
fn | get_scanners | identifier_name |
|
13.1.rs | 3 4 5 6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 2:
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// Picosecond 3:
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// Your plan is to hitch a ride on a packet about to move through the firewall. The packet will travel along the top of each layer, and it moves at one layer per picosecond. Each picosecond, the packet moves one layer forward (its first move takes it into layer 0), and then the scanners move one step. If there is a scanner at the top of the layer as your packet enters it, you are caught. (If a scanner moves into the top of its layer while you are there, you are not caught: it doesn't have time to notice you before you leave.) If you were to do this in the configuration above, marking your current position with parentheses, your passage through the firewall would look like this:
// Initial state:
// 0 1 2 3 4 5 6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ] [ ] [ ]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 0:
// 0 1 2 3 4 5 6
// (S) [S] ... ... [S] ... [S]
// [ ] [ ] [ ] [ ]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// ( ) [ ] ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
| // 0 1 2 3 4 5 6
// [ ] ( ) ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] (S) ... ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// Picosecond 2:
// 0 1 2 3 4 5 6
// [ ] [S] (.) ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] (.) ... [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// Picosecond 3:
// 0 1 2 3 4 5 6
// [ ] [ ] ... (.) [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// 0 1 2 3 4 5 6
// [S] [S] ... (.) [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [ ] [S] [S]
// [ ] [ ]
// Picosecond 4:
// 0 1 2 3 4 5 6
// [S] [S] ... ... ( ) ... [ ]
// [ ] [ ] [ ] [ ]
// [ ] [S] [S]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... ( ) ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 5:
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] (.) [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [S] (.) [S]
// [ ] [ ] [ ] [ ]
// [S] [ ] [ ]
// [ ] [ ]
// Picosecond 6:
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [S] ... (S)
// [ ] [ ] [ ] [ ]
// [S] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] ... ( )
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// In this situation, you are caught in layers 0 and 6, because your packet entered the layer when its scanner was at the top when you entered it. You are not caught in layer 1, since the scanner moved into the top of the layer once you were already there.
// The severity of getting caught on a layer is equal to its depth multiplied by its range. (Ignore layers in which you do not get caught.) The severity of the whole trip is the sum of these values. In the example above, the trip severity is 0*3 + 6*4 = 24.
// Given the details of the firewall you've recorded, if you leave immediately, what is the severity of your whole trip?
// Now, you need to pass through the firewall without being caught - easier said than done.
// You can't control the speed of the packet, but you can delay it any number of picoseconds. For each picosecond you delay the packet before beginning your trip, all security scanners move one step. You're not in the firewall during this time; you don't enter layer 0 until you stop delaying the packet.
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashMap;
#[derive(Debug)]
struct Scanners {
max_depth: i32,
max_range: i32,
scanners: HashMap<i32, i32>,
}
impl Scanners {
// The position of the scanner at depth d after it has run for time t.
// None if there no scanner at depth d
fn pos(&self, d: &i32, t: &i32) -> Option<i32> {
if let Some(r) = self.range(&d) {
if *t < 0 {
return Some(0);
}
let mi = r - 1; /* max index of the scanner range */
let unique_positions = r * 2 - 2; /* how many different states the scanner can be in. Whenever the scanner is at an end, it can only be turning around. Whenever a scanner is in a middle position, it could be going back or forward*/
let pos = *t % unique_positions;
if pos < *r {
return Some(pos);
} else {
return Some(mi - (pos % mi));
}
}
None
}
// Does a packet at depth d collide with a scanner at d that has been running for time t?
fn collides(&self, d: &i32, t: &i32) -> bool {
if let Some(r) = self.range(d) {
// There is a collision iff t % (r * 2 - 2) == 0
return *t % (*r * 2 - 2) == 0;
}
false
}
fn range(&self, d: &i32) -> | // [ ] [ ]
// Picosecond 1:
| random_line_split |
13.1.rs | 3 4 5 6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 2:
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// Picosecond 3:
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// Your plan is to hitch a ride on a packet about to move through the firewall. The packet will travel along the top of each layer, and it moves at one layer per picosecond. Each picosecond, the packet moves one layer forward (its first move takes it into layer 0), and then the scanners move one step. If there is a scanner at the top of the layer as your packet enters it, you are caught. (If a scanner moves into the top of its layer while you are there, you are not caught: it doesn't have time to notice you before you leave.) If you were to do this in the configuration above, marking your current position with parentheses, your passage through the firewall would look like this:
// Initial state:
// 0 1 2 3 4 5 6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ] [ ] [ ]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 0:
// 0 1 2 3 4 5 6
// (S) [S] ... ... [S] ... [S]
// [ ] [ ] [ ] [ ]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// ( ) [ ] ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 1:
// 0 1 2 3 4 5 6
// [ ] ( ) ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] (S) ... ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// Picosecond 2:
// 0 1 2 3 4 5 6
// [ ] [S] (.) ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] (.) ... [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// Picosecond 3:
// 0 1 2 3 4 5 6
// [ ] [ ] ... (.) [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// 0 1 2 3 4 5 6
// [S] [S] ... (.) [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [ ] [S] [S]
// [ ] [ ]
// Picosecond 4:
// 0 1 2 3 4 5 6
// [S] [S] ... ... ( ) ... [ ]
// [ ] [ ] [ ] [ ]
// [ ] [S] [S]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... ( ) ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 5:
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] (.) [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [S] (.) [S]
// [ ] [ ] [ ] [ ]
// [S] [ ] [ ]
// [ ] [ ]
// Picosecond 6:
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [S] ... (S)
// [ ] [ ] [ ] [ ]
// [S] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] ... ( )
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// In this situation, you are caught in layers 0 and 6, because your packet entered the layer when its scanner was at the top when you entered it. You are not caught in layer 1, since the scanner moved into the top of the layer once you were already there.
// The severity of getting caught on a layer is equal to its depth multiplied by its range. (Ignore layers in which you do not get caught.) The severity of the whole trip is the sum of these values. In the example above, the trip severity is 0*3 + 6*4 = 24.
// Given the details of the firewall you've recorded, if you leave immediately, what is the severity of your whole trip?
// Now, you need to pass through the firewall without being caught - easier said than done.
// You can't control the speed of the packet, but you can delay it any number of picoseconds. For each picosecond you delay the packet before beginning your trip, all security scanners move one step. You're not in the firewall during this time; you don't enter layer 0 until you stop delaying the packet.
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashMap;
#[derive(Debug)]
struct Scanners {
max_depth: i32,
max_range: i32,
scanners: HashMap<i32, i32>,
}
impl Scanners {
// The position of the scanner at depth d after it has run for time t.
// None if there no scanner at depth d
fn pos(&self, d: &i32, t: &i32) -> Option<i32> {
if let Some(r) = self.range(&d) {
if *t < 0 |
let mi = r - 1; /* max index of the scanner range */
let unique_positions = r * 2 - 2; /* how many different states the scanner can be in. Whenever the scanner is at an end, it can only be turning around. Whenever a scanner is in a middle position, it could be going back or forward*/
let pos = *t % unique_positions;
if pos < *r {
return Some(pos);
} else {
return Some(mi - (pos % mi));
}
}
None
}
// Does a packet at depth d collide with a scanner at d that has been running for time t?
fn collides(&self, d: &i32, t: &i32) -> bool {
if let Some(r) = self.range(d) {
// There is a collision iff t % (r * 2 - 2) == 0
return *t % (*r * 2 - 2) == 0;
}
false
}
fn range(&self, d: &i32) -> | {
return Some(0);
} | conditional_block |
13.1.rs | [S] ... ... [S] ... [S]
// [ ] [ ] [ ] [ ]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// ( ) [ ] ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 1:
// 0 1 2 3 4 5 6
// [ ] ( ) ... ... [ ] ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] (S) ... ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// Picosecond 2:
// 0 1 2 3 4 5 6
// [ ] [S] (.) ... [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [S] [S] [S]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] (.) ... [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// Picosecond 3:
// 0 1 2 3 4 5 6
// [ ] [ ] ... (.) [ ] ... [ ]
// [S] [S] [ ] [ ]
// [ ] [ ] [ ]
// [S] [S]
// 0 1 2 3 4 5 6
// [S] [S] ... (.) [ ] ... [ ]
// [ ] [ ] [ ] [ ]
// [ ] [S] [S]
// [ ] [ ]
// Picosecond 4:
// 0 1 2 3 4 5 6
// [S] [S] ... ... ( ) ... [ ]
// [ ] [ ] [ ] [ ]
// [ ] [S] [S]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... ( ) ... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 5:
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] (.) [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [S] (.) [S]
// [ ] [ ] [ ] [ ]
// [S] [ ] [ ]
// [ ] [ ]
// Picosecond 6:
// 0 1 2 3 4 5 6
// [ ] [S] ... ... [S] ... (S)
// [ ] [ ] [ ] [ ]
// [S] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// [ ] [ ] ... ... [ ] ... ( )
// [S] [S] [S] [S]
// [ ] [ ] [ ]
// [ ] [ ]
// In this situation, you are caught in layers 0 and 6, because your packet entered the layer when its scanner was at the top when you entered it. You are not caught in layer 1, since the scanner moved into the top of the layer once you were already there.
// The severity of getting caught on a layer is equal to its depth multiplied by its range. (Ignore layers in which you do not get caught.) The severity of the whole trip is the sum of these values. In the example above, the trip severity is 0*3 + 6*4 = 24.
// Given the details of the firewall you've recorded, if you leave immediately, what is the severity of your whole trip?
// Now, you need to pass through the firewall without being caught - easier said than done.
// You can't control the speed of the packet, but you can delay it any number of picoseconds. For each picosecond you delay the packet before beginning your trip, all security scanners move one step. You're not in the firewall during this time; you don't enter layer 0 until you stop delaying the packet.
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashMap;
#[derive(Debug)]
struct Scanners {
max_depth: i32,
max_range: i32,
scanners: HashMap<i32, i32>,
}
impl Scanners {
// The position of the scanner at depth d after it has run for time t.
// None if there no scanner at depth d
fn pos(&self, d: &i32, t: &i32) -> Option<i32> {
if let Some(r) = self.range(&d) {
if *t < 0 {
return Some(0);
}
let mi = r - 1; /* max index of the scanner range */
let unique_positions = r * 2 - 2; /* how many different states the scanner can be in. Whenever the scanner is at an end, it can only be turning around. Whenever a scanner is in a middle position, it could be going back or forward*/
let pos = *t % unique_positions;
if pos < *r {
return Some(pos);
} else {
return Some(mi - (pos % mi));
}
}
None
}
// Does a packet at depth d collide with a scanner at d that has been running for time t?
fn collides(&self, d: &i32, t: &i32) -> bool {
if let Some(r) = self.range(d) {
// There is a collision iff t % (r * 2 - 2) == 0
return *t % (*r * 2 - 2) == 0;
}
false
}
fn range(&self, d: &i32) -> Option<&i32> {
self.scanners.get(d)
}
}
fn get_scanners(f: File) -> Scanners {
let buf = BufReader::new(f);
let mut scanners = HashMap::new();
let mut max_depth = 0;
let mut max_range = 0;
for line in buf.lines() {
let split = line.expect("io error")
.split(": ")
.map(|s| s.parse::<i32>().expect("parse int err"))
.collect::<Vec<i32>>();
scanners.insert(split[0], split[1]);
max_depth = if split[0] > max_depth {
split[0]
} else {
max_depth
};
max_range = if split[1] > max_range {
split[1]
} else {
max_range
};
}
Scanners {
max_range,
max_depth,
scanners,
}
}
/* Advent13-1 the total severity of starting at a given offset */
fn severity(offset: &i32, scanners: &Scanners) -> i32 {
let mut severity = 0;
let mut d: i32 = 0;
while d <= scanners.max_depth {
let scanner_time = d + offset;
if let Some(pos) = scanners.pos(&d, &scanner_time) {
if pos == 0 {
let r = scanners.range(&d).unwrap();
// println!("Hit layer {} and got severity {}", d, r*d);
severity += *r * d;
}
}
// debug_print_scanners(&d, &scanner_time, &scanners);
d += 1;
}
severity
}
/* Advent13-2 does an offset result in detection? */
fn detected(offset: &i32, scanners: &Scanners) -> bool | {
let mut d: i32 = 0;
while d <= scanners.max_depth {
let scanner_time = d + offset;
if scanners.collides(&d, &scanner_time) {
return true;
}
d += 1;
}
false
} | identifier_body |
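// Driver sketch (illustrative, not from the original file): part 1 is the trip
// severity with no delay, part 2 is the smallest delay that crosses undetected.
// Assumes the puzzle input is available as "input.txt".
#[allow(dead_code)]
fn solve_both_parts() -> (i32, i32) {
    let scanners = get_scanners(File::open("input.txt").expect("failed to open input"));
    let part1 = severity(&0, &scanners);
    let part2 = (0..).find(|offset| !detected(offset, &scanners)).expect("no safe delay found");
    (part1, part2)
}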
|
config.js | zoom: 4.5,
pitch: 60,
bearing: 0
},
onChapterEnter: [
{
layer: 'anchor port',
opacity: 1
}
],
onChapterExit: [
{
layer: 'ports',
opacity: 0
}
]
},
{
id: 'perth',
title: 'Day 7: Perth (Fremantle Port), Australia',
image: 'https://www.oceaniacruises.com/Images/Ports/Medium/61286/FRE.jpg',
description: 'A stunning setting along Swan River and the Indian Ocean welcomes you into the laid-back city of Perth. At Kings Park, one of the few urban parks that dwarf Central Park, take in breathtaking views of downtown Perth and plan your day. Explore the city’s range of eclectic neighborhoods, immerse yourself in indigenous art at the Art Gallery of Western Australia, or discover Australia’s native wildlife at the Perth Zoo. Travel to nearby Swan Valley, Western Australia’s oldest wine region, to visit one of the many historic vineyards for a tasting of excellent local wines. Meanwhile, discover Fremantle’s distinct character through its 19th century port streetscape. Dive into its seafaring history at the Western Australia Maritime Museum and Shipwreck Galleries, visit Fremantle Prison or explore the Manjaree Heritage Trail for a glimpse of life before European settlement.',
location: {
center: [115.747710, -32.052836],
zoom: 4.5,
pitch: 60,
bearing: 0
},
onChapterEnter: [
{
layer: 'ports',
opacity: 1
}
],
onChapterExit: [
{
layer: 'ports',
opacity: 0
}
]
},
{
id: 'albany',
title: 'Day 8: Albany, Australia',
image: "https://www.oceaniacruises.com/Images/Ports/Medium/81604476841/ALH.jpg",
description: 'Western Australia’s oldest colonial settlement, Albany holds not only great historical significance but also immense natural beauty. Founded in 1826 as a British military outpost, the town evolved into a provisioning port for ships, a departure point for Australian troops bound for World War I, and eventually a base for the whaling industry. History buffs will be intrigued by sites such as the Anglican church, town hall, restored gaol and a one-of-a-kind whaling station museum, while the dramatic scenery of the rugged coastline will astound anyone fortunate enough to sail into Princess Royal Harbour. Known as Ataturk Entrance, the channel accessing the harbor from King George Sound is quite narrow, with forested peaks rising on either side from the rocky shores and sweeping white beaches. The view from your ship’s deck provides the perfect introduction to the natural wonders embracing this historic town.',
location: {
center: [117.886152, -35.031425],
zoom: 4.5,
pitch: 60,
bearing: 0
},
onChapterEnter: [
{
layer: 'ports',
opacity: 1
}
],
onChapterExit: [
{
layer: 'ports',
opacity: 0
}
]
},
{
id: 'adelaide',
title: 'Day 11 - 12: Adelaide, Australia',
image: "https://www.oceaniacruises.com/Images/Ports/Medium/81604380036/ADL.jpg",
description: 'South Australia’s coastal capital, Adelaide combines the charm of a picturesque country town with lively cosmopolitan offerings. Discover the region’s history at the South Australian Art Museum or browse local delicacies such as Adelaide Hills cheeses at the impressive Central Market. Visit Adelaide Oval, celebrated as the world’s prettiest cricket ground, for a behind-the-scenes look at the iconic stadium’s history. For a taste of the local wine culture, venture into the countryside to explore Barossa Valley, Clare Valley or McLaren Vale, three excellent nearby wine regions.',
location: {
center: [138.615234, -34.894724],
zoom: 4.5,
pitch: 60,
bearing: 0
},
onChapterEnter: [
{
layer: 'ports',
opacity: 1
}
],
onChapterExit: [
{
layer: 'ports',
opacity: 0
}
]
},
{
id: 'melbourne',
title: 'Day 13: Melbourne, Australia',
image: "https://www.oceaniacruises.com/Images/Ports/Medium/8607/MEB.jpg",
description: 'Head into the scenic Dandenong Mountains and ride the Puffing Billy steam train through the forest. Meet the koalas, wallabies and more at a wildlife park or explore local wineries. Take a walk and discover the city’s marvelous architecture, chic galleries and boutiques.',
location: {
center: [144.916374, -37.848449],
zoom: 4.5,
pitch: 60,
bearing: 0
},
onChapterEnter: [
{
layer: 'ports',
opacity: 1
}
],
onChapterExit: [
{
layer: 'ports',
opacity: 0
}
]
},
{
id: 'burnie',
title: 'Day 14: Burnie (Tasmania), Australia',
image: "https://www.oceaniacruises.com/Images/Ports/Medium/61283/BWT.jpg",
description: 'Nestled in the northwest corner of Tasmania, this energetic and creative seaside town offers a unique perspective on the world of industry. Once dependent on paper manufacturing, the inventive community has transformed its industry into a compelling art culture. Visit Makers’ Workshop, part museum and part arts center, for an introduction to the inspired spirit of Burnie, along with a tour of the process of papermaking and a glimpse of local artisans at work on their handicrafts. Discover Burnie’s natural gems on the many trails at Fern Glade Reserve or the picturesque Emu Valley Rhododendron Garden.',
location: {
center: [145.91162, -41.058940],
zoom: 4.5,
pitch: 60,
bearing: 0
},
onChapterEnter: [
{
layer: 'ports',
opacity: 1
}
],
onChapterExit: [
{
layer: 'ports',
opacity: 0
}
]
},
{
id: 'phillip',
title: 'Day 15: Phillip Island, Australia',
image: "https://www.oceaniacruises.com/Images/Ports/Medium/81604476847/PHP.jpg",
description: 'Phillip Island boasts wide sandy beaches, spectacular coastal scenery, excellent surfing and a world-famous Grand Prix circuit. However, its biggest attraction measures about one foot tall and weighs just two or three pounds. The island is home to thousands of little penguins, endearing not only as the smallest penguins on the planet but also for their unique blue coloring. Each day as the sun sets, the tiny birds return from a long day of fishing at sea and waddle up the beaches to the safety of their burrows. Affectionately known as the Penguin Parade, this magical scene has enchanted visitors since 1920. An array of wildlife beyond the penguins also can be observed here, including whales, koalas and Australia’s largest fur seal colony. Those not fond of furry friends might visit the National Vietnam Veterans Museum, take a stroll on the seaside boardwalk, or check out a local brewery or winery.',
location: {
center: [145.238177, -38.448267],
zoom: 4.5,
pitch: 60,
bearing: 0
},
onChapterEnter: [
{
layer: 'anchor port',
opacity: 1
}
],
onChapterExit: [
{
layer: 'anchor port',
opacity: 0
}
]
},
{
id: 'sydney',
title: 'Day 17: Sydney, Australia',
image: "https://www.oceaniacruises.com/Images/Ports/Medium/8785/SYD.jpg",
description: 'Visit the famous Opera House, stroll along the harbor, or cruise around it. Climb the Harbor Bridge, take a walk through the Rocks where Sydney began, or enjoy celebrated Bondi Beach. Head into the beautiful Blue Mountains for a taste of the country or visit a wildlife park and enjoy Australia’s fabulously unique creatures.',
|
location: {
center: [151.235148, -33.851759],
zoom: 4.5,
pitch: 60,
| random_line_split |
|
cache.go | Err func(err error) bool
group singleflight.Group
hits uint64
misses uint64
localHits uint64
localMisses uint64
}
// UseLocalCache causes Codec to cache items in local LRU cache.
func (cd *Codec) UseLocalCache(maxLen int, expiration time.Duration) {
cd.localCache = lrucache.New(maxLen, expiration)
}
func (cd *Codec) SetDefaultRedisExpiration(expiration time.Duration) {
cd.defaultRedisExpiration = expiration
cd.ensureDefaultExp()
}
// Set caches the item.
func (cd *Codec) Set(items ...*Item) error {
if len(items) == 1 {
_, err := cd.setItem(items[0])
return err
} else if len(items) > 1 {
return cd.mSetItems(items)
}
return nil
}
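// exampleSet is an illustrative sketch, not part of the original file: it shows
// a single write-through Set with a per-item TTL. The Profile type, key and TTL
// are made-up placeholders.
func exampleSet(cd *Codec) error {
	type Profile struct{ Name string }
	return cd.Set(&Item{
		Key:        "profile:42",
		Object:     &Profile{Name: "Ada"},
		Expiration: time.Hour,
	})
}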
func (cd *Codec) setItem(item *Item) ([]byte, error) {
object, err := item.object()
if err != nil {
return nil, err
}
b, err := cd.Marshal(object)
if err != nil {
log.Printf("cache: Marshal key=%q failed: %s", item.Key, err)
return nil, err
}
if cd.localCache != nil {
cd.localCache.Set(item.Key, b)
}
if cd.Redis == nil {
if cd.localCache == nil {
return nil, errRedisLocalCacheNil
}
return b, nil
}
err = cd.Redis.Set(item.Key, b, cd.exp(item.Expiration)).Err()
if err != nil {
log.Printf("cache: Set key=%q failed: %s", item.Key, err)
}
return b, err
}
// Exists reports whether object for the given key exists.
func (cd *Codec) Exists(key string) bool {
return cd.Get(key, nil) == nil
}
// Get gets the object for the given key.
func (cd *Codec) Get(key string, object interface{}) error {
return cd.get(key, object, false)
}
func (cd *Codec) get(key string, object interface{}, onlyLocalCache bool) error {
b, err := cd.getBytes(key, onlyLocalCache)
if err != nil {
return err
}
if object == nil || len(b) == 0 {
return nil
}
err = cd.Unmarshal(b, object)
if err != nil {
log.Printf("cache: key=%q Unmarshal(%T) failed: %s", key, object, err)
return err
}
return nil
}
func (cd *Codec) MGet(dst interface{}, keys ...string) error {
mapValue := reflect.ValueOf(dst)
if mapValue.Kind() == reflect.Ptr {
// get the value that the pointer mapValue points to.
mapValue = mapValue.Elem()
}
if mapValue.Kind() != reflect.Map {
return fmt.Errorf("dst must be a map instead of %v", mapValue.Type())
}
mapType := mapValue.Type()
// get the type of the key.
keyType := mapType.Key()
if keyType.Kind() != reflect.String {
return fmt.Errorf("dst key type must be a string, %v given", keyType.Kind())
}
elementType := mapType.Elem()
// non-pointer values not supported yet
if elementType.Kind() != reflect.Ptr {
return fmt.Errorf("dst value type must be a pointer, %v given", elementType.Kind())
}
// get the value that the pointer elementType points to.
elementType = elementType.Elem()
// allocate a new map, if mapValue is nil.
// @todo fix "reflect.Value.Set using unaddressable value"
if mapValue.IsNil() {
mapValue.Set(reflect.MakeMap(mapType))
}
res, err := cd.mGetBytes(keys)
if err != nil {
return err
}
for idx, data := range res {
bytes, ok := data.([]byte)
if !ok || bytes == nil {
continue
}
elementValue := reflect.New(elementType)
dstEl := elementValue.Interface()
err := cd.Unmarshal(bytes, dstEl)
if err != nil {
return err
}
key := reflect.ValueOf(keys[idx])
mapValue.SetMapIndex(key, reflect.ValueOf(dstEl))
}
return nil
}
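// exampleMGet is an illustrative sketch, not part of the original file: the
// destination must be a map of string keys to pointer values; keys that miss
// both caches are simply left out of the map. The User type and keys are
// made-up placeholders.
func exampleMGet(cd *Codec) error {
	type User struct{ ID string }
	dst := make(map[string]*User)
	if err := cd.MGet(dst, "user:1", "user:2"); err != nil {
		return err
	}
	if u, ok := dst["user:1"]; ok {
		_ = u // cache hit for user:1
	}
	return nil
}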
func (cd *Codec) BatchLoadAndCache(batchArgs *BatchArgs) error {
dstSlice := reflect.ValueOf(batchArgs.Dst)
if dstSlice.Kind() == reflect.Ptr {
dstSlice = dstSlice.Elem()
}
if dstSlice.Kind() != reflect.Slice {
return fmt.Errorf("slice expected as a destination, %s received", dstSlice.Kind())
}
sliceElem := dstSlice.Type().Elem()
m := reflect.MakeMap(reflect.MapOf(reflect.TypeOf(""), sliceElem)).Interface()
mArgs := &MGetArgs{
Keys: batchArgs.Keys,
Dst: m,
ObjByCacheKeyLoader: func(keysToLoad []string) (map[string]interface{}, error) {
for _, k := range keysToLoad {
batchArgs.CollectMissedKey(k)
}
loadedItems, err := batchArgs.BatchLoader()
if err != nil {
return nil, err
}
li := reflect.ValueOf(loadedItems)
if li.Kind() == reflect.Ptr {
li = li.Elem()
}
var result map[string]interface{}
switch li.Kind() {
case reflect.Slice:
result = make(map[string]interface{}, li.Len())
for i := 0; i < li.Len(); i++ {
elem := li.Index(i).Interface()
result[batchArgs.ItemToKey(elem)] = elem
}
default:
return nil, fmt.Errorf("slice expected from the loader function, %s received", li.Kind())
}
if len(keysToLoad) != len(result) && batchArgs.CreateObjectForMissedKey != nil {
for _, k := range keysToLoad {
if _, exists := result[k]; !exists {
objToCache, returnInResult := batchArgs.CreateObjectForMissedKey(k)
if returnInResult {
result[k] = objToCache
} else {
result[k] = &Item{
Key: k,
Object: objToCache,
Expiration: batchArgs.Expiration,
doNotReturn: true,
}
}
}
}
}
return result, nil
},
Expiration: batchArgs.Expiration,
}
err := cd.MGetAndCache(mArgs)
if err != nil {
return err
}
reflectedMap := reflect.ValueOf(mArgs.Dst)
slice := reflect.MakeSlice(reflect.SliceOf(sliceElem), 0, reflectedMap.Len())
for _, k := range reflectedMap.MapKeys() {
v := reflectedMap.MapIndex(k)
slice = reflect.Append(slice, v)
}
dstSlice.Set(slice)
batchArgs.Dst = dstSlice.Interface()
return nil
}
func (cd *Codec) MGetAndCache(mItem *MGetArgs) error {
err := cd.MGet(mItem.Dst, mItem.Keys...)
if err != nil {
return err
}
m := reflect.ValueOf(mItem.Dst)
if m.Kind() == reflect.Ptr {
m = m.Elem()
}
// map type is checked in the MGet function
if m.Len() != len(mItem.Keys) {
absentKeys := make([]string, len(mItem.Keys)-m.Len())
idx := 0
for _, k := range mItem.Keys {
mapVal := m.MapIndex(reflect.ValueOf(k))
if !mapVal.IsValid() {
absentKeys[idx] = k
idx++
}
}
loadedData, loaderErr := mItem.ObjByCacheKeyLoader(absentKeys)
if loaderErr != nil {
return loaderErr
}
items := make([]*Item, len(loadedData))
i := 0
for key, d := range loadedData {
var item *Item
var obj interface{}
if it, ok := d.(*Item); ok {
item = it
obj = it.Object
} else { | }
obj = d
}
if !item.doNotReturn {
m.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(obj))
}
items[i] = item
i++
}
return cd.Set(items...)
}
return nil
}
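// exampleMGetAndCache is an illustrative sketch, not part of the original file:
// keys missing from the cache are produced by the loader, merged into Dst and
// written back through Set. The User type, keys and TTL are made-up placeholders.
func exampleMGetAndCache(cd *Codec) error {
	type User struct{ ID string }
	dst := make(map[string]*User)
	return cd.MGetAndCache(&MGetArgs{
		Keys: []string{"user:1", "user:2"},
		Dst:  dst,
		ObjByCacheKeyLoader: func(missing []string) (map[string]interface{}, error) {
			loaded := make(map[string]interface{}, len(missing))
			for _, k := range missing {
				loaded[k] = &User{ID: k} // in real code, load these from the database
			}
			return loaded, nil
		},
		Expiration: time.Minute,
	})
}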
func (cd *Codec) mSetItems(items []*Item) error {
var pipeline redis.Pipeliner
if cd.Redis != nil {
pipeline = cd.Redis.Pipeline()
}
for _, item := range items {
key := item.Key
bytes, e := cd.Marshal(item.Object)
if e != nil {
return e
}
if cd.localCache != nil {
cd.localCache.Set(key, bytes)
}
if pipeline != nil {
pipeline.Set(key, bytes, cd.exp(item.Expiration))
}
}
if pipeline != nil {
_, err := pipeline.Exec()
if err != nil {
return err
}
}
return nil
}
// mGetBytes actually returns [][]bytes in an order which corresponds to | item = &Item{
Key: key,
Object: d,
Expiration: mItem.Expiration, | random_line_split |
cache.go | func(err error) bool
group singleflight.Group
hits uint64
misses uint64
localHits uint64
localMisses uint64
}
// UseLocalCache causes Codec to cache items in local LRU cache.
func (cd *Codec) UseLocalCache(maxLen int, expiration time.Duration) {
cd.localCache = lrucache.New(maxLen, expiration)
}
func (cd *Codec) SetDefaultRedisExpiration(expiration time.Duration) {
cd.defaultRedisExpiration = expiration
cd.ensureDefaultExp()
}
// Set caches the item.
func (cd *Codec) Set(items ...*Item) error |
func (cd *Codec) setItem(item *Item) ([]byte, error) {
object, err := item.object()
if err != nil {
return nil, err
}
b, err := cd.Marshal(object)
if err != nil {
log.Printf("cache: Marshal key=%q failed: %s", item.Key, err)
return nil, err
}
if cd.localCache != nil {
cd.localCache.Set(item.Key, b)
}
if cd.Redis == nil {
if cd.localCache == nil {
return nil, errRedisLocalCacheNil
}
return b, nil
}
err = cd.Redis.Set(item.Key, b, cd.exp(item.Expiration)).Err()
if err != nil {
log.Printf("cache: Set key=%q failed: %s", item.Key, err)
}
return b, err
}
// Exists reports whether object for the given key exists.
func (cd *Codec) Exists(key string) bool {
return cd.Get(key, nil) == nil
}
// Get gets the object for the given key.
func (cd *Codec) Get(key string, object interface{}) error {
return cd.get(key, object, false)
}
func (cd *Codec) get(key string, object interface{}, onlyLocalCache bool) error {
b, err := cd.getBytes(key, onlyLocalCache)
if err != nil {
return err
}
if object == nil || len(b) == 0 {
return nil
}
err = cd.Unmarshal(b, object)
if err != nil {
log.Printf("cache: key=%q Unmarshal(%T) failed: %s", key, object, err)
return err
}
return nil
}
func (cd *Codec) MGet(dst interface{}, keys ...string) error {
mapValue := reflect.ValueOf(dst)
if mapValue.Kind() == reflect.Ptr {
// get the value that the pointer mapValue points to.
mapValue = mapValue.Elem()
}
if mapValue.Kind() != reflect.Map {
return fmt.Errorf("dst must be a map instead of %v", mapValue.Type())
}
mapType := mapValue.Type()
// get the type of the key.
keyType := mapType.Key()
if keyType.Kind() != reflect.String {
return fmt.Errorf("dst key type must be a string, %v given", keyType.Kind())
}
elementType := mapType.Elem()
// non-pointer values not supported yet
if elementType.Kind() != reflect.Ptr {
return fmt.Errorf("dst value type must be a pointer, %v given", elementType.Kind())
}
// get the value that the pointer elementType points to.
elementType = elementType.Elem()
// allocate a new map, if mapValue is nil.
// @todo fix "reflect.Value.Set using unaddressable value"
if mapValue.IsNil() {
mapValue.Set(reflect.MakeMap(mapType))
}
res, err := cd.mGetBytes(keys)
if err != nil {
return err
}
for idx, data := range res {
bytes, ok := data.([]byte)
if !ok || bytes == nil {
continue
}
elementValue := reflect.New(elementType)
dstEl := elementValue.Interface()
err := cd.Unmarshal(bytes, dstEl)
if err != nil {
return err
}
key := reflect.ValueOf(keys[idx])
mapValue.SetMapIndex(key, reflect.ValueOf(dstEl))
}
return nil
}
func (cd *Codec) BatchLoadAndCache(batchArgs *BatchArgs) error {
dstSlice := reflect.ValueOf(batchArgs.Dst)
if dstSlice.Kind() == reflect.Ptr {
dstSlice = dstSlice.Elem()
}
if dstSlice.Kind() != reflect.Slice {
return fmt.Errorf("slice expected as a destination, %s received", dstSlice.Kind())
}
sliceElem := dstSlice.Type().Elem()
m := reflect.MakeMap(reflect.MapOf(reflect.TypeOf(""), sliceElem)).Interface()
mArgs := &MGetArgs{
Keys: batchArgs.Keys,
Dst: m,
ObjByCacheKeyLoader: func(keysToLoad []string) (map[string]interface{}, error) {
for _, k := range keysToLoad {
batchArgs.CollectMissedKey(k)
}
loadedItems, err := batchArgs.BatchLoader()
if err != nil {
return nil, err
}
li := reflect.ValueOf(loadedItems)
if li.Kind() == reflect.Ptr {
li = li.Elem()
}
var result map[string]interface{}
switch li.Kind() {
case reflect.Slice:
result = make(map[string]interface{}, li.Len())
for i := 0; i < li.Len(); i++ {
elem := li.Index(i).Interface()
result[batchArgs.ItemToKey(elem)] = elem
}
default:
return nil, fmt.Errorf("slice expected from the loader function, %s received", li.Kind())
}
if len(keysToLoad) != len(result) && batchArgs.CreateObjectForMissedKey != nil {
for _, k := range keysToLoad {
if _, exists := result[k]; !exists {
objToCache, returnInResult := batchArgs.CreateObjectForMissedKey(k)
if returnInResult {
result[k] = objToCache
} else {
result[k] = &Item{
Key: k,
Object: objToCache,
Expiration: batchArgs.Expiration,
doNotReturn: true,
}
}
}
}
}
return result, nil
},
Expiration: batchArgs.Expiration,
}
err := cd.MGetAndCache(mArgs)
if err != nil {
return err
}
reflectedMap := reflect.ValueOf(mArgs.Dst)
slice := reflect.MakeSlice(reflect.SliceOf(sliceElem), 0, reflectedMap.Len())
for _, k := range reflectedMap.MapKeys() {
v := reflectedMap.MapIndex(k)
slice = reflect.Append(slice, v)
}
dstSlice.Set(slice)
batchArgs.Dst = dstSlice.Interface()
return nil
}
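// Usage sketch for BatchLoadAndCache (illustrative; the BatchArgs field signatures are
// inferred from their use above, and User/loadUsersFromDB are invented). Dst must be a
// pointer to a slice so the combined cached+loaded results can be written back into it.
//
//	users := []*User{}
//	err := cd.BatchLoadAndCache(&BatchArgs{
//		Keys: []string{"user:1", "user:2"},
//		Dst:  &users,
//		BatchLoader: func() (interface{}, error) {
//			// expected to return a slice; only the keys reported through
//			// CollectMissedKey need to be fetched from the source of truth
//			return loadUsersFromDB(), nil
//		},
//		ItemToKey:  func(it interface{}) string { return "user:" + it.(*User).ID },
//		Expiration: time.Hour,
//	})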
func (cd *Codec) MGetAndCache(mItem *MGetArgs) error {
err := cd.MGet(mItem.Dst, mItem.Keys...)
if err != nil {
return err
}
m := reflect.ValueOf(mItem.Dst)
if m.Kind() == reflect.Ptr {
m = m.Elem()
}
// map type is checked in the MGet function
if m.Len() != len(mItem.Keys) {
absentKeys := make([]string, len(mItem.Keys)-m.Len())
idx := 0
for _, k := range mItem.Keys {
mapVal := m.MapIndex(reflect.ValueOf(k))
if !mapVal.IsValid() {
absentKeys[idx] = k
idx++
}
}
loadedData, loaderErr := mItem.ObjByCacheKeyLoader(absentKeys)
if loaderErr != nil {
return loaderErr
}
items := make([]*Item, len(loadedData))
i := 0
for key, d := range loadedData {
var item *Item
var obj interface{}
if it, ok := d.(*Item); ok {
item = it
obj = it.Object
} else {
item = &Item{
Key: key,
Object: d,
Expiration: mItem.Expiration,
}
obj = d
}
if !item.doNotReturn {
m.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(obj))
}
items[i] = item
i++
}
return cd.Set(items...)
}
return nil
}
func (cd *Codec) mSetItems(items []*Item) error {
var pipeline redis.Pipeliner
if cd.Redis != nil {
pipeline = cd.Redis.Pipeline()
}
for _, item := range items {
key := item.Key
bytes, e := cd.Marshal(item.Object)
if e != nil {
return e
}
if cd.localCache != nil {
cd.localCache.Set(key, bytes)
}
if pipeline != nil {
pipeline.Set(key, bytes, cd.exp(item.Expiration))
}
}
if pipeline != nil {
_, err := pipeline.Exec()
if err != nil {
return err
}
}
return nil
}
// mGetBytes actually returns [][]bytes in an order which corresponds | {
if len(items) == 1 {
_, err := cd.setItem(items[0])
return err
} else if len(items) > 1 {
return cd.mSetItems(items)
}
return nil
} | identifier_body |
cache.go | func(err error) bool
group singleflight.Group
hits uint64
misses uint64
localHits uint64
localMisses uint64
}
// UseLocalCache causes Codec to cache items in local LRU cache.
func (cd *Codec) UseLocalCache(maxLen int, expiration time.Duration) {
cd.localCache = lrucache.New(maxLen, expiration)
}
func (cd *Codec) SetDefaultRedisExpiration(expiration time.Duration) {
cd.defaultRedisExpiration = expiration
cd.ensureDefaultExp()
}
// Set caches the item.
func (cd *Codec) Set(items ...*Item) error {
if len(items) == 1 {
_, err := cd.setItem(items[0])
return err
} else if len(items) > 1 {
return cd.mSetItems(items)
}
return nil
}
func (cd *Codec) setItem(item *Item) ([]byte, error) {
object, err := item.object()
if err != nil {
return nil, err
}
b, err := cd.Marshal(object)
if err != nil {
log.Printf("cache: Marshal key=%q failed: %s", item.Key, err)
return nil, err
}
if cd.localCache != nil {
cd.localCache.Set(item.Key, b)
}
if cd.Redis == nil {
if cd.localCache == nil {
return nil, errRedisLocalCacheNil
}
return b, nil
}
err = cd.Redis.Set(item.Key, b, cd.exp(item.Expiration)).Err()
if err != nil {
log.Printf("cache: Set key=%q failed: %s", item.Key, err)
}
return b, err
}
// Exists reports whether object for the given key exists.
func (cd *Codec) Exists(key string) bool {
return cd.Get(key, nil) == nil
}
// Get gets the object for the given key.
func (cd *Codec) Get(key string, object interface{}) error {
return cd.get(key, object, false)
}
func (cd *Codec) get(key string, object interface{}, onlyLocalCache bool) error {
b, err := cd.getBytes(key, onlyLocalCache)
if err != nil {
return err
}
if object == nil || len(b) == 0 {
return nil
}
err = cd.Unmarshal(b, object)
if err != nil {
log.Printf("cache: key=%q Unmarshal(%T) failed: %s", key, object, err)
return err
}
return nil
}
func (cd *Codec) MGet(dst interface{}, keys ...string) error {
mapValue := reflect.ValueOf(dst)
if mapValue.Kind() == reflect.Ptr {
// get the value that the pointer mapValue points to.
mapValue = mapValue.Elem()
}
if mapValue.Kind() != reflect.Map {
return fmt.Errorf("dst must be a map instead of %v", mapValue.Type())
}
mapType := mapValue.Type()
// get the type of the key.
keyType := mapType.Key()
if keyType.Kind() != reflect.String |
elementType := mapType.Elem()
// non-pointer values not supported yet
if elementType.Kind() != reflect.Ptr {
return fmt.Errorf("dst value type must be a pointer, %v given", elementType.Kind())
}
// get the value that the pointer elementType points to.
elementType = elementType.Elem()
// allocate a new map, if mapValue is nil.
// @todo fix "reflect.Value.Set using unaddressable value"
if mapValue.IsNil() {
mapValue.Set(reflect.MakeMap(mapType))
}
res, err := cd.mGetBytes(keys)
if err != nil {
return err
}
for idx, data := range res {
bytes, ok := data.([]byte)
if !ok || bytes == nil {
continue
}
elementValue := reflect.New(elementType)
dstEl := elementValue.Interface()
err := cd.Unmarshal(bytes, dstEl)
if err != nil {
return err
}
key := reflect.ValueOf(keys[idx])
mapValue.SetMapIndex(key, reflect.ValueOf(dstEl))
}
return nil
}
func (cd *Codec) BatchLoadAndCache(batchArgs *BatchArgs) error {
dstSlice := reflect.ValueOf(batchArgs.Dst)
if dstSlice.Kind() == reflect.Ptr {
dstSlice = dstSlice.Elem()
}
if dstSlice.Kind() != reflect.Slice {
return fmt.Errorf("slice expected as a destination, %s received", dstSlice.Kind())
}
sliceElem := dstSlice.Type().Elem()
m := reflect.MakeMap(reflect.MapOf(reflect.TypeOf(""), sliceElem)).Interface()
mArgs := &MGetArgs{
Keys: batchArgs.Keys,
Dst: m,
ObjByCacheKeyLoader: func(keysToLoad []string) (map[string]interface{}, error) {
for _, k := range keysToLoad {
batchArgs.CollectMissedKey(k)
}
loadedItems, err := batchArgs.BatchLoader()
if err != nil {
return nil, err
}
li := reflect.ValueOf(loadedItems)
if li.Kind() == reflect.Ptr {
li = li.Elem()
}
var result map[string]interface{}
switch li.Kind() {
case reflect.Slice:
result = make(map[string]interface{}, li.Len())
for i := 0; i < li.Len(); i++ {
elem := li.Index(i).Interface()
result[batchArgs.ItemToKey(elem)] = elem
}
default:
return nil, fmt.Errorf("slice expected from the loader function, %s received", li.Kind())
}
if len(keysToLoad) != len(result) && batchArgs.CreateObjectForMissedKey != nil {
for _, k := range keysToLoad {
if _, exists := result[k]; !exists {
objToCache, returnInResult := batchArgs.CreateObjectForMissedKey(k)
if returnInResult {
result[k] = objToCache
} else {
result[k] = &Item{
Key: k,
Object: objToCache,
Expiration: batchArgs.Expiration,
doNotReturn: true,
}
}
}
}
}
return result, nil
},
Expiration: batchArgs.Expiration,
}
err := cd.MGetAndCache(mArgs)
if err != nil {
return err
}
reflectedMap := reflect.ValueOf(mArgs.Dst)
slice := reflect.MakeSlice(reflect.SliceOf(sliceElem), 0, reflectedMap.Len())
for _, k := range reflectedMap.MapKeys() {
v := reflectedMap.MapIndex(k)
slice = reflect.Append(slice, v)
}
dstSlice.Set(slice)
batchArgs.Dst = dstSlice.Interface()
return nil
}
func (cd *Codec) MGetAndCache(mItem *MGetArgs) error {
err := cd.MGet(mItem.Dst, mItem.Keys...)
if err != nil {
return err
}
m := reflect.ValueOf(mItem.Dst)
if m.Kind() == reflect.Ptr {
m = m.Elem()
}
// map type is checked in the MGet function
if m.Len() != len(mItem.Keys) {
absentKeys := make([]string, len(mItem.Keys)-m.Len())
idx := 0
for _, k := range mItem.Keys {
mapVal := m.MapIndex(reflect.ValueOf(k))
if !mapVal.IsValid() {
absentKeys[idx] = k
idx++
}
}
loadedData, loaderErr := mItem.ObjByCacheKeyLoader(absentKeys)
if loaderErr != nil {
return loaderErr
}
items := make([]*Item, len(loadedData))
i := 0
for key, d := range loadedData {
var item *Item
var obj interface{}
if it, ok := d.(*Item); ok {
item = it
obj = it.Object
} else {
item = &Item{
Key: key,
Object: d,
Expiration: mItem.Expiration,
}
obj = d
}
if !item.doNotReturn {
m.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(obj))
}
items[i] = item
i++
}
return cd.Set(items...)
}
return nil
}
func (cd *Codec) mSetItems(items []*Item) error {
var pipeline redis.Pipeliner
if cd.Redis != nil {
pipeline = cd.Redis.Pipeline()
}
for _, item := range items {
key := item.Key
bytes, e := cd.Marshal(item.Object)
if e != nil {
return e
}
if cd.localCache != nil {
cd.localCache.Set(key, bytes)
}
if pipeline != nil {
pipeline.Set(key, bytes, cd.exp(item.Expiration))
}
}
if pipeline != nil {
_, err := pipeline.Exec()
if err != nil {
return err
}
}
return nil
}
// mGetBytes actually returns [][]bytes in an order which corresponds | {
return fmt.Errorf("dst key type must be a string, %v given", keyType.Kind())
} | conditional_block |
cache.go |
}
default:
return nil, fmt.Errorf("slice expected from the loader function, %s received", li.Kind())
}
if len(keysToLoad) != len(result) && batchArgs.CreateObjectForMissedKey != nil {
for _, k := range keysToLoad {
if _, exists := result[k]; !exists {
objToCache, returnInResult := batchArgs.CreateObjectForMissedKey(k)
if returnInResult {
result[k] = objToCache
} else {
result[k] = &Item{
Key: k,
Object: objToCache,
Expiration: batchArgs.Expiration,
doNotReturn: true,
}
}
}
}
}
return result, nil
},
Expiration: batchArgs.Expiration,
}
err := cd.MGetAndCache(mArgs)
if err != nil {
return err
}
reflectedMap := reflect.ValueOf(mArgs.Dst)
slice := reflect.MakeSlice(reflect.SliceOf(sliceElem), 0, reflectedMap.Len())
for _, k := range reflectedMap.MapKeys() {
v := reflectedMap.MapIndex(k)
slice = reflect.Append(slice, v)
}
dstSlice.Set(slice)
batchArgs.Dst = dstSlice.Interface()
return nil
}
func (cd *Codec) MGetAndCache(mItem *MGetArgs) error {
err := cd.MGet(mItem.Dst, mItem.Keys...)
if err != nil {
return err
}
m := reflect.ValueOf(mItem.Dst)
if m.Kind() == reflect.Ptr {
m = m.Elem()
}
// map type is checked in the MGet function
if m.Len() != len(mItem.Keys) {
absentKeys := make([]string, len(mItem.Keys)-m.Len())
idx := 0
for _, k := range mItem.Keys {
mapVal := m.MapIndex(reflect.ValueOf(k))
if !mapVal.IsValid() {
absentKeys[idx] = k
idx++
}
}
loadedData, loaderErr := mItem.ObjByCacheKeyLoader(absentKeys)
if loaderErr != nil {
return loaderErr
}
items := make([]*Item, len(loadedData))
i := 0
for key, d := range loadedData {
var item *Item
var obj interface{}
if it, ok := d.(*Item); ok {
item = it
obj = it.Object
} else {
item = &Item{
Key: key,
Object: d,
Expiration: mItem.Expiration,
}
obj = d
}
if !item.doNotReturn {
m.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(obj))
}
items[i] = item
i++
}
return cd.Set(items...)
}
return nil
}
func (cd *Codec) mSetItems(items []*Item) error {
var pipeline redis.Pipeliner
if cd.Redis != nil {
pipeline = cd.Redis.Pipeline()
}
for _, item := range items {
key := item.Key
bytes, e := cd.Marshal(item.Object)
if e != nil {
return e
}
if cd.localCache != nil {
cd.localCache.Set(key, bytes)
}
if pipeline != nil {
pipeline.Set(key, bytes, cd.exp(item.Expiration))
}
}
if pipeline != nil {
_, err := pipeline.Exec()
if err != nil {
return err
}
}
return nil
}
// mGetBytes effectively returns [][]byte in an order that corresponds to the provided keys;
// an interface{} slice is used to avoid allocating intermediate structures
func (cd *Codec) mGetBytes(keys []string) ([]interface{}, error) {
collectedData := make([]interface{}, len(keys))
recordsMissedInLocalCache := len(keys)
if cd.localCache != nil {
for idx, k := range keys {
var err error
var d []byte
d, err = cd.getBytes(k, true)
if err == nil {
collectedData[idx] = d
recordsMissedInLocalCache--
}
}
}
if cd.Redis != nil && recordsMissedInLocalCache > 0 {
pipeline := cd.Redis.Pipeline()
defer pipeline.Close()
for idx, b := range collectedData {
if b == nil {
// store the pipeline result here so we don't have to track separate indexes for keys missing from the local cache
collectedData[idx] = pipeline.Get(keys[idx])
}
}
_, pipelineErr := pipeline.Exec()
if pipelineErr != nil && pipelineErr != redis.Nil &&
(cd.SkipPipelineErr != nil && !cd.SkipPipelineErr(pipelineErr)) {
return nil, pipelineErr
}
hits := 0
for idx, content := range collectedData {
if redisResp, ok := content.(*redis.StringCmd); ok {
data, respErr := redisResp.Result()
if respErr == redis.Nil {
collectedData[idx] = nil
continue
}
if respErr != nil {
return nil, respErr
}
collectedData[idx] = []byte(data)
hits++
}
}
misses := recordsMissedInLocalCache - hits
atomic.AddUint64(&cd.hits, uint64(hits))
atomic.AddUint64(&cd.misses, uint64(misses))
}
return collectedData, nil
}
func (cd *Codec) getBytes(key string, onlyLocalCache bool) ([]byte, error) {
if cd.localCache != nil {
b, ok := cd.localCache.Get(key)
if ok {
atomic.AddUint64(&cd.localHits, 1)
return b, nil
}
atomic.AddUint64(&cd.localMisses, 1)
}
if onlyLocalCache {
return nil, ErrCacheMiss
}
if cd.Redis == nil {
if cd.localCache == nil {
return nil, errRedisLocalCacheNil
}
return nil, ErrCacheMiss
}
b, err := cd.Redis.Get(key).Bytes()
if err != nil {
atomic.AddUint64(&cd.misses, 1)
if err == redis.Nil {
return nil, ErrCacheMiss
}
log.Printf("cache: Get key=%q failed: %s", key, err)
return nil, err
}
atomic.AddUint64(&cd.hits, 1)
if cd.localCache != nil {
cd.localCache.Set(key, b)
}
return b, nil
}
// Once gets the item.Object for the given item.Key from the cache or
// executes, caches, and returns the results of the given item.Func,
// making sure that only one execution is in-flight for a given item.Key
// at a time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
func (cd *Codec) Once(item *Item) error {
b, cached, err := cd.getSetItemBytesOnce(item)
if err != nil {
return err
}
if item.Object == nil || len(b) == 0 {
return nil
}
err = cd.Unmarshal(b, item.Object)
if err != nil {
log.Printf("cache: key=%q Unmarshal(%T) failed: %s", item.Key, item.Object, err)
if cached {
_ = cd.Delete(item.Key)
return cd.Once(item)
} else {
return err
}
}
return nil
}
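// Usage sketch for Once (illustrative; Item's Key/Object/Expiration/Func fields are
// used as above and loadUser is invented). Concurrent callers for the same key share
// a single execution of Func through the singleflight group.
//
//	var u User
//	err := cd.Once(&Item{
//		Key:        "user:1",
//		Object:     &u,
//		Expiration: time.Hour,
//		Func: func() (interface{}, error) {
//			return loadUser("1") // runs only on a cache miss
//		},
//	})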
func (cd *Codec) getSetItemBytesOnce(item *Item) (b []byte, cached bool, err error) {
if cd.localCache != nil {
b, err := cd.getItemBytesFast(item)
if err == nil {
return b, true, nil
}
}
obj, err := cd.group.Do(item.Key, func() (interface{}, error) {
b, err := cd.getItemBytes(item)
if err == nil {
cached = true
return b, nil
}
obj, err := item.Func()
if err != nil {
return nil, err
}
b, err = cd.setItem(&Item{
Key: item.Key,
Object: obj,
Expiration: item.Expiration,
})
if b != nil {
// Ignore error if we have the result.
return b, nil
}
return nil, err
})
if err != nil {
return nil, false, err
}
return obj.([]byte), cached, nil
}
func (cd *Codec) getItemBytes(item *Item) ([]byte, error) {
return cd.getBytes(item.Key, false)
}
func (cd *Codec) getItemBytesFast(item *Item) ([]byte, error) {
return cd.getBytes(item.Key, true)
}
func (cd *Codec) exp(itemExp time.Duration) time.Duration {
if itemExp < 0 {
return 0
}
if itemExp < time.Second {
cd.ensureDefaultExp()
return cd.defaultRedisExpiration
}
return itemExp
}
func (cd *Codec) | ensureDefaultExp | identifier_name |
|
lib.rs | pub struct XRefInfo {
//! #[pdf(key = "Filter")]
//! filter: Vec<StreamFilter>,
//! #[pdf(key = "Size")]
//! pub size: i32,
//! #[pdf(key = "Index", default = "vec![0, size]")]
//! pub index: Vec<i32>,
//! // [...]
//! }
//! ```
//!
//!
//! ## 2. Struct from PDF Stream
//! PDF Streams consist of a stream dictionary along with the stream data itself. Any
//! struct that derives Object from a stream primitive is assumed to have a field
//! `info: T`, where `T: Object`, and a field `data: Vec<u8>`.
//!
//! When deriving Object for a type that converts from `Primitive::Stream`, the
//! `is_stream` flag is required in the proc macro attributes.
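//!
//! An illustrative sketch (the struct and the info type are invented here; the only
//! requirements stated above are the `is_stream` flag and the `info`/`data` fields):
//!
//! ```
//! #[derive(Object)]
//! #[pdf(is_stream)]
//! pub struct ExampleStream {
//!     info: ExampleStreamInfo, // any T: Object describing the stream dictionary
//!     data: Vec<u8>,
//! }
//! ```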
//!
//! ## 3. Enum from PDF Name
//! Example:
//!
//! ```
//! #[derive(Object, Debug)]
//! pub enum StreamFilter {
//! ASCIIHexDecode,
//! ASCII85Decode,
//! LZWDecode,
//! FlateDecode,
//! JPXDecode,
//! DCTDecode,
//! }
//! ```
//!
//! In this case, `StreamFilter::from_primitive(primitive)` will return Ok(_) only if the primitive
//! is `Primitive::Name` and matches one of the enum variants
#![recursion_limit="128"]
extern crate proc_macro;
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro::TokenStream;
use syn::*;
// Debugging:
/*
use std::fs::{OpenOptions};
use std::io::Write;
*/
#[proc_macro_derive(Object, attributes(pdf))]
pub fn object(input: TokenStream) -> TokenStream {
// Construct a string representation of the type definition
let s = input.to_string();
// Parse the string representation
let ast = syn::parse_derive_input(&s).unwrap();
// Build the impl
let gen = impl_object(&ast);
// Debugging
/*
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open("/tmp/proj/src/main.rs")
.unwrap();
write!(file, "{}", gen).unwrap();
*/
// Return the generated impl
gen.parse().unwrap()
}
/// Returns (key, default, skip)
fn field_attrs(field: &Field) -> (String, Option<String>, bool) {
field.attrs.iter()
.filter_map(|attr| match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
let (mut key, mut default, mut skip) = (None, None, false);
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "key"
=> key = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "default"
=> default = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "skip"
=> skip = true,
_ => panic!(r##"Derive error - Supported derive attributes: `key="Key"`, `default="some code"`."##)
}
}
let key = match skip {
true => String::from(""),
false => key.expect("attr `key` missing"),
};
Some(( key, default, skip))
},
_ => None
}).next().expect("no pdf meta attribute")
}
/// Just the attributes for the whole struct
#[derive(Default)]
struct GlobalAttrs {
/// List of checks to do in the dictionary (LHS is the key, RHS is the expected value)
checks: Vec<(String, String)>,
type_name: Option<String>,
type_required: bool,
is_stream: bool,
}
impl GlobalAttrs {
/// The PDF type may be explicitly specified as an attribute with type "Type". Else, it is the name
/// of the struct.
fn from_ast(ast: &DeriveInput) -> GlobalAttrs {
let mut attrs = GlobalAttrs::default();
for attr in &ast.attrs {
match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
// Loop through list of attributes
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, ref value))
=> if ident == "Type" | else {
match *value {
Lit::Str(ref value, _) => attrs.checks.push((String::from(ident.as_ref()), value.clone())),
_ => panic!("Other checks must have RHS String."),
}
},
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "is_stream" => attrs.is_stream = true,
_ => {}
}
}
},
_ => {}
}
}
attrs
}
}
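// Illustrative sketch of the struct-level attributes that from_ast parses (spelling
// inferred from the match arms above; the struct itself is invented):
//
//     #[derive(Object)]
//     #[pdf(Type = "XRef?", is_stream)]
//     struct ExampleXRefStream { /* ... */ }
//
// A trailing '?' on the Type value makes the /Type check optional rather than
// required, and any other `Key = "Value"` pair is recorded as a dictionary check.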
fn impl_object(ast: &DeriveInput) -> quote::Tokens {
let attrs = GlobalAttrs::from_ast(&ast);
if attrs.is_stream {
match ast.body {
Body::Struct(ref data) => impl_object_for_stream(ast, data.fields()),
Body::Enum(_) => panic!("Enum can't be a PDF stream"),
}
} else {
match ast.body {
Body::Struct(ref data) => impl_object_for_struct(ast, data.fields()),
Body::Enum(ref variants) => impl_object_for_enum(ast, variants),
}
}
}
/// Accepts Name to construct enum
fn impl_object_for_enum(ast: &DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let ser_code: Vec<_> = variants.iter().map(|var| {
quote! {
#id::#var => stringify!(#id::#var),
}
}).collect();
let from_primitive_code = impl_from_name(ast, variants);
quote! {
impl #impl_generics ::pdf::object::Object for #id #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "/{}",
match *self {
#( #ser_code )*
}
)
}
fn from_primitive(p: Primitive, _resolve: &Resolve) -> ::pdf::Result<Self> {
#from_primitive_code
}
}
}
}
/// Returns code for from_primitive that accepts Name
fn impl_from_name(ast: &syn::DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let parts: Vec<quote::Tokens> = variants.iter().map(|var| {
quote! {
stringify!(#var) => #id::#var,
}
}).collect();
quote! {
Ok(
match p {
Primitive::Name (name) => {
match name.as_str() {
#( #parts )*
s => bail!(format!("Enum {} from_primitive: no variant {}.", stringify!(#id), s)),
}
}
_ => bail!(::pdf::Error::from(::pdf::ErrorKind::UnexpectedPrimitive { expected: "Name", found: p.get_debug_name() })),
}
)
}
}
/// Accepts Dictionary to construct a struct
fn impl_object_for_struct(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let attrs = GlobalAttrs::from_ast(&ast);
let parts: Vec<_> = fields.iter()
.map(|field| {
let (key, default, skip) = field_attrs(field);
(field.ident.clone(), key, default, skip)
}).collect();
// Implement serialize()
let fields_ser = parts.iter()
.map( |&(ref field, ref key, ref _default, skip)|
if skip {
quote! {}
} else {
quote! {
write!(out, "{} ", #key)?;
self.#field.serialize(out)?;
writeln!(out, "")?;
}
}
);
let checks_code = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
writeln!(out, "/{} /{}", #key, #val)?;
}
);
// Implement from_primitive()
let from_primitive_code = impl_from_dict(ast, fields);
let pdf_type = match attrs.type_name {
Some(ref ty) => quote! { writeln!(out, "/Type /{}", #ty)?; },
None => quote! {}
};
quote! {
impl #impl_generics ::pdf | {
match *value {
Lit::Str(ref value, _) => {
if value.ends_with("?") {
attrs.type_name = Some(value[.. value.len()-1].to_string());
attrs.type_required = false;
} else {
attrs.type_name = Some(value.clone());
attrs.type_required = true;
}
},
_ => panic!("Value of 'Type' attribute must be a String."),
}
} | conditional_block |
lib.rs | pub struct XRefInfo {
//! #[pdf(key = "Filter")]
//! filter: Vec<StreamFilter>,
//! #[pdf(key = "Size")]
//! pub size: i32,
//! #[pdf(key = "Index", default = "vec![0, size]")]
//! pub index: Vec<i32>,
//! // [...]
//! }
//! ```
//!
//!
//! ## 2. Struct from PDF Stream
//! PDF Streams consist of a stream dictionary along with the stream data itself. Any
//! struct that derives Object from a stream primitive is assumed to have a field
//! `info: T`, where `T: Object`, and a field `data: Vec<u8>`.
//!
//! When deriving Object for a type that converts from `Primitive::Stream`, the
//! `is_stream` flag is required in the proc macro attributes.
//!
//! ## 3. Enum from PDF Name
//! Example:
//!
//! ```
//! #[derive(Object, Debug)]
//! pub enum StreamFilter {
//! ASCIIHexDecode,
//! ASCII85Decode,
//! LZWDecode,
//! FlateDecode,
//! JPXDecode,
//! DCTDecode,
//! }
//! ```
//!
//! In this case, `StreamFilter::from_primitive(primitive)` will return Ok(_) only if the primitive
//! is `Primitive::Name` and matches one of the enum variants
#![recursion_limit="128"]
extern crate proc_macro;
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro::TokenStream;
use syn::*;
// Debugging:
/*
use std::fs::{OpenOptions};
use std::io::Write;
*/
#[proc_macro_derive(Object, attributes(pdf))]
pub fn object(input: TokenStream) -> TokenStream {
// Construct a string representation of the type definition
let s = input.to_string();
// Parse the string representation
let ast = syn::parse_derive_input(&s).unwrap();
// Build the impl
let gen = impl_object(&ast);
// Debugging
/*
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open("/tmp/proj/src/main.rs")
.unwrap();
write!(file, "{}", gen).unwrap();
*/
// Return the generated impl
gen.parse().unwrap()
}
/// Returns (key, default, skip)
fn field_attrs(field: &Field) -> (String, Option<String>, bool) {
field.attrs.iter()
.filter_map(|attr| match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
let (mut key, mut default, mut skip) = (None, None, false);
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "key"
=> key = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "default"
=> default = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "skip"
=> skip = true,
_ => panic!(r##"Derive error - Supported derive attributes: `key="Key"`, `default="some code"`."##)
}
}
let key = match skip {
true => String::from(""),
false => key.expect("attr `key` missing"),
};
Some(( key, default, skip))
},
_ => None
}).next().expect("no pdf meta attribute")
}
/// Just the attributes for the whole struct
#[derive(Default)]
struct GlobalAttrs {
/// List of checks to do in the dictionary (LHS is the key, RHS is the expected value)
checks: Vec<(String, String)>,
type_name: Option<String>,
type_required: bool,
is_stream: bool,
}
impl GlobalAttrs {
/// The PDF type may be explicitly specified as an attribute with type "Type". Else, it is the name
/// of the struct.
fn from_ast(ast: &DeriveInput) -> GlobalAttrs {
let mut attrs = GlobalAttrs::default();
for attr in &ast.attrs {
match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
// Loop through list of attributes
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, ref value))
=> if ident == "Type" {
match *value {
Lit::Str(ref value, _) => {
if value.ends_with("?") {
attrs.type_name = Some(value[.. value.len()-1].to_string());
attrs.type_required = false;
} else {
attrs.type_name = Some(value.clone());
attrs.type_required = true;
}
},
_ => panic!("Value of 'Type' attribute must be a String."),
}
} else {
match *value {
Lit::Str(ref value, _) => attrs.checks.push((String::from(ident.as_ref()), value.clone())),
_ => panic!("Other checks must have RHS String."),
}
},
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "is_stream" => attrs.is_stream = true,
_ => {}
}
}
},
_ => {}
}
}
attrs
}
}
fn impl_object(ast: &DeriveInput) -> quote::Tokens |
/// Accepts Name to construct enum
fn impl_object_for_enum(ast: &DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let ser_code: Vec<_> = variants.iter().map(|var| {
quote! {
#id::#var => stringify!(#id::#var),
}
}).collect();
let from_primitive_code = impl_from_name(ast, variants);
quote! {
impl #impl_generics ::pdf::object::Object for #id #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "/{}",
match *self {
#( #ser_code )*
}
)
}
fn from_primitive(p: Primitive, _resolve: &Resolve) -> ::pdf::Result<Self> {
#from_primitive_code
}
}
}
}
/// Returns code for from_primitive that accepts Name
fn impl_from_name(ast: &syn::DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let parts: Vec<quote::Tokens> = variants.iter().map(|var| {
quote! {
stringify!(#var) => #id::#var,
}
}).collect();
quote! {
Ok(
match p {
Primitive::Name (name) => {
match name.as_str() {
#( #parts )*
s => bail!(format!("Enum {} from_primitive: no variant {}.", stringify!(#id), s)),
}
}
_ => bail!(::pdf::Error::from(::pdf::ErrorKind::UnexpectedPrimitive { expected: "Name", found: p.get_debug_name() })),
}
)
}
}
/// Accepts Dictionary to construct a struct
fn impl_object_for_struct(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let attrs = GlobalAttrs::from_ast(&ast);
let parts: Vec<_> = fields.iter()
.map(|field| {
let (key, default, skip) = field_attrs(field);
(field.ident.clone(), key, default, skip)
}).collect();
// Implement serialize()
let fields_ser = parts.iter()
.map( |&(ref field, ref key, ref _default, skip)|
if skip {
quote! {}
} else {
quote! {
write!(out, "{} ", #key)?;
self.#field.serialize(out)?;
writeln!(out, "")?;
}
}
);
let checks_code = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
writeln!(out, "/{} /{}", #key, #val)?;
}
);
// Implement from_primitive()
let from_primitive_code = impl_from_dict(ast, fields);
let pdf_type = match attrs.type_name {
Some(ref ty) => quote! { writeln!(out, "/Type /{}", #ty)?; },
None => quote! {}
};
quote! {
impl #impl_generics :: | {
let attrs = GlobalAttrs::from_ast(&ast);
if attrs.is_stream {
match ast.body {
Body::Struct(ref data) => impl_object_for_stream(ast, data.fields()),
Body::Enum(_) => panic!("Enum can't be a PDF stream"),
}
} else {
match ast.body {
Body::Struct(ref data) => impl_object_for_struct(ast, data.fields()),
Body::Enum(ref variants) => impl_object_for_enum(ast, variants),
}
}
} | identifier_body |
lib.rs | pub struct XRefInfo {
//! #[pdf(key = "Filter")]
//! filter: Vec<StreamFilter>,
//! #[pdf(key = "Size")]
//! pub size: i32,
//! #[pdf(key = "Index", default = "vec![0, size]")]
//! pub index: Vec<i32>,
//! // [...]
//! }
//! ```
//!
//!
//! ## 2. Struct from PDF Stream
//! PDF Streams consist of a stream dictionary along with the stream data itself. Any
//! struct that derives Object from a stream primitive is assumed to have a field
//! `info: T`, where `T: Object`, and a field `data: Vec<u8>`.
//!
//! When deriving Object for a type that converts from `Primitive::Stream`, the
//! `is_stream` flag is required in the proc macro attributes.
//!
//! ## 3. Enum from PDF Name
//! Example:
//!
//! ```
//! #[derive(Object, Debug)]
//! pub enum StreamFilter {
//! ASCIIHexDecode,
//! ASCII85Decode,
//! LZWDecode,
//! FlateDecode,
//! JPXDecode,
//! DCTDecode,
//! }
//! ```
//!
//! In this case, `StreamFilter::from_primitive(primitive)` will return Ok(_) only if the primitive
//! is `Primitive::Name` and matches one of the enum variants
#![recursion_limit="128"]
extern crate proc_macro;
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro::TokenStream;
use syn::*;
// Debugging:
/*
use std::fs::{OpenOptions};
use std::io::Write;
*/
#[proc_macro_derive(Object, attributes(pdf))]
pub fn object(input: TokenStream) -> TokenStream {
// Construct a string representation of the type definition
let s = input.to_string();
// Parse the string representation
let ast = syn::parse_derive_input(&s).unwrap();
// Build the impl
let gen = impl_object(&ast);
// Debugging
/*
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open("/tmp/proj/src/main.rs")
.unwrap();
write!(file, "{}", gen).unwrap();
*/
// Return the generated impl
gen.parse().unwrap()
}
/// Returns (key, default, skip)
fn field_attrs(field: &Field) -> (String, Option<String>, bool) {
field.attrs.iter()
.filter_map(|attr| match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
let (mut key, mut default, mut skip) = (None, None, false);
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "key"
=> key = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "default"
=> default = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "skip"
=> skip = true,
_ => panic!(r##"Derive error - Supported derive attributes: `key="Key"`, `default="some code"`."##)
}
}
let key = match skip {
true => String::from(""),
false => key.expect("attr `key` missing"),
};
Some(( key, default, skip))
},
_ => None
}).next().expect("no pdf meta attribute")
}
/// Just the attributes for the whole struct
#[derive(Default)]
struct GlobalAttrs {
/// List of checks to do in the dictionary (LHS is the key, RHS is the expected value)
checks: Vec<(String, String)>,
type_name: Option<String>,
type_required: bool,
is_stream: bool,
}
impl GlobalAttrs {
/// The PDF type may be explicitly specified as an attribute with type "Type". Else, it is the name
/// of the struct.
fn from_ast(ast: &DeriveInput) -> GlobalAttrs {
let mut attrs = GlobalAttrs::default();
for attr in &ast.attrs {
match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
// Loop through list of attributes
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, ref value))
=> if ident == "Type" {
match *value {
Lit::Str(ref value, _) => {
if value.ends_with("?") {
attrs.type_name = Some(value[.. value.len()-1].to_string());
attrs.type_required = false;
} else {
attrs.type_name = Some(value.clone());
attrs.type_required = true;
}
},
_ => panic!("Value of 'Type' attribute must be a String."),
}
} else {
match *value {
Lit::Str(ref value, _) => attrs.checks.push((String::from(ident.as_ref()), value.clone())),
_ => panic!("Other checks must have RHS String."),
}
},
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "is_stream" => attrs.is_stream = true,
_ => {}
}
}
},
_ => {}
}
}
attrs
}
}
fn impl_object(ast: &DeriveInput) -> quote::Tokens {
let attrs = GlobalAttrs::from_ast(&ast);
if attrs.is_stream { | match ast.body {
Body::Struct(ref data) => impl_object_for_stream(ast, data.fields()),
Body::Enum(_) => panic!("Enum can't be a PDF stream"),
}
} else {
match ast.body {
Body::Struct(ref data) => impl_object_for_struct(ast, data.fields()),
Body::Enum(ref variants) => impl_object_for_enum(ast, variants),
}
}
}
/// Accepts Name to construct enum
fn impl_object_for_enum(ast: &DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let ser_code: Vec<_> = variants.iter().map(|var| {
quote! {
#id::#var => stringify!(#id::#var),
}
}).collect();
let from_primitive_code = impl_from_name(ast, variants);
quote! {
impl #impl_generics ::pdf::object::Object for #id #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "/{}",
match *self {
#( #ser_code )*
}
)
}
fn from_primitive(p: Primitive, _resolve: &Resolve) -> ::pdf::Result<Self> {
#from_primitive_code
}
}
}
}
/// Returns code for from_primitive that accepts Name
fn impl_from_name(ast: &syn::DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let parts: Vec<quote::Tokens> = variants.iter().map(|var| {
quote! {
stringify!(#var) => #id::#var,
}
}).collect();
quote! {
Ok(
match p {
Primitive::Name (name) => {
match name.as_str() {
#( #parts )*
s => bail!(format!("Enum {} from_primitive: no variant {}.", stringify!(#id), s)),
}
}
_ => bail!(::pdf::Error::from(::pdf::ErrorKind::UnexpectedPrimitive { expected: "Name", found: p.get_debug_name() })),
}
)
}
}
/// Accepts Dictionary to construct a struct
fn impl_object_for_struct(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let attrs = GlobalAttrs::from_ast(&ast);
let parts: Vec<_> = fields.iter()
.map(|field| {
let (key, default, skip) = field_attrs(field);
(field.ident.clone(), key, default, skip)
}).collect();
// Implement serialize()
let fields_ser = parts.iter()
.map( |&(ref field, ref key, ref _default, skip)|
if skip {
quote! {}
} else {
quote! {
write!(out, "{} ", #key)?;
self.#field.serialize(out)?;
writeln!(out, "")?;
}
}
);
let checks_code = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
writeln!(out, "/{} /{}", #key, #val)?;
}
);
// Implement from_primitive()
let from_primitive_code = impl_from_dict(ast, fields);
let pdf_type = match attrs.type_name {
Some(ref ty) => quote! { writeln!(out, "/Type /{}", #ty)?; },
None => quote! {}
};
quote! {
impl #impl_generics ::pdf:: | random_line_split |
|
lib.rs | pub struct XRefInfo {
//! #[pdf(key = "Filter")]
//! filter: Vec<StreamFilter>,
//! #[pdf(key = "Size")]
//! pub size: i32,
//! #[pdf(key = "Index", default = "vec![0, size]")]
//! pub index: Vec<i32>,
//! // [...]
//! }
//! ```
//!
//!
//! ## 2. Struct from PDF Stream
//! PDF Streams consist of a stream dictionary along with the stream data itself. Any
//! struct that derives Object from a stream primitive is assumed to have a field
//! `info: T`, where `T: Object`, and a field `data: Vec<u8>`.
//!
//! When deriving Object for a type that converts from `Primitive::Stream`, the
//! `is_stream` flag is required in the proc macro attributes.
//!
//! ## 3. Enum from PDF Name
//! Example:
//!
//! ```
//! #[derive(Object, Debug)]
//! pub enum StreamFilter {
//! ASCIIHexDecode,
//! ASCII85Decode,
//! LZWDecode,
//! FlateDecode,
//! JPXDecode,
//! DCTDecode,
//! }
//! ```
//!
//! In this case, `StreamFilter::from_primitive(primitive)` will return Ok(_) only if the primitive
//! is `Primitive::Name` and matches one of the enum variants
#![recursion_limit="128"]
extern crate proc_macro;
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro::TokenStream;
use syn::*;
// Debugging:
/*
use std::fs::{OpenOptions};
use std::io::Write;
*/
#[proc_macro_derive(Object, attributes(pdf))]
pub fn object(input: TokenStream) -> TokenStream {
// Construct a string representation of the type definition
let s = input.to_string();
// Parse the string representation
let ast = syn::parse_derive_input(&s).unwrap();
// Build the impl
let gen = impl_object(&ast);
// Debugging
/*
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open("/tmp/proj/src/main.rs")
.unwrap();
write!(file, "{}", gen).unwrap();
*/
// Return the generated impl
gen.parse().unwrap()
}
/// Returns (key, default, skip)
fn field_attrs(field: &Field) -> (String, Option<String>, bool) {
field.attrs.iter()
.filter_map(|attr| match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
let (mut key, mut default, mut skip) = (None, None, false);
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "key"
=> key = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "default"
=> default = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "skip"
=> skip = true,
_ => panic!(r##"Derive error - Supported derive attributes: `key="Key"`, `default="some code"`."##)
}
}
let key = match skip {
true => String::from(""),
false => key.expect("attr `key` missing"),
};
Some(( key, default, skip))
},
_ => None
}).next().expect("no pdf meta attribute")
}
/// Just the attributes for the whole struct
#[derive(Default)]
struct GlobalAttrs {
/// List of checks to do in the dictionary (LHS is the key, RHS is the expected value)
checks: Vec<(String, String)>,
type_name: Option<String>,
type_required: bool,
is_stream: bool,
}
impl GlobalAttrs {
/// The PDF type may be explicitly specified as an attribute with type "Type". Else, it is the name
/// of the struct.
fn from_ast(ast: &DeriveInput) -> GlobalAttrs {
let mut attrs = GlobalAttrs::default();
for attr in &ast.attrs {
match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
// Loop through list of attributes
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, ref value))
=> if ident == "Type" {
match *value {
Lit::Str(ref value, _) => {
if value.ends_with("?") {
attrs.type_name = Some(value[.. value.len()-1].to_string());
attrs.type_required = false;
} else {
attrs.type_name = Some(value.clone());
attrs.type_required = true;
}
},
_ => panic!("Value of 'Type' attribute must be a String."),
}
} else {
match *value {
Lit::Str(ref value, _) => attrs.checks.push((String::from(ident.as_ref()), value.clone())),
_ => panic!("Other checks must have RHS String."),
}
},
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "is_stream" => attrs.is_stream = true,
_ => {}
}
}
},
_ => {}
}
}
attrs
}
}
fn impl_object(ast: &DeriveInput) -> quote::Tokens {
let attrs = GlobalAttrs::from_ast(&ast);
if attrs.is_stream {
match ast.body {
Body::Struct(ref data) => impl_object_for_stream(ast, data.fields()),
Body::Enum(_) => panic!("Enum can't be a PDF stream"),
}
} else {
match ast.body {
Body::Struct(ref data) => impl_object_for_struct(ast, data.fields()),
Body::Enum(ref variants) => impl_object_for_enum(ast, variants),
}
}
}
/// Accepts Name to construct enum
fn impl_object_for_enum(ast: &DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let ser_code: Vec<_> = variants.iter().map(|var| {
quote! {
#id::#var => stringify!(#id::#var),
}
}).collect();
let from_primitive_code = impl_from_name(ast, variants);
quote! {
impl #impl_generics ::pdf::object::Object for #id #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "/{}",
match *self {
#( #ser_code )*
}
)
}
fn from_primitive(p: Primitive, _resolve: &Resolve) -> ::pdf::Result<Self> {
#from_primitive_code
}
}
}
}
/// Returns code for from_primitive that accepts Name
fn impl_from_name(ast: &syn::DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let parts: Vec<quote::Tokens> = variants.iter().map(|var| {
quote! {
stringify!(#var) => #id::#var,
}
}).collect();
quote! {
Ok(
match p {
Primitive::Name (name) => {
match name.as_str() {
#( #parts )*
s => bail!(format!("Enum {} from_primitive: no variant {}.", stringify!(#id), s)),
}
}
_ => bail!(::pdf::Error::from(::pdf::ErrorKind::UnexpectedPrimitive { expected: "Name", found: p.get_debug_name() })),
}
)
}
}
/// Accepts Dictionary to construct a struct
fn | (ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let attrs = GlobalAttrs::from_ast(&ast);
let parts: Vec<_> = fields.iter()
.map(|field| {
let (key, default, skip) = field_attrs(field);
(field.ident.clone(), key, default, skip)
}).collect();
// Implement serialize()
let fields_ser = parts.iter()
.map( |&(ref field, ref key, ref _default, skip)|
if skip {
quote! {}
} else {
quote! {
write!(out, "{} ", #key)?;
self.#field.serialize(out)?;
writeln!(out, "")?;
}
}
);
let checks_code = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
writeln!(out, "/{} /{}", #key, #val)?;
}
);
// Implement from_primitive()
let from_primitive_code = impl_from_dict(ast, fields);
let pdf_type = match attrs.type_name {
Some(ref ty) => quote! { writeln!(out, "/Type /{}", #ty)?; },
None => quote! {}
};
quote! {
impl #impl_generics ::pdf | impl_object_for_struct | identifier_name |
swarm_test.go | .ConnMultiaddrs) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
// QUIC gates the connection after completion of the handshake
disableOnQUIC: true,
},
"p2 gates inbound peer dial before multiplexing": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Secured = func(network.Direction, peer.ID, network.ConnMultiaddrs) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 gates inbound peer dial after upgrading": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Upgraded = func(c network.Conn) (bool, control.DisconnectReason) { return false, 0 }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 gates outbound dials": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.PeerDial = func(p peer.ID) bool { return false }
return c
},
p1ConnectednessToP2: network.Connected,
p2ConnectednessToP1: network.Connected,
isP1OutboundErr: false,
},
}
for n, tc := range tcs {
for _, useQuic := range []bool{false, true} {
trString := "TCP"
optTransport := OptDisableQUIC
if useQuic {
if tc.disableOnQUIC {
continue
}
trString = "QUIC"
optTransport = OptDisableTCP
}
t.Run(fmt.Sprintf("%s %s", n, trString), func(t *testing.T) {
p1Gater := DefaultMockConnectionGater()
p2Gater := DefaultMockConnectionGater()
if tc.p1Gater != nil {
p1Gater = tc.p1Gater(p1Gater)
}
if tc.p2Gater != nil {
p2Gater = tc.p2Gater(p2Gater)
}
sw1 := GenSwarm(t, OptConnGater(p1Gater), optTransport)
sw2 := GenSwarm(t, OptConnGater(p2Gater), optTransport)
p1 := sw1.LocalPeer()
p2 := sw2.LocalPeer()
sw1.Peerstore().AddAddr(p2, sw2.ListenAddresses()[0], peerstore.PermanentAddrTTL)
// 1 -> 2
_, err := sw1.DialPeer(ctx, p2)
require.Equal(t, tc.isP1OutboundErr, err != nil, n)
require.Equal(t, tc.p1ConnectednessToP2, sw1.Connectedness(p2), n)
require.Eventually(t, func() bool {
return tc.p2ConnectednessToP1 == sw2.Connectedness(p1)
}, 2*time.Second, 100*time.Millisecond, n)
})
}
}
}
func TestNoDial(t *testing.T) {
swarms := makeSwarms(t, 2)
_, err := swarms[0].NewStream(network.WithNoDial(context.Background(), "swarm test"), swarms[1].LocalPeer())
if err != network.ErrNoConn {
t.Fatal("should have failed with ErrNoConn")
}
}
func TestCloseWithOpenStreams(t *testing.T) {
ctx := context.Background()
swarms := makeSwarms(t, 2)
connectSwarms(t, ctx, swarms)
s, err := swarms[0].NewStream(ctx, swarms[1].LocalPeer())
require.NoError(t, err)
defer s.Close()
// close swarm before stream.
require.NoError(t, swarms[0].Close())
}
func TestTypedNilConn(t *testing.T) {
s := GenSwarm(t)
defer s.Close()
// We can't dial ourselves.
c, err := s.DialPeer(context.Background(), s.LocalPeer())
require.Error(t, err)
// If we fail to dial, the connection should be nil.
require.Nil(t, c)
}
func TestPreventDialListenAddr(t *testing.T) {
s := GenSwarm(t, OptDialOnly)
if err := s.Listen(ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1")); err != nil {
t.Fatal(err)
}
addrs, err := s.InterfaceListenAddresses()
if err != nil {
t.Fatal(err)
}
var addr ma.Multiaddr
for _, a := range addrs {
_, s, err := manet.DialArgs(a)
if err != nil {
t.Fatal(err)
}
if strings.Split(s, ":")[0] == "127.0.0.1" {
addr = a
break
}
}
remote := test.RandPeerIDFatal(t)
s.Peerstore().AddAddr(remote, addr, time.Hour)
_, err = s.DialPeer(context.Background(), remote)
if !errors.Is(err, swarm.ErrNoGoodAddresses) {
t.Fatal("expected dial to fail: %w", err)
}
}
func TestStreamCount(t *testing.T) {
s1 := GenSwarm(t)
s2 := GenSwarm(t)
connectSwarms(t, context.Background(), []*swarm.Swarm{s2, s1})
countStreams := func() (n int) {
var num int
for _, c := range s1.ConnsToPeer(s2.LocalPeer()) {
n += c.Stat().NumStreams
num += len(c.GetStreams())
}
require.Equal(t, n, num, "inconsistent stream count")
return
}
streams := make(chan network.Stream, 20)
streamAccepted := make(chan struct{}, 1)
s1.SetStreamHandler(func(str network.Stream) {
streams <- str
streamAccepted <- struct{}{}
})
for i := 0; i < 10; i++ {
str, err := s2.NewStream(context.Background(), s1.LocalPeer())
require.NoError(t, err)
str.Write([]byte("foobar"))
<-streamAccepted
}
require.Eventually(t, func() bool { return len(streams) == 10 }, 5*time.Second, 10*time.Millisecond)
require.Equal(t, countStreams(), 10)
(<-streams).Reset()
(<-streams).Close()
require.Equal(t, countStreams(), 8)
str, err := s1.NewStream(context.Background(), s2.LocalPeer())
require.NoError(t, err)
require.Equal(t, countStreams(), 9)
str.Close()
require.Equal(t, countStreams(), 8)
}
func TestResourceManager(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
rcmgr1 := mocknetwork.NewMockResourceManager(ctrl)
s1 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr1)))
defer s1.Close()
rcmgr2 := mocknetwork.NewMockResourceManager(ctrl)
s2 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr2)))
defer s2.Close()
connectSwarms(t, context.Background(), []*swarm.Swarm{s1, s2})
strChan := make(chan network.Stream)
s2.SetStreamHandler(func(str network.Stream) { strChan <- str })
streamScope1 := mocknetwork.NewMockStreamManagementScope(ctrl)
rcmgr1.EXPECT().OpenStream(s2.LocalPeer(), network.DirOutbound).Return(streamScope1, nil)
streamScope2 := mocknetwork.NewMockStreamManagementScope(ctrl)
rcmgr2.EXPECT().OpenStream(s1.LocalPeer(), network.DirInbound).Return(streamScope2, nil)
str, err := s1.NewStream(context.Background(), s2.LocalPeer())
require.NoError(t, err)
defer str.Close()
str.Write([]byte("foobar"))
p := protocol.ID("proto")
streamScope1.EXPECT().SetProtocol(p)
require.NoError(t, str.SetProtocol(p))
sstr := <-strChan
streamScope2.EXPECT().Done()
require.NoError(t, sstr.Close())
streamScope1.EXPECT().Done()
}
func TestResourceManagerNewStream(t *testing.T) | {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
rcmgr1 := mocknetwork.NewMockResourceManager(ctrl)
s1 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr1)))
defer s1.Close()
s2 := GenSwarm(t)
defer s2.Close()
connectSwarms(t, context.Background(), []*swarm.Swarm{s1, s2})
rerr := errors.New("denied")
rcmgr1.EXPECT().OpenStream(s2.LocalPeer(), network.DirOutbound).Return(nil, rerr)
_, err := s1.NewStream(context.Background(), s2.LocalPeer())
require.ErrorIs(t, err, rerr)
} | identifier_body |
|
swarm_test.go | Reuseport)
// connect everyone
connectSwarms(t, context.Background(), swarms)
// ping/pong
for _, s1 := range swarms {
log.Debugf("-------------------------------------------------------")
log.Debugf("%s ping pong round", s1.LocalPeer())
log.Debugf("-------------------------------------------------------")
_, cancel := context.WithCancel(context.Background())
got := map[peer.ID]int{}
errChan := make(chan error, MsgNum*len(swarms))
streamChan := make(chan network.Stream, MsgNum)
// send out "ping" x MsgNum to every peer
go func() {
defer close(streamChan)
var wg sync.WaitGroup
send := func(p peer.ID) {
defer wg.Done()
// first, one stream per peer (nice)
stream, err := s1.NewStream(context.Background(), p)
if err != nil {
errChan <- err
return
}
// send out ping!
for k := 0; k < MsgNum; k++ { // with k messages
msg := "ping"
log.Debugf("%s %s %s (%d)", s1.LocalPeer(), msg, p, k)
if _, err := stream.Write([]byte(msg)); err != nil {
errChan <- err
continue
}
}
| if s2.LocalPeer() == s1.LocalPeer() {
continue // dont send to self...
}
wg.Add(1)
go send(s2.LocalPeer())
}
wg.Wait()
}()
// receive "pong" x MsgNum from every peer
go func() {
defer close(errChan)
count := 0
countShouldBe := MsgNum * (len(swarms) - 1)
for stream := range streamChan { // one per peer
// get peer on the other side
p := stream.Conn().RemotePeer()
// receive pings
msgCount := 0
msg := make([]byte, 4)
for k := 0; k < MsgNum; k++ { // with k messages
// read from the stream
if _, err := stream.Read(msg); err != nil {
errChan <- err
continue
}
if string(msg) != "pong" {
errChan <- fmt.Errorf("unexpected message: %s", msg)
continue
}
log.Debugf("%s %s %s (%d)", s1.LocalPeer(), msg, p, k)
msgCount++
}
got[p] = msgCount
count += msgCount
stream.Close()
}
if count != countShouldBe {
errChan <- fmt.Errorf("count mismatch: %d != %d", count, countShouldBe)
}
}()
// check any errors (blocks till consumer is done)
for err := range errChan {
if err != nil {
t.Error(err.Error())
}
}
log.Debugf("%s got pongs", s1.LocalPeer())
if (len(swarms) - 1) != len(got) {
t.Errorf("got (%d) less messages than sent (%d).", len(got), len(swarms))
}
for p, n := range got {
if n != MsgNum {
t.Error("peer did not get all msgs", p, n, "/", MsgNum)
}
}
cancel()
<-time.After(10 * time.Millisecond)
}
}
func TestSwarm(t *testing.T) {
t.Parallel()
subtestSwarm(t, 5, 100)
}
func TestBasicSwarm(t *testing.T) {
// t.Skip("skipping for another test")
t.Parallel()
subtestSwarm(t, 2, 1)
}
func TestConnectionGating(t *testing.T) {
ctx := context.Background()
tcs := map[string]struct {
p1Gater func(gater *MockConnectionGater) *MockConnectionGater
p2Gater func(gater *MockConnectionGater) *MockConnectionGater
p1ConnectednessToP2 network.Connectedness
p2ConnectednessToP1 network.Connectedness
isP1OutboundErr bool
disableOnQUIC bool
}{
"no gating": {
p1ConnectednessToP2: network.Connected,
p2ConnectednessToP1: network.Connected,
isP1OutboundErr: false,
},
"p1 gates outbound peer dial": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.PeerDial = func(p peer.ID) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p1 gates outbound addr dialing": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Dial = func(p peer.ID, addr ma.Multiaddr) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 accepts inbound peer dial if outgoing dial is gated": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Dial = func(peer.ID, ma.Multiaddr) bool { return false }
return c
},
p1ConnectednessToP2: network.Connected,
p2ConnectednessToP1: network.Connected,
isP1OutboundErr: false,
},
"p2 gates inbound peer dial before securing": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Accept = func(c network.ConnMultiaddrs) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
// QUIC gates the connection after completion of the handshake
disableOnQUIC: true,
},
"p2 gates inbound peer dial before multiplexing": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Secured = func(network.Direction, peer.ID, network.ConnMultiaddrs) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 gates inbound peer dial after upgrading": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Upgraded = func(c network.Conn) (bool, control.DisconnectReason) { return false, 0 }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 gates outbound dials": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.PeerDial = func(p peer.ID) bool { return false }
return c
},
p1ConnectednessToP2: network.Connected,
p2ConnectednessToP1: network.Connected,
isP1OutboundErr: false,
},
}
for n, tc := range tcs {
for _, useQuic := range []bool{false, true} {
trString := "TCP"
optTransport := OptDisableQUIC
if useQuic {
if tc.disableOnQUIC {
continue
}
trString = "QUIC"
optTransport = OptDisableTCP
}
t.Run(fmt.Sprintf("%s %s", n, trString), func(t *testing.T) {
p1Gater := DefaultMockConnectionGater()
p2Gater := DefaultMockConnectionGater()
if tc.p1Gater != nil {
p1Gater = tc.p1Gater(p1Gater)
}
if tc.p2Gater != nil {
p2Gater = tc.p2Gater(p2Gater)
}
sw1 := GenSwarm(t, OptConnGater(p1Gater), optTransport)
sw2 := GenSwarm(t, OptConnGater(p2Gater), optTransport)
p1 := sw1.LocalPeer()
p2 := sw2.LocalPeer()
sw1.Peerstore().AddAddr(p2, sw2.ListenAddresses()[0], peerstore.PermanentAddrTTL)
// 1 -> 2
_, err := sw1.DialPeer(ctx, p2)
require.Equal(t, tc.is | // read it later
streamChan <- stream
}
for _, s2 := range swarms { | random_line_split |
swarm_test.go | Reuseport)
// connect everyone
connectSwarms(t, context.Background(), swarms)
// ping/pong
for _, s1 := range swarms {
log.Debugf("-------------------------------------------------------")
log.Debugf("%s ping pong round", s1.LocalPeer())
log.Debugf("-------------------------------------------------------")
_, cancel := context.WithCancel(context.Background())
got := map[peer.ID]int{}
errChan := make(chan error, MsgNum*len(swarms))
streamChan := make(chan network.Stream, MsgNum)
// send out "ping" x MsgNum to every peer
go func() {
defer close(streamChan)
var wg sync.WaitGroup
send := func(p peer.ID) {
defer wg.Done()
// first, one stream per peer (nice)
stream, err := s1.NewStream(context.Background(), p)
if err != nil {
errChan <- err
return
}
// send out ping!
for k := 0; k < MsgNum; k++ { // with k messages
msg := "ping"
log.Debugf("%s %s %s (%d)", s1.LocalPeer(), msg, p, k)
if _, err := stream.Write([]byte(msg)); err != nil {
errChan <- err
continue
}
}
// read it later
streamChan <- stream
}
for _, s2 := range swarms {
if s2.LocalPeer() == s1.LocalPeer() |
wg.Add(1)
go send(s2.LocalPeer())
}
wg.Wait()
}()
// receive "pong" x MsgNum from every peer
go func() {
defer close(errChan)
count := 0
countShouldBe := MsgNum * (len(swarms) - 1)
for stream := range streamChan { // one per peer
// get peer on the other side
p := stream.Conn().RemotePeer()
// receive pings
msgCount := 0
msg := make([]byte, 4)
for k := 0; k < MsgNum; k++ { // with k messages
// read from the stream
if _, err := stream.Read(msg); err != nil {
errChan <- err
continue
}
if string(msg) != "pong" {
errChan <- fmt.Errorf("unexpected message: %s", msg)
continue
}
log.Debugf("%s %s %s (%d)", s1.LocalPeer(), msg, p, k)
msgCount++
}
got[p] = msgCount
count += msgCount
stream.Close()
}
if count != countShouldBe {
errChan <- fmt.Errorf("count mismatch: %d != %d", count, countShouldBe)
}
}()
// check any errors (blocks till consumer is done)
for err := range errChan {
if err != nil {
t.Error(err.Error())
}
}
log.Debugf("%s got pongs", s1.LocalPeer())
if (len(swarms) - 1) != len(got) {
t.Errorf("got (%d) less messages than sent (%d).", len(got), len(swarms))
}
for p, n := range got {
if n != MsgNum {
t.Error("peer did not get all msgs", p, n, "/", MsgNum)
}
}
cancel()
<-time.After(10 * time.Millisecond)
}
}
func TestSwarm(t *testing.T) {
t.Parallel()
subtestSwarm(t, 5, 100)
}
func TestBasicSwarm(t *testing.T) {
// t.Skip("skipping for another test")
t.Parallel()
subtestSwarm(t, 2, 1)
}
func TestConnectionGating(t *testing.T) {
ctx := context.Background()
tcs := map[string]struct {
p1Gater func(gater *MockConnectionGater) *MockConnectionGater
p2Gater func(gater *MockConnectionGater) *MockConnectionGater
p1ConnectednessToP2 network.Connectedness
p2ConnectednessToP1 network.Connectedness
isP1OutboundErr bool
disableOnQUIC bool
}{
"no gating": {
p1ConnectednessToP2: network.Connected,
p2ConnectednessToP1: network.Connected,
isP1OutboundErr: false,
},
"p1 gates outbound peer dial": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.PeerDial = func(p peer.ID) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p1 gates outbound addr dialing": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Dial = func(p peer.ID, addr ma.Multiaddr) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 accepts inbound peer dial if outgoing dial is gated": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Dial = func(peer.ID, ma.Multiaddr) bool { return false }
return c
},
p1ConnectednessToP2: network.Connected,
p2ConnectednessToP1: network.Connected,
isP1OutboundErr: false,
},
"p2 gates inbound peer dial before securing": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Accept = func(c network.ConnMultiaddrs) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
// QUIC gates the connection after completion of the handshake
disableOnQUIC: true,
},
"p2 gates inbound peer dial before multiplexing": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Secured = func(network.Direction, peer.ID, network.ConnMultiaddrs) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 gates inbound peer dial after upgrading": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Upgraded = func(c network.Conn) (bool, control.DisconnectReason) { return false, 0 }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 gates outbound dials": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.PeerDial = func(p peer.ID) bool { return false }
return c
},
p1ConnectednessToP2: network.Connected,
p2ConnectednessToP1: network.Connected,
isP1OutboundErr: false,
},
}
for n, tc := range tcs {
for _, useQuic := range []bool{false, true} {
trString := "TCP"
optTransport := OptDisableQUIC
if useQuic {
if tc.disableOnQUIC {
continue
}
trString = "QUIC"
optTransport = OptDisableTCP
}
t.Run(fmt.Sprintf("%s %s", n, trString), func(t *testing.T) {
p1Gater := DefaultMockConnectionGater()
p2Gater := DefaultMockConnectionGater()
if tc.p1Gater != nil {
p1Gater = tc.p1Gater(p1Gater)
}
if tc.p2Gater != nil {
p2Gater = tc.p2Gater(p2Gater)
}
sw1 := GenSwarm(t, OptConnGater(p1Gater), optTransport)
sw2 := GenSwarm(t, OptConnGater(p2Gater), optTransport)
p1 := sw1.LocalPeer()
p2 := sw2.LocalPeer()
sw1.Peerstore().AddAddr(p2, sw2.ListenAddresses()[0], peerstore.PermanentAddrTTL)
// 1 -> 2
_, err := sw1.DialPeer(ctx, p2)
require.Equal(t,
| {
continue // dont send to self...
}
| conditional_block

swarm_test.go
| boundErr: true,
},
"p2 accepts inbound peer dial if outgoing dial is gated": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Dial = func(peer.ID, ma.Multiaddr) bool { return false }
return c
},
p1ConnectednessToP2: network.Connected,
p2ConnectednessToP1: network.Connected,
isP1OutboundErr: false,
},
"p2 gates inbound peer dial before securing": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Accept = func(c network.ConnMultiaddrs) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
// QUIC gates the connection after completion of the handshake
disableOnQUIC: true,
},
"p2 gates inbound peer dial before multiplexing": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Secured = func(network.Direction, peer.ID, network.ConnMultiaddrs) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 gates inbound peer dial after upgrading": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Upgraded = func(c network.Conn) (bool, control.DisconnectReason) { return false, 0 }
return c
},
p1ConnectednessToP2: network.NotConnected,
p2ConnectednessToP1: network.NotConnected,
isP1OutboundErr: true,
},
"p2 gates outbound dials": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.PeerDial = func(p peer.ID) bool { return false }
return c
},
p1ConnectednessToP2: network.Connected,
p2ConnectednessToP1: network.Connected,
isP1OutboundErr: false,
},
}
for n, tc := range tcs {
for _, useQuic := range []bool{false, true} {
trString := "TCP"
optTransport := OptDisableQUIC
if useQuic {
if tc.disableOnQUIC {
continue
}
trString = "QUIC"
optTransport = OptDisableTCP
}
t.Run(fmt.Sprintf("%s %s", n, trString), func(t *testing.T) {
p1Gater := DefaultMockConnectionGater()
p2Gater := DefaultMockConnectionGater()
if tc.p1Gater != nil {
p1Gater = tc.p1Gater(p1Gater)
}
if tc.p2Gater != nil {
p2Gater = tc.p2Gater(p2Gater)
}
sw1 := GenSwarm(t, OptConnGater(p1Gater), optTransport)
sw2 := GenSwarm(t, OptConnGater(p2Gater), optTransport)
p1 := sw1.LocalPeer()
p2 := sw2.LocalPeer()
sw1.Peerstore().AddAddr(p2, sw2.ListenAddresses()[0], peerstore.PermanentAddrTTL)
// 1 -> 2
_, err := sw1.DialPeer(ctx, p2)
require.Equal(t, tc.isP1OutboundErr, err != nil, n)
require.Equal(t, tc.p1ConnectednessToP2, sw1.Connectedness(p2), n)
require.Eventually(t, func() bool {
return tc.p2ConnectednessToP1 == sw2.Connectedness(p1)
}, 2*time.Second, 100*time.Millisecond, n)
})
}
}
}
func TestNoDial(t *testing.T) {
swarms := makeSwarms(t, 2)
_, err := swarms[0].NewStream(network.WithNoDial(context.Background(), "swarm test"), swarms[1].LocalPeer())
if err != network.ErrNoConn {
t.Fatal("should have failed with ErrNoConn")
}
}
func TestCloseWithOpenStreams(t *testing.T) {
ctx := context.Background()
swarms := makeSwarms(t, 2)
connectSwarms(t, ctx, swarms)
s, err := swarms[0].NewStream(ctx, swarms[1].LocalPeer())
require.NoError(t, err)
defer s.Close()
// close swarm before stream.
require.NoError(t, swarms[0].Close())
}
func TestTypedNilConn(t *testing.T) {
s := GenSwarm(t)
defer s.Close()
// We can't dial ourselves.
c, err := s.DialPeer(context.Background(), s.LocalPeer())
require.Error(t, err)
// If we fail to dial, the connection should be nil.
require.Nil(t, c)
}
func TestPreventDialListenAddr(t *testing.T) {
s := GenSwarm(t, OptDialOnly)
if err := s.Listen(ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1")); err != nil {
t.Fatal(err)
}
addrs, err := s.InterfaceListenAddresses()
if err != nil {
t.Fatal(err)
}
var addr ma.Multiaddr
for _, a := range addrs {
_, s, err := manet.DialArgs(a)
if err != nil {
t.Fatal(err)
}
if strings.Split(s, ":")[0] == "127.0.0.1" {
addr = a
break
}
}
remote := test.RandPeerIDFatal(t)
s.Peerstore().AddAddr(remote, addr, time.Hour)
_, err = s.DialPeer(context.Background(), remote)
if !errors.Is(err, swarm.ErrNoGoodAddresses) {
t.Fatalf("expected dial to fail: %v", err)
}
}
func TestStreamCount(t *testing.T) {
s1 := GenSwarm(t)
s2 := GenSwarm(t)
connectSwarms(t, context.Background(), []*swarm.Swarm{s2, s1})
countStreams := func() (n int) {
var num int
for _, c := range s1.ConnsToPeer(s2.LocalPeer()) {
n += c.Stat().NumStreams
num += len(c.GetStreams())
}
require.Equal(t, n, num, "inconsistent stream count")
return
}
streams := make(chan network.Stream, 20)
streamAccepted := make(chan struct{}, 1)
s1.SetStreamHandler(func(str network.Stream) {
streams <- str
streamAccepted <- struct{}{}
})
for i := 0; i < 10; i++ {
str, err := s2.NewStream(context.Background(), s1.LocalPeer())
require.NoError(t, err)
str.Write([]byte("foobar"))
<-streamAccepted
}
require.Eventually(t, func() bool { return len(streams) == 10 }, 5*time.Second, 10*time.Millisecond)
require.Equal(t, countStreams(), 10)
(<-streams).Reset()
(<-streams).Close()
require.Equal(t, countStreams(), 8)
str, err := s1.NewStream(context.Background(), s2.LocalPeer())
require.NoError(t, err)
require.Equal(t, countStreams(), 9)
str.Close()
require.Equal(t, countStreams(), 8)
}
func TestResourceManager(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
rcmgr1 := mocknetwork.NewMockResourceManager(ctrl)
s1 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr1)))
defer s1.Close()
rcmgr2 := mocknetwork.NewMockResourceManager(ctrl)
s2 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr2)))
defer s2.Close()
connectSwarms(t, context.Background(), []*swarm.Swarm{s1, s2})
strChan := make(chan network.Stream)
s2.SetStreamHandler(func(str network.Stream) { strChan <- str })
streamScope1 := mocknetwork.NewMockStreamManagementScope(ctrl)
rcmgr1.EXPECT().OpenStream(s2.LocalPeer(), network.DirOutbound).Return(streamScope1, nil)
streamScope2 := mocknetwork.NewMockStreamManagementScope(ctrl)
rcmgr2.EXPECT().OpenStream(s1.LocalPeer(), network.DirInbound).Return(streamScope2, nil)
str, err := s1.NewStream(context.Background(), s2.LocalPeer())
require.NoError(t, err)
defer str.Close()
str.Write([]byte("foobar"))
p := protocol.ID("proto")
streamScope1.EXPECT().SetProtocol(p)
require.NoError(t, str.SetProtocol(p))
sstr := <-strChan
streamScope2.EXPECT().Done()
require.NoError(t, sstr.Close())
streamScope1.EXPECT().Done()
}
func
| TestResourceManagerNewStream
| identifier_name
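Each row above ends with a fim_type marker (e.g. random_line_split, conditional_block, identifier_name): the middle cell is the span a model is asked to fill in, and concatenating prefix + middle + suffix recovers the original source slice. The Go sketch below shows that reassembly, assuming the standard FIM column layout (file_name, prefix, suffix, middle, fim_type); the type and function names (fimRow, reassemble) are illustrative only and are not identifiers from the dataset.

```go
package main

import "fmt"

// fimRow mirrors the dataset columns; field names are illustrative,
// not the dataset's actual schema identifiers.
type fimRow struct {
	FileName string
	Prefix   string
	Middle   string
	Suffix   string
	FimType  string
}

// reassemble reconstructs the original source for one row: the model
// is asked to predict Middle given Prefix and Suffix.
func reassemble(r fimRow) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	// Example values taken from the conditional_block row above.
	row := fimRow{
		FileName: "swarm_test.go",
		Prefix:   "if s2.LocalPeer() == s1.LocalPeer() ",
		Middle:   "{\n\tcontinue // dont send to self...\n}",
		Suffix:   "\nwg.Add(1)",
		FimType:  "conditional_block",
	}
	fmt.Println(reassemble(row))
}
```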