| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
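Each row below pairs a source file with a fill-in-the-middle (FIM) split: a span (`middle`) is cut out of the original file, the text before and after it becomes `prefix` and `suffix`, and `fim_type` records what kind of span was removed (the four classes visible in the rows are `identifier_name`, `identifier_body`, `conditional_block`, and `random_line_split`). A minimal sketch of how such a row could be reassembled or turned into a FIM training prompt follows; the `<|fim_*|>` sentinel strings and the toy row's exact prefix text are assumptions for illustration, not part of this dataset.

```python
# Minimal sketch of consuming a row of this dataset, assuming the standard
# FIM convention: prefix + middle + suffix reconstructs the original file.
# The <|fim_*|> sentinels are illustrative placeholders, not dataset tokens.

def reassemble(row: dict) -> str:
    """Recover the original source text from a FIM-split row."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_psm_prompt(row: dict) -> str:
    """Format a row in prefix-suffix-middle (PSM) order for FIM training."""
    return (
        "<|fim_prefix|>" + row["prefix"]
        + "<|fim_suffix|>" + row["suffix"]
        + "<|fim_middle|>" + row["middle"]
    )

# Toy row modeled on the first example (train_nn.py, fim_type=identifier_name);
# the prefix text here is assumed for illustration.
row = {
    "file_name": "train_nn.py",
    "prefix": "class ",
    "middle": "chatBot",
    "suffix": "(object):\n    def __init__(self, data_dir, model_dir, task_id):\n        ...",
    "fim_type": "identifier_name",
}

assert reassemble(row).startswith("class chatBot(object):")
print(to_psm_prompt(row)[:60])
```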
train_nn.py | (object):
def __init__(self, data_dir, model_dir, task_id, isInteractive=True, OOV=False,
memory_size=50, random_state=None, batch_size=32, learning_rate=0.001, epsilon=1e-8,
max_grad_norm=40.0, evaluation_interval=10, hops=3, epochs=200, embedding_size=20, save_model=10,
checkpoint_path='./models', optim='adam', momentum=0.9, decay=0, gamma=0.1, step=30):
self.data_dir = data_dir
self.task_id = task_id
self.model_dir = model_dir
self.isInteractive = isInteractive
self.OOV = OOV
self.memory_size = memory_size
self.random_state = random_state
self.batch_size = batch_size
self.learning_rate = learning_rate
self.epsilon = epsilon
self.max_grad_norm = max_grad_norm
self.evaluation_interval = evaluation_interval
self.hops = hops
self.epochs = epochs
self.embedding_size = embedding_size
self.save_model = save_model
self.checkpoint_path = checkpoint_path
self.optim = optim
self.momentum = momentum
self.decay = decay
self.gamma = gamma
self.step = step
self.train_dataset = CDATA(data_dir=self.data_dir, task_id=self.task_id, memory_size=self.memory_size,
train=0, batch_size=self.batch_size, nn=False) # 0->train, 1->validate, 2->test
self.model = MemN2NDialog(batch_size=self.batch_size, vocab_size=self.train_dataset.getParam('vocab_size'),
candidate_size=self.train_dataset.getParam('candidate_sentence_size'), sentence_size=self.train_dataset.getParam('sentence_size'),
candidates_vec=self.train_dataset.getParam('candidates_vec'), embedding_size=self.embedding_size, hops=self.hops,
learning_rate=self.learning_rate, max_grad_norm=self.max_grad_norm, task_id=self.task_id)
if torch.cuda.is_available():
self.model = self.model.cuda()
def train(self):
trainS, trainQ, trainA = self.train_dataset.getData()
assert len(trainS) == len(trainQ) and len(trainQ) == len(trainA)
n_train = len(trainS)
batches = zip(range(0, n_train - self.batch_size, self.batch_size),
range(self.batch_size, n_train, self.batch_size))
batches = [(start, end) for start, end in batches]
if self.optim == 'sgd':
optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum)
print("SGD optimizer")
elif self.optim == 'rms':
optimizer = torch.optim.RMSprop(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum)
print("RMSprop optimizer")
else:
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
print("Adam optimizer")
scheduler = None
if self.decay:
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=self.step, gamma=self.gamma)
print("Decay learning rate initialized")
for epoch in range(self.epochs):
print('epoch :', epoch)
if self.decay:
scheduler.step()
np.random.shuffle(batches)
running_loss = 0.0
for start, end in batches:
s = trainS[start:end]
q = trainQ[start:end]
a = trainA[start:end]
optimizer.zero_grad()
running_loss += self.model.batch_train(s, q, a)
optimizer.step()
print('loss = ',running_loss / n_train)
#-----------------------Save model after every nth epoch-----------------------------------
if epoch % self.save_model == 0:
print("Saving models")
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
model_name = os.path.join(self.checkpoint_path, str(self.task_id) + '.pkl')
torch.save(self.model.state_dict(), model_name)
self.test(0)
self.test(1)
self.test(2)
#------------------------------------------------------------------------------------------
def test(self,data_type):
# 0->train, 1->validate, 2->test
print("----------------------------------------------------------------------")
print("STARTED TESTING: ", data_type)
dataset = CDATA(data_dir=self.data_dir, task_id=self.task_id, memory_size=self.memory_size,
train=data_type, batch_size=self.batch_size) # 0->train, 1->validate, 2->test
testS, testQ, testA = dataset.getData()
assert len(testS) == len(testQ) and len(testQ) == len(testA)
n_test = len(testS)
fname = os.path.join(self.checkpoint_path, str(self.task_id) + '.pkl')
self.model.load_state_dict(torch.load(fname))
acc, loss = self.model.test(testS, testQ, testA)
print('Accuracy = ', acc)
print("----------------------------------------------------------------------")
def build_vocab(self, data, candidates):
vocab = reduce(lambda x, y: x | y, (set(
list(chain.from_iterable(s)) + q) for s, q, a in data))
vocab |= reduce(lambda x, y: x | y, (set(candidate)
for candidate in candidates))
vocab = sorted(vocab)
self.word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
max_story_size = max(map(len, (s for s, _, _ in data)))
mean_story_size = int(np.mean([len(s) for s, _, _ in data]))
self.sentence_size = max(
map(len, chain.from_iterable(s for s, _, _ in data)))
self.candidate_sentence_size = max(map(len, candidates))
query_size = max(map(len, (q for _, q, _ in data)))
self.memory_size = min(self.memory_size, max_story_size)
self.vocab_size = len(self.word_idx) + 1 # +1 for nil word
self.sentence_size = max(
query_size, self.sentence_size) # for the position
# params
print("vocab size:", self.vocab_size)
print("Longest sentence length", self.sentence_size)
print("Longest candidate sentence length",
self.candidate_sentence_size)
print("Longest story length", max_story_size)
print("Average story length", mean_story_size)
def main(params):
model_dir = "task" + str(params['task_id']) + "_" + params['model_dir']
if not os.path.exists(model_dir):
os.makedirs(model_dir)
chatbot = chatBot(data_dir=params['data_dir'], model_dir=model_dir, task_id=params['task_id'], isInteractive=params['interactive'], OOV=params['OOV'], memory_size=params['memory_size'], random_state=params['random_state'], batch_size=params['batch_size'],
learning_rate=params['learning_rate'], epsilon=params['epsilon'], max_grad_norm=params['max_grad_norm'], evaluation_interval=params['evaluation_interval'], hops=params['hops'], epochs=params['epochs'], embedding_size=params['embedding_size'],
save_model=params['save_model'], checkpoint_path=params['checkpoint_path'], optim=params['optim'], momentum=params['momentum'],
decay=params['decay'], gamma=params['gamma'], step=params['step'])
if params['train']:
chatbot.train()
else:
chatbot.test(0)
chatbot.test(1)
chatbot.test(2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', default=0.001, type=float,
help='Learning rate for Optimizer')
parser.add_argument('--epsilon', default=1e-8, type=float,
help='Epsilon value for Adam Optimizer')
parser.add_argument('--max_grad_norm', default=40.0, type=float,
help='Clip gradients to this norm')
parser.add_argument('--evaluation_interval', default=10, type=int,
help='Evaluate and print results every x epochs')
parser.add_argument('--batch_size', default=32, type=int, help='Batch size for training')
parser.add_argument('--hops', default=3, type=int, help='Number of hops in the Memory Network')
parser.add_argument('--epochs', default=200, type=int, help='Number of epochs to train for')
parser.add_argument('--embedding_size', default=20, type=int,
help='Embedding size for embedding matrices')
parser.add_argument('--memory_size', default=50, type=int, help='Maximum size of memory')
parser.add_argument('--task_id', default=6, type=int, help='bAbI task id, 1 <= id <= 6')
parser.add_argument('--random_state', default=None, help='Random state')
parser.add_argument('--data_dir', default='data/dialog-bAbI-tasks/',
help='Directory containing bAbI tasks')
parser.add_argument('--model_dir', default='model/',
help='Directory containing memn2n model checkpoints')
parser.add_argument('--train', default=1, type=int, help='Train if True, test if False')
parser.add_argument('--interactive', default=0, type=int, help='if True, interactive')
parser.add_argument('--OOV', default=0, type | chatBot | identifier_name |
|
train_nn.py | , data_dir, model_dir, task_id, isInteractive=True, OOV=False,
memory_size=50, random_state=None, batch_size=32, learning_rate=0.001, epsilon=1e-8,
max_grad_norm=40.0, evaluation_interval=10, hops=3, epochs=200, embedding_size=20, save_model=10,
checkpoint_path='./models', optim='adam', momentum=0.9, decay=0, gamma=0.1, step=30):
self.data_dir = data_dir
self.task_id = task_id
self.model_dir = model_dir
self.isInteractive = isInteractive
self.OOV = OOV
self.memory_size = memory_size
self.random_state = random_state
self.batch_size = batch_size
self.learning_rate = learning_rate
self.epsilon = epsilon
self.max_grad_norm = max_grad_norm
self.evaluation_interval = evaluation_interval
self.hops = hops
self.epochs = epochs
self.embedding_size = embedding_size
self.save_model = save_model
self.checkpoint_path = checkpoint_path
self.optim = optim
self.momentum = momentum
self.decay = decay
self.gamma = gamma
self.step = step
self.train_dataset = CDATA(data_dir=self.data_dir, task_id=self.task_id, memory_size=self.memory_size,
train=0, batch_size=self.batch_size, nn=False) # 0->train, 1->validate, 2->test
self.model = MemN2NDialog(batch_size=self.batch_size, vocab_size=self.train_dataset.getParam('vocab_size'),
candidate_size=self.train_dataset.getParam('candidate_sentence_size'), sentence_size=self.train_dataset.getParam('sentence_size'),
candidates_vec=self.train_dataset.getParam('candidates_vec'), embedding_size=self.embedding_size, hops=self.hops,
learning_rate=self.learning_rate, max_grad_norm=self.max_grad_norm, task_id=self.task_id)
if torch.cuda.is_available():
self.model = self.model.cuda()
def train(self):
trainS, trainQ, trainA = self.train_dataset.getData()
assert len(trainS) == len(trainQ) and len(trainQ) == len(trainA)
n_train = len(trainS)
batches = zip(range(0, n_train - self.batch_size, self.batch_size),
range(self.batch_size, n_train, self.batch_size))
batches = [(start, end) for start, end in batches]
if self.optim == 'sgd':
optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum)
print("SGD optimizer")
elif self.optim == 'rms':
optimizer = torch.optim.RMSprop(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum)
print("RMSprop optimizer")
else:
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
print("Adam optimizer")
scheduler = None
if self.decay:
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=self.step, gamma=self.gamma)
print("Decay learning rate initialized")
for epoch in range(self.epochs):
print('epoch :', epoch)
if self.decay:
scheduler.step()
np.random.shuffle(batches)
running_loss = 0.0
for start, end in batches:
s = trainS[start:end]
q = trainQ[start:end]
a = trainA[start:end]
optimizer.zero_grad()
running_loss += self.model.batch_train(s, q, a)
optimizer.step()
print('loss = ',running_loss / n_train)
#-----------------------Save model after every nth epoch-----------------------------------
if epoch % self.save_model == 0:
print("Saving models")
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
model_name = os.path.join(self.checkpoint_path, str(self.task_id) + '.pkl')
torch.save(self.model.state_dict(), model_name)
self.test(0)
self.test(1)
self.test(2)
#------------------------------------------------------------------------------------------
def test(self,data_type):
# 0->train, 1->validate, 2->test
print("----------------------------------------------------------------------")
print("STARTED TESTING: ", data_type)
dataset = CDATA(data_dir=self.data_dir, task_id=self.task_id, memory_size=self.memory_size,
train=data_type, batch_size=self.batch_size) # 0->train, 1->validate, 2->test
testS, testQ, testA = dataset.getData()
assert len(testS) == len(testQ) and len(testQ) == len(testA)
n_test = len(testS)
fname = os.path.join(self.checkpoint_path, str(self.task_id) + '.pkl')
self.model.load_state_dict(torch.load(fname))
acc, loss = self.model.test(testS, testQ, testA)
print('Accuracy = ', acc)
print("----------------------------------------------------------------------")
def build_vocab(self, data, candidates):
vocab = reduce(lambda x, y: x | y, (set(
list(chain.from_iterable(s)) + q) for s, q, a in data))
vocab |= reduce(lambda x, y: x | y, (set(candidate)
for candidate in candidates))
vocab = sorted(vocab)
self.word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
max_story_size = max(map(len, (s for s, _, _ in data)))
mean_story_size = int(np.mean([len(s) for s, _, _ in data]))
self.sentence_size = max(
map(len, chain.from_iterable(s for s, _, _ in data)))
self.candidate_sentence_size = max(map(len, candidates))
query_size = max(map(len, (q for _, q, _ in data)))
self.memory_size = min(self.memory_size, max_story_size)
self.vocab_size = len(self.word_idx) + 1 # +1 for nil word
self.sentence_size = max(
query_size, self.sentence_size) # for the position
# params
print("vocab size:", self.vocab_size)
print("Longest sentence length", self.sentence_size)
print("Longest candidate sentence length",
self.candidate_sentence_size)
print("Longest story length", max_story_size)
print("Average story length", mean_story_size)
def main(params):
model_dir = "task" + str(params['task_id']) + "_" + params['model_dir']
if not os.path.exists(model_dir):
os.makedirs(model_dir)
chatbot = chatBot(data_dir=params['data_dir'], model_dir=model_dir, task_id=params['task_id'], isInteractive=params['interactive'], OOV=params['OOV'], memory_size=params['memory_size'], random_state=params['random_state'], batch_size=params['batch_size'],
learning_rate=params['learning_rate'], epsilon=params['epsilon'], max_grad_norm=params['max_grad_norm'], evaluation_interval=params['evaluation_interval'], hops=params['hops'], epochs=params['epochs'], embedding_size=params['embedding_size'],
save_model=params['save_model'], checkpoint_path=params['checkpoint_path'], optim=params['optim'], momentum=params['momentum'],
decay=params['decay'], gamma=params['gamma'], step=params['step'])
if params['train']:
chatbot.train()
else:
chatbot.test(0)
chatbot.test(1)
chatbot.test(2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', default=0.001, type=float,
help='Learning rate for Optimizer')
parser.add_argument('--epsilon', default=1e-8, type=float,
help='Epsilon value for Adam Optimizer')
parser.add_argument('--max_grad_norm', default=40.0, type=float,
help='Clip gradients to this norm')
parser.add_argument('--evaluation_interval', default=10, type=int, | parser.add_argument('--epochs', default=200, type=int, help='Number of epochs to train for')
parser.add_argument('--embedding_size', default=20, type=int,
help='Embedding size for embedding matrices')
parser.add_argument('--memory_size', default=50, type=int, help='Maximum size of memory')
parser.add_argument('--task_id', default=6, type=int, help='bAbI task id, 1 <= id <= 6')
parser.add_argument('--random_state', default=None, help='Random state')
parser.add_argument('--data_dir', default='data/dialog-bAbI-tasks/',
help='Directory containing bAbI tasks')
parser.add_argument('--model_dir', default='model/',
help='Directory containing memn2n model checkpoints')
parser.add_argument('--train', default=1, type=int, help='Train if True, test if False')
parser.add_argument('--interactive', default=0, type=int, help='if True, interactive')
parser.add_argument('--OOV', default=0, type=int, help='if True, use OOV | help='Evaluate and print results every x epochs')
parser.add_argument('--batch_size', default=32, type=int, help='Batch size for training')
parser.add_argument('--hops', default=3, type=int, help='Number of hops in the Memory Network') | random_line_split |
train_nn.py | , data_dir, model_dir, task_id, isInteractive=True, OOV=False,
memory_size=50, random_state=None, batch_size=32, learning_rate=0.001, epsilon=1e-8,
max_grad_norm=40.0, evaluation_interval=10, hops=3, epochs=200, embedding_size=20, save_model=10,
checkpoint_path='./models', optim='adam', momentum=0.9, decay=0, gamma=0.1, step=30):
self.data_dir = data_dir
self.task_id = task_id
self.model_dir = model_dir
self.isInteractive = isInteractive
self.OOV = OOV
self.memory_size = memory_size
self.random_state = random_state
self.batch_size = batch_size
self.learning_rate = learning_rate
self.epsilon = epsilon
self.max_grad_norm = max_grad_norm
self.evaluation_interval = evaluation_interval
self.hops = hops
self.epochs = epochs
self.embedding_size = embedding_size
self.save_model = save_model
self.checkpoint_path = checkpoint_path
self.optim = optim
self.momentum = momentum
self.decay = decay
self.gamma = gamma
self.step = step
self.train_dataset = CDATA(data_dir=self.data_dir, task_id=self.task_id, memory_size=self.memory_size,
train=0, batch_size=self.batch_size, nn=False) # 0->train, 1->validate, 2->test
self.model = MemN2NDialog(batch_size=self.batch_size, vocab_size=self.train_dataset.getParam('vocab_size'),
candidate_size=self.train_dataset.getParam('candidate_sentence_size'), sentence_size=self.train_dataset.getParam('sentence_size'),
candidates_vec=self.train_dataset.getParam('candidates_vec'), embedding_size=self.embedding_size, hops=self.hops,
learning_rate=self.learning_rate, max_grad_norm=self.max_grad_norm, task_id=self.task_id)
if torch.cuda.is_available():
self.model = self.model.cuda()
def train(self):
trainS, trainQ, trainA = self.train_dataset.getData()
assert len(trainS) == len(trainQ) and len(trainQ) == len(trainA)
n_train = len(trainS)
batches = zip(range(0, n_train - self.batch_size, self.batch_size),
range(self.batch_size, n_train, self.batch_size))
batches = [(start, end) for start, end in batches]
if self.optim == 'sgd':
optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum)
print("SGD optimizer")
elif self.optim == 'rms':
optimizer = torch.optim.RMSprop(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum)
print("RMSprop optimizer")
else:
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
print("Adam optimizer")
scheduler = None
if self.decay:
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=self.step, gamma=self.gamma)
print("Decay learning rate initialized")
for epoch in range(self.epochs):
print('epoch :', epoch)
if self.decay:
scheduler.step()
np.random.shuffle(batches)
running_loss = 0.0
for start, end in batches:
s = trainS[start:end]
q = trainQ[start:end]
a = trainA[start:end]
optimizer.zero_grad()
running_loss += self.model.batch_train(s, q, a)
optimizer.step()
print('loss = ',running_loss / n_train)
#-----------------------Save model after every nth epoch-----------------------------------
if epoch % self.save_model == 0:
print("Saving models")
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
model_name = os.path.join(self.checkpoint_path, str(self.task_id) + '.pkl')
torch.save(self.model.state_dict(), model_name)
self.test(0)
self.test(1)
self.test(2)
#------------------------------------------------------------------------------------------
def test(self,data_type):
# 0->train, 1->validate, 2->test
print("----------------------------------------------------------------------")
print("STARTED TESTING: ", data_type)
dataset = CDATA(data_dir=self.data_dir, task_id=self.task_id, memory_size=self.memory_size,
train=data_type, batch_size=self.batch_size) # 0->train, 1->validate, 2->test
testS, testQ, testA = dataset.getData()
assert len(testS) == len(testQ) and len(testQ) == len(testA)
n_test = len(testS)
fname = os.path.join(self.checkpoint_path, str(self.task_id) + '.pkl')
self.model.load_state_dict(torch.load(fname))
acc, loss = self.model.test(testS, testQ, testA)
print('Accuracy = ', acc)
print("----------------------------------------------------------------------")
def build_vocab(self, data, candidates):
vocab = reduce(lambda x, y: x | y, (set(
list(chain.from_iterable(s)) + q) for s, q, a in data))
vocab |= reduce(lambda x, y: x | y, (set(candidate)
for candidate in candidates))
vocab = sorted(vocab)
self.word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
max_story_size = max(map(len, (s for s, _, _ in data)))
mean_story_size = int(np.mean([len(s) for s, _, _ in data]))
self.sentence_size = max(
map(len, chain.from_iterable(s for s, _, _ in data)))
self.candidate_sentence_size = max(map(len, candidates))
query_size = max(map(len, (q for _, q, _ in data)))
self.memory_size = min(self.memory_size, max_story_size)
self.vocab_size = len(self.word_idx) + 1 # +1 for nil word
self.sentence_size = max(
query_size, self.sentence_size) # for the position
# params
print("vocab size:", self.vocab_size)
print("Longest sentence length", self.sentence_size)
print("Longest candidate sentence length",
self.candidate_sentence_size)
print("Longest story length", max_story_size)
print("Average story length", mean_story_size)
def main(params):
|
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', default=0.001, type=float,
help='Learning rate for Optimizer')
parser.add_argument('--epsilon', default=1e-8, type=float,
help='Epsilon value for Adam Optimizer')
parser.add_argument('--max_grad_norm', default=40.0, type=float,
help='Clip gradients to this norm')
parser.add_argument('--evaluation_interval', default=10, type=int,
help='Evaluate and print results every x epochs')
parser.add_argument('--batch_size', default=32, type=int, help='Batch size for training')
parser.add_argument('--hops', default=3, type=int, help='Number of hops in the Memory Network')
parser.add_argument('--epochs', default=200, type=int, help='Number of epochs to train for')
parser.add_argument('--embedding_size', default=20, type=int,
help='Embedding size for embedding matrices')
parser.add_argument('--memory_size', default=50, type=int, help='Maximum size of memory')
parser.add_argument('--task_id', default=6, type=int, help='bAbI task id, 1 <= id <= 6')
parser.add_argument('--random_state', default=None, help='Random state')
parser.add_argument('--data_dir', default='data/dialog-bAbI-tasks/',
help='Directory containing bAbI tasks')
parser.add_argument('--model_dir', default='model/',
help='Directory containing memn2n model checkpoints')
parser.add_argument('--train', default=1, type=int, help='Train if True, test if False')
parser.add_argument('--interactive', default=0, type=int, help='if True, interactive')
parser.add_argument('--OOV', default=0, type=int, help='if True, use O | model_dir = "task" + str(params['task_id']) + "_" + params['model_dir']
if not os.path.exists(model_dir):
os.makedirs(model_dir)
chatbot = chatBot(data_dir=params['data_dir'], model_dir=model_dir, task_id=params['task_id'], isInteractive=params['interactive'], OOV=params['OOV'], memory_size=params['memory_size'], random_state=params['random_state'], batch_size=params['batch_size'],
learning_rate=params['learning_rate'], epsilon=params['epsilon'], max_grad_norm=params['max_grad_norm'], evaluation_interval=params['evaluation_interval'], hops=params['hops'], epochs=params['epochs'], embedding_size=params['embedding_size'],
save_model=params['save_model'], checkpoint_path=params['checkpoint_path'], optim=params['optim'], momentum=params['momentum'],
decay=params['decay'], gamma=params['gamma'], step=params['step'])
if params['train']:
chatbot.train()
else:
chatbot.test(0)
chatbot.test(1)
chatbot.test(2) | identifier_body |
01_calculate_MMR_from_draws.py | cause
dalynator_dir = '/ihme/centralcomp/dalynator/%s/draws/hdfs/' % model_vers
files = []
for root, dirnames, filenames in os.walk('%s' % dalynator_dir):
for filename in fnmatch.filter(filenames, '*%s.h5' % year_id):
files.append(os.path.join(root, filename))
def read_file(f):
return pd.read_hdf(f, 'data', where=[("'cause_id'==%d & 'measure_id'==1"
"& 'metric_id'==1 & 'sex_id'==2"
"& 'rei_id'==0") % cause_id])
draw_list = []
with cf.ProcessPoolExecutor(max_workers=14) as e:
for df in e.map(read_file, files):
|
draws = pd.concat(draw_list)
draws.reset_index(inplace=True)
draws = draws[draws.age_group_id.isin(ages)]
draws['location_id'] = draws['location_id'].astype('int')
draws['age_group_id'] = draws['age_group_id'].astype('int')
draws['sex_id'] = draws['sex_id'].astype('int')
draws['year_id'] = draws['year_id'].astype('int')
# aggregate and add a teenage death age group
teenage_deaths = draws.copy(deep=True)
teenage_deaths = teenage_deaths[teenage_deaths.age_group_id.isin(range(7, 9))]
teenage_deaths['age_group_id'] = 162 # 10to19
daly_idx = ['location_id', 'year_id', 'age_group_id', 'sex_id', 'cause_id',
'rei_id', 'metric_id', 'measure_id']
teenage_deaths = (teenage_deaths.groupby(daly_idx).sum().reset_index())
draws = draws.append(teenage_deaths)
# load live births
print "loading live births"
query = ('SELECT '
'model.location_id, model.year_id, model.age_group_id, model.sex_id, '
'model.mean_value AS asfr FROM covariate.model '
'JOIN covariate.model_version ON model.model_version_id=model_version'
'.model_version_id JOIN covariate.data_version ON model_version'
'.data_version_id=data_version.data_version_id JOIN shared.covariate '
'ON data_version.covariate_id=covariate.covariate_id '
'WHERE covariate.last_updated_action!="DELETE" AND is_best=1 '
'AND covariate.covariate_id= 13 AND model.age_group_id '
'BETWEEN 7 AND 15 AND model.year_id > 1989')
asfr = query_tools.query_2_df(query, engine=enginer.engines["cov_prod"])
asfr['sex_id'] = 2
query = ('SELECT location_id, year_id, age_group_id, sex_id, mean_pop '
'FROM mortality.output '
'WHERE output_version_id = '
'(SELECT output_version_id FROM mortality.output_version WHERE '
'is_best = 1)')
pop = query_tools.query_2_df(query, engine=enginer.engines["mort_prod"])
births = asfr.merge(pop, on=index_cols, how='inner')
births['births'] = births['asfr'] * births['mean_pop']
sds_births = births.copy(deep=True)
def format_births(df):
index_cols = ['location_id', 'year_id', 'age_group_id', 'sex_id']
birth_column = ['births']
keep_columns = index_cols + birth_column
return df[keep_columns]
################################
# Aggregate up Cod and Outputs
################################
# get cod and outputs location hierarchy
print "getting location hierarchy"
query = ('SELECT '
'location_id, level, parent_id, most_detailed, location_type_id '
'FROM '
'shared.location_hierarchy_history lhh '
'JOIN '
'shared.location_set_version lsv USING (location_set_version_id) '
'WHERE '
'lhh.location_set_id = 35 AND '
'lsv.gbd_round = 2015 AND '
'lsv.end_date IS NULL')
loc_df = query_tools.query_2_df(query, engine=enginer.engines["cod_prod"])
# load regional scalars for cod and outputs location aggregation
print "loading and reshaping regional scalars"
region_locs = loc_df[loc_df["location_type_id"] == 6]['location_id'].tolist()
scalar_list = []
root_dir = '/home/j/WORK/10_gbd/01_dalynator/02_inputs/region_scalars'
folders = os.listdir(root_dir)
folders = filter(lambda a: 'archive' not in a, folders)
folders.sort()
inner_folder = int(folders[-1])
scalar_dir = '%s/%d' % (root_dir, inner_folder)
for geo in region_locs:
for year in range(1990, 2016):
scalar_df = pd.read_stata('%s/%s_%s_scaling_pop.dta'
% (scalar_dir, geo, year))
scalar_list.append(scalar_df)
scalar = pd.concat(scalar_list)
scalar = scalar[scalar.age_group_id.isin(range(7, 16))]
# get most detailed locations
print "getting most-detailed locations"
most_detailed = (loc_df.ix[loc_df['most_detailed'] == 1]['location_id']
.drop_duplicates().tolist())
# check for missing locations
print "checking missing locations"
birth_locations = births.merge(loc_df,
on='location_id',
how='left')
birth_loc_list = (birth_locations[birth_locations.most_detailed == 1
]['location_id']
.drop_duplicates().tolist())
if len(set(most_detailed) - set(birth_loc_list)) > 0:
print ("The following locations are missing from the draws %s"
% (', '.join([str(x) for x in list(set(most_detailed) -
set(birth_loc_list))])))
else:
print "No missing locations!"
# merge on cod and outputs location hierarchy
print "merging on location hierarchy"
births = format_births(births)
data = births.copy(deep=True)
data = data.ix[data['location_id'].isin(most_detailed)]
data = pd.merge(data, loc_df,
on='location_id',
how='left')
max_level = data['level'].max()
print max_level
data = format_births(data)
# loop through cod and outputs levels and aggregate
for level in xrange(max_level, 0, -1):
print "Level:", level
data = pd.merge(data, loc_df[['location_id',
'level',
'parent_id']],
on='location_id',
how='left')
temp = data.ix[data['level'] == level].copy(deep=True)
if level == 2: # if we're at the region level, use regional scalars
temp = pd.merge(temp, scalar, on=['location_id',
'year_id',
'age_group_id',
'sex_id'],
how='inner')
temp['births'] = temp['births'] * temp['scaling_factor']
temp.drop('scaling_factor', axis=1, inplace=True)
temp['location_id'] = temp['parent_id']
temp = format_births(temp)
temp = temp.groupby(index_cols).sum().reset_index()
data = pd.concat([format_births(data), temp]).reset_index(drop=True)
births = data.copy(deep=True)
################################
# Aggregate up SDS Comp Hierarchy
################################
# get sds computation hierarchy
print "getting sds location hierarchy"
query = ('SELECT '
'location_id, level, parent_id, most_detailed, location_type_id '
'FROM '
'shared.location_hierarchy_history lhh '
'JOIN '
'shared.location_set_version lsv USING (location_set_version_id) '
'WHERE '
'lhh.location_set_id = 40 AND '
'lsv.gbd_round = 2015 AND '
'lsv.end_date IS NULL')
sds_loc_df = query_tools.query_2_df(query, engine=enginer.engines["cod_prod"])
sds_only = (sds_loc_df.ix[sds_loc_df['level'] == 0]['location_id']
.drop_duplicates().tolist())
sds_most_detailed = (sds_loc_df.ix[sds_loc_df['most_detailed'] == 1]
['location_id']
.drop_duplicates().tolist())
# check for missing locations
print "checking missing locations"
sds_locations = sds_births.merge(sds_loc_df,
on='location_id',
how='left')
sds_loc_list = (sds_locations[sds_locations.most_detailed == 1]['location_id']
.drop_duplicates().tolist())
if len(set(sds_most_detailed) - set(sds_loc_list)) > 0:
print ("The following locations are missing from the draws %s"
% (', '.join([str(x) for x in list(set(sds_most_detailed) -
set(sds_loc_list))])))
else:
print "No missing locations!"
# merge on sds location hierarchy
print "merging on location hierarchy"
sds_births = format_births(sds_births)
sds_data = sds_births.copy(deep=True)
sds_data = sds_data.ix[sds_data['location_id']. | draw_list.append(df) | conditional_block |
01_calculate_MMR_from_draws.py | cause
dalynator_dir = '/ihme/centralcomp/dalynator/%s/draws/hdfs/' % model_vers
files = []
for root, dirnames, filenames in os.walk('%s' % dalynator_dir):
for filename in fnmatch.filter(filenames, '*%s.h5' % year_id):
files.append(os.path.join(root, filename))
def read_file(f):
|
draw_list = []
with cf.ProcessPoolExecutor(max_workers=14) as e:
for df in e.map(read_file, files):
draw_list.append(df)
draws = pd.concat(draw_list)
draws.reset_index(inplace=True)
draws = draws[draws.age_group_id.isin(ages)]
draws['location_id'] = draws['location_id'].astype('int')
draws['age_group_id'] = draws['age_group_id'].astype('int')
draws['sex_id'] = draws['sex_id'].astype('int')
draws['year_id'] = draws['year_id'].astype('int')
# aggregate and add a teenage death age group
teenage_deaths = draws.copy(deep=True)
teenage_deaths = teenage_deaths[teenage_deaths.age_group_id.isin(range(7, 9))]
teenage_deaths['age_group_id'] = 162 # 10to19
daly_idx = ['location_id', 'year_id', 'age_group_id', 'sex_id', 'cause_id',
'rei_id', 'metric_id', 'measure_id']
teenage_deaths = (teenage_deaths.groupby(daly_idx).sum().reset_index())
draws = draws.append(teenage_deaths)
# load live births
print "loading live births"
query = ('SELECT '
'model.location_id, model.year_id, model.age_group_id, model.sex_id, '
'model.mean_value AS asfr FROM covariate.model '
'JOIN covariate.model_version ON model.model_version_id=model_version'
'.model_version_id JOIN covariate.data_version ON model_version'
'.data_version_id=data_version.data_version_id JOIN shared.covariate '
'ON data_version.covariate_id=covariate.covariate_id '
'WHERE covariate.last_updated_action!="DELETE" AND is_best=1 '
'AND covariate.covariate_id= 13 AND model.age_group_id '
'BETWEEN 7 AND 15 AND model.year_id > 1989')
asfr = query_tools.query_2_df(query, engine=enginer.engines["cov_prod"])
asfr['sex_id'] = 2
query = ('SELECT location_id, year_id, age_group_id, sex_id, mean_pop '
'FROM mortality.output '
'WHERE output_version_id = '
'(SELECT output_version_id FROM mortality.output_version WHERE '
'is_best = 1)')
pop = query_tools.query_2_df(query, engine=enginer.engines["mort_prod"])
births = asfr.merge(pop, on=index_cols, how='inner')
births['births'] = births['asfr'] * births['mean_pop']
sds_births = births.copy(deep=True)
def format_births(df):
index_cols = ['location_id', 'year_id', 'age_group_id', 'sex_id']
birth_column = ['births']
keep_columns = index_cols + birth_column
return df[keep_columns]
################################
# Aggregate up Cod and Outputs
################################
# get cod and outputs location hierarchy
print "getting location hierarchy"
query = ('SELECT '
'location_id, level, parent_id, most_detailed, location_type_id '
'FROM '
'shared.location_hierarchy_history lhh '
'JOIN '
'shared.location_set_version lsv USING (location_set_version_id) '
'WHERE '
'lhh.location_set_id = 35 AND '
'lsv.gbd_round = 2015 AND '
'lsv.end_date IS NULL')
loc_df = query_tools.query_2_df(query, engine=enginer.engines["cod_prod"])
# load regional scalars for cod and outputs location aggregation
print "loading and reshaping regional scalars"
region_locs = loc_df[loc_df["location_type_id"] == 6]['location_id'].tolist()
scalar_list = []
root_dir = '/home/j/WORK/10_gbd/01_dalynator/02_inputs/region_scalars'
folders = os.listdir(root_dir)
folders = filter(lambda a: 'archive' not in a, folders)
folders.sort()
inner_folder = int(folders[-1])
scalar_dir = '%s/%d' % (root_dir, inner_folder)
for geo in region_locs:
for year in range(1990, 2016):
scalar_df = pd.read_stata('%s/%s_%s_scaling_pop.dta'
% (scalar_dir, geo, year))
scalar_list.append(scalar_df)
scalar = pd.concat(scalar_list)
scalar = scalar[scalar.age_group_id.isin(range(7, 16))]
# get most detailed locations
print "getting most-detailed locations"
most_detailed = (loc_df.ix[loc_df['most_detailed'] == 1]['location_id']
.drop_duplicates().tolist())
# check for missing locations
print "checking missing locations"
birth_locations = births.merge(loc_df,
on='location_id',
how='left')
birth_loc_list = (birth_locations[birth_locations.most_detailed == 1
]['location_id']
.drop_duplicates().tolist())
if len(set(most_detailed) - set(birth_loc_list)) > 0:
print ("The following locations are missing from the draws %s"
% (', '.join([str(x) for x in list(set(most_detailed) -
set(birth_loc_list))])))
else:
print "No missing locations!"
# merge on cod and outputs location hierarchy
print "merging on location hierarchy"
births = format_births(births)
data = births.copy(deep=True)
data = data.ix[data['location_id'].isin(most_detailed)]
data = pd.merge(data, loc_df,
on='location_id',
how='left')
max_level = data['level'].max()
print max_level
data = format_births(data)
# loop through cod and outputs levels and aggregate
for level in xrange(max_level, 0, -1):
print "Level:", level
data = pd.merge(data, loc_df[['location_id',
'level',
'parent_id']],
on='location_id',
how='left')
temp = data.ix[data['level'] == level].copy(deep=True)
if level == 2: # if we're at the region level, use regional scalars
temp = pd.merge(temp, scalar, on=['location_id',
'year_id',
'age_group_id',
'sex_id'],
how='inner')
temp['births'] = temp['births'] * temp['scaling_factor']
temp.drop('scaling_factor', axis=1, inplace=True)
temp['location_id'] = temp['parent_id']
temp = format_births(temp)
temp = temp.groupby(index_cols).sum().reset_index()
data = pd.concat([format_births(data), temp]).reset_index(drop=True)
births = data.copy(deep=True)
################################
# Aggregate up SDS Comp Hierarchy
################################
# get sds computation hierarchy
print "getting sds location hierarchy"
query = ('SELECT '
'location_id, level, parent_id, most_detailed, location_type_id '
'FROM '
'shared.location_hierarchy_history lhh '
'JOIN '
'shared.location_set_version lsv USING (location_set_version_id) '
'WHERE '
'lhh.location_set_id = 40 AND '
'lsv.gbd_round = 2015 AND '
'lsv.end_date IS NULL')
sds_loc_df = query_tools.query_2_df(query, engine=enginer.engines["cod_prod"])
sds_only = (sds_loc_df.ix[sds_loc_df['level'] == 0]['location_id']
.drop_duplicates().tolist())
sds_most_detailed = (sds_loc_df.ix[sds_loc_df['most_detailed'] == 1]
['location_id']
.drop_duplicates().tolist())
# check for missing locations
print "checking missing locations"
sds_locations = sds_births.merge(sds_loc_df,
on='location_id',
how='left')
sds_loc_list = (sds_locations[sds_locations.most_detailed == 1]['location_id']
.drop_duplicates().tolist())
if len(set(sds_most_detailed) - set(sds_loc_list)) > 0:
print ("The following locations are missing from the draws %s"
% (', '.join([str(x) for x in list(set(sds_most_detailed) -
set(sds_loc_list))])))
else:
print "No missing locations!"
# merge on sds location hierarchy
print "merging on location hierarchy"
sds_births = format_births(sds_births)
sds_data = sds_births.copy(deep=True)
sds_data = sds_data.ix[sds_data['location_id']. | return pd.read_hdf(f, 'data', where=[("'cause_id'==%d & 'measure_id'==1"
"& 'metric_id'==1 & 'sex_id'==2"
"& 'rei_id'==0") % cause_id]) | identifier_body |
01_calculate_MMR_from_draws.py | (df):
df['measure_id'] = 25
df['metric_id'] = 3
df['cause_id'] = cause_id
return df
# get best dalynator version
query = ('SELECT '
'distinct(val) AS daly_id '
'FROM '
'gbd.gbd_process_version_metadata gpvm '
'JOIN '
'gbd.gbd_process_version USING (gbd_process_version_id) '
'JOIN '
'gbd.compare_version_output USING (compare_version_id) '
'WHERE '
'compare_version_id = (SELECT '
'compare_version_id '
'FROM '
'gbd.compare_version '
'WHERE '
'compare_version_status_id = 1 '
'AND gbd_round_id = 3) '
'AND gpvm.metadata_type_id = 5')
model_vers = query_tools.query_2_df(
query, engine=enginer.engines["gbd_prod"]).loc[0, 'daly_id']
# load dalynator draws for the appropriate cause
dalynator_dir = '/ihme/centralcomp/dalynator/%s/draws/hdfs/' % model_vers
files = []
for root, dirnames, filenames in os.walk('%s' % dalynator_dir):
for filename in fnmatch.filter(filenames, '*%s.h5' % year_id):
files.append(os.path.join(root, filename))
def read_file(f):
return pd.read_hdf(f, 'data', where=[("'cause_id'==%d & 'measure_id'==1"
"& 'metric_id'==1 & 'sex_id'==2"
"& 'rei_id'==0") % cause_id])
draw_list = []
with cf.ProcessPoolExecutor(max_workers=14) as e:
for df in e.map(read_file, files):
draw_list.append(df)
draws = pd.concat(draw_list)
draws.reset_index(inplace=True)
draws = draws[draws.age_group_id.isin(ages)]
draws['location_id'] = draws['location_id'].astype('int')
draws['age_group_id'] = draws['age_group_id'].astype('int')
draws['sex_id'] = draws['sex_id'].astype('int')
draws['year_id'] = draws['year_id'].astype('int')
# aggregate and add a teenage death age group
teenage_deaths = draws.copy(deep=True)
teenage_deaths = teenage_deaths[teenage_deaths.age_group_id.isin(range(7, 9))]
teenage_deaths['age_group_id'] = 162 # 10to19
daly_idx = ['location_id', 'year_id', 'age_group_id', 'sex_id', 'cause_id',
'rei_id', 'metric_id', 'measure_id']
teenage_deaths = (teenage_deaths.groupby(daly_idx).sum().reset_index())
draws = draws.append(teenage_deaths)
# load live births
print "loading live births"
query = ('SELECT '
'model.location_id, model.year_id, model.age_group_id, model.sex_id, '
'model.mean_value AS asfr FROM covariate.model '
'JOIN covariate.model_version ON model.model_version_id=model_version'
'.model_version_id JOIN covariate.data_version ON model_version'
'.data_version_id=data_version.data_version_id JOIN shared.covariate '
'ON data_version.covariate_id=covariate.covariate_id '
'WHERE covariate.last_updated_action!="DELETE" AND is_best=1 '
'AND covariate.covariate_id= 13 AND model.age_group_id '
'BETWEEN 7 AND 15 AND model.year_id > 1989')
asfr = query_tools.query_2_df(query, engine=enginer.engines["cov_prod"])
asfr['sex_id'] = 2
query = ('SELECT location_id, year_id, age_group_id, sex_id, mean_pop '
'FROM mortality.output '
'WHERE output_version_id = '
'(SELECT output_version_id FROM mortality.output_version WHERE '
'is_best = 1)')
pop = query_tools.query_2_df(query, engine=enginer.engines["mort_prod"])
births = asfr.merge(pop, on=index_cols, how='inner')
births['births'] = births['asfr'] * births['mean_pop']
sds_births = births.copy(deep=True)
def format_births(df):
index_cols = ['location_id', 'year_id', 'age_group_id', 'sex_id']
birth_column = ['births']
keep_columns = index_cols + birth_column
return df[keep_columns]
################################
# Aggregate up Cod and Outputs
################################
# get cod and outputs location hierarchy
print "getting location hierarchy"
query = ('SELECT '
'location_id, level, parent_id, most_detailed, location_type_id '
'FROM '
'shared.location_hierarchy_history lhh '
'JOIN '
'shared.location_set_version lsv USING (location_set_version_id) '
'WHERE '
'lhh.location_set_id = 35 AND '
'lsv.gbd_round = 2015 AND '
'lsv.end_date IS NULL')
loc_df = query_tools.query_2_df(query, engine=enginer.engines["cod_prod"])
# load regional scalars for cod and outputs location aggregation
print "loading and reshaping regional scalars"
region_locs = loc_df[loc_df["location_type_id"] == 6]['location_id'].tolist()
scalar_list = []
root_dir = '/home/j/WORK/10_gbd/01_dalynator/02_inputs/region_scalars'
folders = os.listdir(root_dir)
folders = filter(lambda a: 'archive' not in a, folders)
folders.sort()
inner_folder = int(folders[-1])
scalar_dir = '%s/%d' % (root_dir, inner_folder)
for geo in region_locs:
for year in range(1990, 2016):
scalar_df = pd.read_stata('%s/%s_%s_scaling_pop.dta'
% (scalar_dir, geo, year))
scalar_list.append(scalar_df)
scalar = pd.concat(scalar_list)
scalar = scalar[scalar.age_group_id.isin(range(7, 16))]
# get most detailed locations
print "getting most-detailed locations"
most_detailed = (loc_df.ix[loc_df['most_detailed'] == 1]['location_id']
.drop_duplicates().tolist())
# check for missing locations
print "checking missing locations"
birth_locations = births.merge(loc_df,
on='location_id',
how='left')
birth_loc_list = (birth_locations[birth_locations.most_detailed == 1
]['location_id']
.drop_duplicates().tolist())
if len(set(most_detailed) - set(birth_loc_list)) > 0:
print ("The following locations are missing from the draws %s"
% (', '.join([str(x) for x in list(set(most_detailed) -
set(birth_loc_list))])))
else:
print "No missing locations!"
# merge on cod and outputs location hierarchy
print "merging on location hierarchy"
births = format_births(births)
data = births.copy(deep=True)
data = data.ix[data['location_id'].isin(most_detailed)]
data = pd.merge(data, loc_df,
on='location_id',
how='left')
max_level = data['level'].max()
print max_level
data = format_births(data)
# loop through cod and outputs levels and aggregate
for level in xrange(max_level, 0, -1):
print "Level:", level
data = pd.merge(data, loc_df[['location_id',
'level',
'parent_id']],
on='location_id',
how='left')
temp = data.ix[data['level'] == level].copy(deep=True)
if level == 2: # if we're at the region level, use regional scalars
temp = pd.merge(temp, scalar, on=['location_id',
'year_id',
'age_group_id',
'sex_id'],
how='inner')
temp['births'] = temp['births'] * temp['scaling_factor']
temp.drop('scaling_factor', axis=1, inplace=True)
temp['location_id'] = temp['parent_id']
temp = format_births(temp)
temp = temp.groupby(index_cols).sum().reset_index()
data = pd.concat([format_births(data), temp]).reset_index(drop=True)
births = data.copy(deep=True)
################################
# Aggregate up SDS Comp Hierarchy
################################
# get sds computation hierarchy
print "getting sds location hierarchy"
query = ('SELECT '
'location_id, level, parent_id, most_detailed, location_type_id '
'FROM '
'shared.location_hierarchy_history lhh '
'JOIN '
'shared.location_set_version lsv USING (location_set_version_id) '
'WHERE '
'lhh.location_set_id = 40 AND '
'lsv.gbd_round = 2015 AND '
'lsv.end_date IS NULL')
sds_loc_df = query_tools.query_2_df(query, engine=enginer.engines["cod_prod"])
sds_only = (sds_loc_df.ix[sds_loc_df['level'] == 0]['location_id']
.drop_duplicates().tolist())
sds_most_detailed = | add_cols | identifier_name |
|
01_calculate_MMR_from_draws.py | query_tools.query_2_df(query, engine=enginer.engines["cod_prod"])
# load regional scalars for cod and outputs location aggregation
print "loading and reshaping regional scalars"
region_locs = loc_df[loc_df["location_type_id"] == 6]['location_id'].tolist()
scalar_list = []
root_dir = '/home/j/WORK/10_gbd/01_dalynator/02_inputs/region_scalars'
folders = os.listdir(root_dir)
folders = filter(lambda a: 'archive' not in a, folders)
folders.sort()
inner_folder = int(folders[-1])
scalar_dir = '%s/%d' % (root_dir, inner_folder)
for geo in region_locs:
for year in range(1990, 2016):
scalar_df = pd.read_stata('%s/%s_%s_scaling_pop.dta'
% (scalar_dir, geo, year))
scalar_list.append(scalar_df)
scalar = pd.concat(scalar_list)
scalar = scalar[scalar.age_group_id.isin(range(7, 16))]
# get most detailed locations
print "getting most-detailed locations"
most_detailed = (loc_df.ix[loc_df['most_detailed'] == 1]['location_id']
.drop_duplicates().tolist())
# check for missing locations
print "checking missing locations"
birth_locations = births.merge(loc_df,
on='location_id',
how='left')
birth_loc_list = (birth_locations[birth_locations.most_detailed == 1
]['location_id']
.drop_duplicates().tolist())
if len(set(most_detailed) - set(birth_loc_list)) > 0:
print ("The following locations are missing from the draws %s"
% (', '.join([str(x) for x in list(set(most_detailed) -
set(birth_loc_list))])))
else:
print "No missing locations!"
# merge on cod and outputs location hierarchy
print "merging on location hierarchy"
births = format_births(births)
data = births.copy(deep=True)
data = data.ix[data['location_id'].isin(most_detailed)]
data = pd.merge(data, loc_df,
on='location_id',
how='left')
max_level = data['level'].max()
print max_level
data = format_births(data)
# loop through cod and outputs levels and aggregate
for level in xrange(max_level, 0, -1):
print "Level:", level
data = pd.merge(data, loc_df[['location_id',
'level',
'parent_id']],
on='location_id',
how='left')
temp = data.ix[data['level'] == level].copy(deep=True)
if level == 2: # if we're at the region level, use regional scalars
temp = pd.merge(temp, scalar, on=['location_id',
'year_id',
'age_group_id',
'sex_id'],
how='inner')
temp['births'] = temp['births'] * temp['scaling_factor']
temp.drop('scaling_factor', axis=1, inplace=True)
temp['location_id'] = temp['parent_id']
temp = format_births(temp)
temp = temp.groupby(index_cols).sum().reset_index()
data = pd.concat([format_births(data), temp]).reset_index(drop=True)
births = data.copy(deep=True)
################################
# Aggregate up SDS Comp Hierarchy
################################
# get sds computation hierarchy
print "getting sds location hierarchy"
query = ('SELECT '
'location_id, level, parent_id, most_detailed, location_type_id '
'FROM '
'shared.location_hierarchy_history lhh '
'JOIN '
'shared.location_set_version lsv USING (location_set_version_id) '
'WHERE '
'lhh.location_set_id = 40 AND '
'lsv.gbd_round = 2015 AND '
'lsv.end_date IS NULL')
sds_loc_df = query_tools.query_2_df(query, engine=enginer.engines["cod_prod"])
sds_only = (sds_loc_df.ix[sds_loc_df['level'] == 0]['location_id']
.drop_duplicates().tolist())
sds_most_detailed = (sds_loc_df.ix[sds_loc_df['most_detailed'] == 1]
['location_id']
.drop_duplicates().tolist())
# check for missing locations
print "checking missing locations"
sds_locations = sds_births.merge(sds_loc_df,
on='location_id',
how='left')
sds_loc_list = (sds_locations[sds_locations.most_detailed == 1]['location_id']
.drop_duplicates().tolist())
if len(set(sds_most_detailed) - set(sds_loc_list)) > 0:
print ("The following locations are missing from the draws %s"
% (', '.join([str(x) for x in list(set(sds_most_detailed) -
set(sds_loc_list))])))
else:
print "No missing locations!"
# merge on sds location hierarchy
print "merging on location hierarchy"
sds_births = format_births(sds_births)
sds_data = sds_births.copy(deep=True)
sds_data = sds_data.ix[sds_data['location_id'].isin(sds_most_detailed)]
sds_data = pd.merge(sds_data, sds_loc_df,
on='location_id',
how='left')
max_level = sds_data['level'].max()
print max_level
# loop through sds hierarchy levels and aggregate
sds_data = format_births(sds_data)
for level in xrange(max_level, 0, -1):
print "Level:", level
sds_data = pd.merge(sds_data, sds_loc_df[['location_id',
'level',
'parent_id']],
on='location_id',
how='left')
temp = sds_data.ix[sds_data['level'] == level].copy(deep=True)
temp['location_id'] = temp['parent_id']
temp = format_births(temp)
temp = temp.groupby(index_cols).sum().reset_index()
sds_data = (pd.concat([format_births(sds_data), temp])
.reset_index(drop=True))
sds_births = sds_data.copy(deep=True)
sds_births = sds_births[sds_births.location_id.isin(sds_only)]
################################
# Add on SDS to other locs
################################
births_all = pd.concat([births, sds_births])
births = births_all.copy(deep=True)
################################
# Aggregating ages and appending
################################
# aggregate births for all maternal-ages
print "aggregating births for all-ages"
all_ages = births.copy(deep=True)
who_ages = births.copy(deep=True)
teen_ages = births.copy(deep=True)
who_ages = who_ages[who_ages.age_group_id.isin(range(8, 15))]
teen_ages = teen_ages[teen_ages.age_group_id.isin(range(7, 9))]
all_ages['age_group_id'] = 169 # 10to54
who_ages['age_group_id'] = 24 # 15to49
teen_ages['age_group_id'] = 162 # 10to19
all_ages = (all_ages.groupby(index_cols).sum().reset_index())
who_ages = (who_ages.groupby(index_cols).sum().reset_index())
teen_ages = (teen_ages.groupby(index_cols).sum().reset_index())
births = births.append(all_ages)
births = births.append(who_ages)
births = births.append(teen_ages)
births = format_births(births)
################################
# Save live births flat file for
# tables and figures, but do this
# just once, not for every
# parallelized job
################################
if cause_id == 366 and year_id == 2015:
births_csv = births.copy(deep=True)
journal_dir = ('/home/j/WORK/10_gbd/04_journals/gbd2015_capstone_'
'lancet_maternal/02_inputs/live_births')
births_csv.to_csv('%s/live_births_mmr%s.csv'
% (journal_dir, process_v), index=False)
################################
# Merge births and deaths
################################
print "merging births and deaths"
draws = draws.merge(births, on=index_cols, how='inner')
draws.drop(['cause_id', 'rei_id', 'metric_id', 'measure_id'],
axis=1, inplace=True)
arc_draws = draws.copy(deep=True)
arc_out_dir = out_dir.rstrip("/single_year")
arc_draws.to_csv('%s/arc_draws_raw_%s_%s.csv'
% (arc_out_dir, cause_id, year_id),
index=False)
################################
# Output MMR
################################
# get mean, upper, lower deaths
print "getting mean, upper, lower"
draws.set_index(index_cols, inplace=True)
summary = draws.filter(like='draw_', axis=1)
summary = summary.transpose().describe(
percentiles=[.025, .975]).transpose()[['mean', '2.5%', '97.5%']]
summary.rename( | columns={'2.5%': 'lower', '97.5%': 'upper'}, inplace=True) | random_line_split |
|
prime_field.rs | pub struct PrimeField<P: Parameters> {
// TODO: un-pub. They are pub so FieldElement can have const-fn constructors.
pub uint: P::UInt,
pub _parameters: PhantomData<P>,
}
/// Required constant parameters for the prime field
// TODO: Fix naming
#[allow(clippy::module_name_repetitions)]
// UInt can not have interior mutability
#[allow(clippy::declare_interior_mutable_const)]
// HACK: Ideally we'd use MontgomeryParameters<UInt: FieldUInt>
// See <https://github.com/rust-lang/rust/issues/52662>
pub trait Parameters: 'static + Send + Sync + Sized {
type UInt: FieldUInt;
/// The modulus to implement in Montgomery form
const MODULUS: Self::UInt;
/// M64 = -MODULUS^(-1) mod 2^64
const M64: u64;
// R1 = 2^256 mod MODULUS
const R1: Self::UInt;
// R2 = 2^512 mod MODULUS
const R2: Self::UInt;
// R3 = 2^768 mod MODULUS
const R3: Self::UInt;
// Generator and quadratic non-residue
const GENERATOR: Self::UInt;
// Multiplicative order: Modulus - 1
const ORDER: Self::UInt;
}
// Derive `MontgomeryParameters` from `Parameters` as `Montgomery<P:
// Parameters>`
struct Montgomery<P: Parameters>(PhantomData<P>);
impl<P: Parameters> MontgomeryParameters for Montgomery<P> {
type UInt = P::UInt;
const M64: u64 = P::M64;
const MODULUS: Self::UInt = P::MODULUS;
const R1: Self::UInt = P::R1;
const R2: Self::UInt = P::R2;
const R3: Self::UInt = P::R3;
}
impl<P: Parameters> PrimeField<P> {
// UInt can not have interior mutability
#[allow(clippy::declare_interior_mutable_const)]
pub const MODULUS: P::UInt = P::MODULUS;
#[inline(always)]
pub fn modulus() -> P::UInt {
P::MODULUS
}
/// The multiplicative order of the field.
///
/// Equal to `modulus() - 1` for prime fields.
#[inline(always)]
pub fn order() -> P::UInt {
P::ORDER
}
#[inline(always)]
pub fn generator() -> Self {
Self::from_montgomery(P::GENERATOR)
}
#[inline(always)]
pub fn as_montgomery(&self) -> &P::UInt {
debug_assert!(self.uint < Self::modulus());
&self.uint
}
/// Construct from `UInt` in Montgomery form.
///
/// This is a trivial function.
// TODO: Make `const fn` after <https://github.com/rust-lang/rust/issues/57563>
#[inline(always)]
pub fn from_montgomery(uint: P::UInt) -> Self {
debug_assert!(uint < Self::modulus());
Self {
uint,
_parameters: PhantomData,
}
}
// TODO: from_radix_str
// #[cfg(feature = "std")]
// pub fn from_hex_str(s: &str) -> Self {
// Self::from(UInt::from_hex_str(s))
// }
/// Convert to `UInt`.
#[inline(always)] // Simple wrapper for `from_montgomery`
pub fn to_uint(&self) -> P::UInt {
debug_assert!(self.uint < Self::modulus());
P::UInt::from_montgomery::<Montgomery<P>>(self.as_montgomery())
}
/// Construct from `UInt`
///
/// It does the montgomery conversion.
pub fn from_uint(uint: &P::UInt) -> Self {
debug_assert!(uint < &Self::modulus());
Self::from_montgomery(uint.to_montgomery::<Montgomery<P>>())
}
/// Reduce and construct from `UInt`
pub fn from_uint_reduce(uint: &P::UInt) -> Self {
let uint = P::UInt::redc_inline::<Montgomery<P>>(uint, &P::UInt::zero());
// UInt should not have interior mutability
#[allow(clippy::borrow_interior_mutable_const)]
let uint = uint.mul_redc_inline::<Montgomery<P>>(&P::R3);
Self::from_montgomery(uint)
}
#[inline(always)]
pub fn double(&self) -> Self {
// TODO: Optimize
self.clone() + self
}
#[inline(always)]
pub fn triple(&self) -> Self {
// TODO: Optimize
self.clone() + self + self
}
}
impl<P: Parameters> Clone for PrimeField<P> {
fn clone(&self) -> Self {
Self::from_montgomery(self.as_montgomery().clone())
}
}
impl<P: Parameters> PartialEq for PrimeField<P> {
fn eq(&self, other: &Self) -> bool {
self.as_montgomery() == other.as_montgomery()
}
}
impl<P: Parameters> Eq for PrimeField<P> {}
/// Implements [`Hash`] when `UInt` does.
impl<U, P> Hash for PrimeField<P>
where
U: FieldUInt + Hash,
P: Parameters<UInt = U>,
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.as_montgomery().hash::<H>(state)
}
}
impl<U, P> fmt::Debug for PrimeField<P>
where
U: FieldUInt + fmt::Debug,
P: Parameters<UInt = U>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "field_element!(\"{:?}\")", self.to_uint())
}
}
impl<P: Parameters> Zero for PrimeField<P> {
#[inline(always)]
fn zero() -> Self {
Self::from_montgomery(P::UInt::zero())
}
#[inline(always)]
fn is_zero(&self) -> bool {
self.as_montgomery().is_zero()
}
}
impl<P: Parameters> One for PrimeField<P> {
#[inline(always)]
fn one() -> Self {
Self::from_montgomery(P::R1)
}
// UInt should not have interior mutability
#[allow(clippy::borrow_interior_mutable_const)]
#[inline(always)]
fn is_one(&self) -> bool {
self.as_montgomery() == &P::R1
}
}
impl<P: Parameters> AddInline<&Self> for PrimeField<P> {
#[inline(always)]
fn add_inline(&self, rhs: &Self) -> Self {
let result = self.as_montgomery().add_inline(rhs.as_montgomery());
let result = result.reduce_1_inline::<Montgomery<P>>();
Self::from_montgomery(result)
}
}
impl<P: Parameters> SubInline<&Self> for PrimeField<P> {
#[inline(always)]
fn sub_inline(&self, rhs: &Self) -> Self {
let lhs = self.as_montgomery();
let rhs = rhs.as_montgomery();
let borrow = rhs > lhs;
let mut result = lhs.sub_inline(rhs);
if borrow {
result.add_assign_inline(&Self::modulus());
}
Self::from_montgomery(result)
}
}
impl<P: Parameters> NegInline for PrimeField<P> {
#[inline(always)]
fn neg_inline(&self) -> Self {
if self.is_zero() {
Self::zero()
} else {
Self::from_montgomery(Self::modulus().sub_inline(self.as_montgomery()))
}
}
}
impl<P: Parameters> SquareInline for PrimeField<P> {
#[inline(always)]
fn square_inline(&self) -> Self {
Self::from_montgomery(self.as_montgomery().square_redc_inline::<Montgomery<P>>())
}
}
impl<P: Parameters> MulInline<&Self> for PrimeField<P> {
#[inline(always)]
fn mul_inline(&self, rhs: &Self) -> Self {
Self::from_montgomery(
self.as_montgomery()
.mul_redc_inline::<Montgomery<P>>(rhs.as_montgomery()),
)
}
}
impl<P: Parameters> Inv for &PrimeField<P> {
type Output = Option<PrimeField<P>>;
#[inline(always)] // Simple wrapper
fn inv(self) -> Self::Output {
self.as_montgomery()
.inv_redc::<Montgomery<P>>()
.map(PrimeField::<P>::from_montgomery)
}
}
impl<P: Parameters> Pow<usize> for &PrimeField<P> {
type Output = PrimeField<P>;
fn pow(self, exponent: usize) -> Self::Output {
self.pow(&exponent)
}
}
impl<P: Parameters> Pow<isize> for &PrimeField<P> {
type Output = Option<PrimeField<P>>;
fn pow(self, exponent: isize) -> Self::Output {
| // Derive fails for Clone, PartialEq, Eq, Hash | random_line_split |
|
prime_field.rs | }
/// Convert to `UInt`.
#[inline(always)] // Simple wrapper for `from_montgomery`
pub fn to_uint(&self) -> P::UInt {
debug_assert!(self.uint < Self::modulus());
P::UInt::from_montgomery::<Montgomery<P>>(self.as_montgomery())
}
/// Construct from `UInt`
///
/// It performs the Montgomery conversion.
pub fn from_uint(uint: &P::UInt) -> Self {
debug_assert!(uint < &Self::modulus());
Self::from_montgomery(uint.to_montgomery::<Montgomery<P>>())
}
/// Reduce and construct from `UInt`
pub fn from_uint_reduce(uint: &P::UInt) -> Self {
let uint = P::UInt::redc_inline::<Montgomery<P>>(uint, &P::UInt::zero());
// UInt should not have interior mutability
#[allow(clippy::borrow_interior_mutable_const)]
let uint = uint.mul_redc_inline::<Montgomery<P>>(&P::R3);
Self::from_montgomery(uint)
}
#[inline(always)]
pub fn double(&self) -> Self {
// TODO: Optimize
self.clone() + self
}
#[inline(always)]
pub fn triple(&self) -> Self {
// TODO: Optimize
self.clone() + self + self
}
}
impl<P: Parameters> Clone for PrimeField<P> {
fn clone(&self) -> Self {
Self::from_montgomery(self.as_montgomery().clone())
}
}
impl<P: Parameters> PartialEq for PrimeField<P> {
fn eq(&self, other: &Self) -> bool {
self.as_montgomery() == other.as_montgomery()
}
}
impl<P: Parameters> Eq for PrimeField<P> {}
/// Implements [`Hash`] when `UInt` does.
impl<U, P> Hash for PrimeField<P>
where
U: FieldUInt + Hash,
P: Parameters<UInt = U>,
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.as_montgomery().hash::<H>(state)
}
}
impl<U, P> fmt::Debug for PrimeField<P>
where
U: FieldUInt + fmt::Debug,
P: Parameters<UInt = U>,
{
fn | (&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "field_element!(\"{:?}\")", self.to_uint())
}
}
impl<P: Parameters> Zero for PrimeField<P> {
#[inline(always)]
fn zero() -> Self {
Self::from_montgomery(P::UInt::zero())
}
#[inline(always)]
fn is_zero(&self) -> bool {
self.as_montgomery().is_zero()
}
}
impl<P: Parameters> One for PrimeField<P> {
#[inline(always)]
fn one() -> Self {
Self::from_montgomery(P::R1)
}
// UInt should not have interior mutability
#[allow(clippy::borrow_interior_mutable_const)]
#[inline(always)]
fn is_one(&self) -> bool {
self.as_montgomery() == &P::R1
}
}
impl<P: Parameters> AddInline<&Self> for PrimeField<P> {
#[inline(always)]
fn add_inline(&self, rhs: &Self) -> Self {
let result = self.as_montgomery().add_inline(rhs.as_montgomery());
let result = result.reduce_1_inline::<Montgomery<P>>();
Self::from_montgomery(result)
}
}
impl<P: Parameters> SubInline<&Self> for PrimeField<P> {
#[inline(always)]
fn sub_inline(&self, rhs: &Self) -> Self {
let lhs = self.as_montgomery();
let rhs = rhs.as_montgomery();
let borrow = rhs > lhs;
let mut result = lhs.sub_inline(rhs);
if borrow {
result.add_assign_inline(&Self::modulus());
}
Self::from_montgomery(result)
}
}
impl<P: Parameters> NegInline for PrimeField<P> {
#[inline(always)]
fn neg_inline(&self) -> Self {
if self.is_zero() {
Self::zero()
} else {
Self::from_montgomery(Self::modulus().sub_inline(self.as_montgomery()))
}
}
}
impl<P: Parameters> SquareInline for PrimeField<P> {
#[inline(always)]
fn square_inline(&self) -> Self {
Self::from_montgomery(self.as_montgomery().square_redc_inline::<Montgomery<P>>())
}
}
impl<P: Parameters> MulInline<&Self> for PrimeField<P> {
#[inline(always)]
fn mul_inline(&self, rhs: &Self) -> Self {
Self::from_montgomery(
self.as_montgomery()
.mul_redc_inline::<Montgomery<P>>(rhs.as_montgomery()),
)
}
}
impl<P: Parameters> Inv for &PrimeField<P> {
type Output = Option<PrimeField<P>>;
#[inline(always)] // Simple wrapper
fn inv(self) -> Self::Output {
self.as_montgomery()
.inv_redc::<Montgomery<P>>()
.map(PrimeField::<P>::from_montgomery)
}
}
impl<P: Parameters> Pow<usize> for &PrimeField<P> {
type Output = PrimeField<P>;
fn pow(self, exponent: usize) -> Self::Output {
self.pow(&exponent)
}
}
impl<P: Parameters> Pow<isize> for &PrimeField<P> {
type Output = Option<PrimeField<P>>;
fn pow(self, exponent: isize) -> Self::Output {
let negative = exponent < 0;
let abs = exponent.abs() as usize;
if negative {
self.inv().map(|n| n.pow(&abs))
} else {
Some(self.pow(&abs))
}
}
}
impl<P: Parameters, Exponent> Pow<&Exponent> for &PrimeField<P>
where
Exponent: Binary,
{
type Output = PrimeField<P>;
fn pow(self, exponent: &Exponent) -> Self::Output {
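// Right-to-left square-and-multiply: `square` holds self^(2^i) at bit i and is folded
// into `result` for every set bit of the exponent.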
if let Some(msb) = exponent.most_significant_bit() {
let mut result = Self::Output::one();
let mut square = self.clone();
for i in 0..=msb {
if exponent.bit(i) {
result *= □
}
if i < msb {
square.square_assign();
}
}
result
} else {
// exponent = 0
Self::Output::one()
}
}
}
impl<U, P> Root<usize> for PrimeField<P>
where
U: FieldUInt + Binary + DivRem<u64, Quotient = U, Remainder = u64>,
P: Parameters<UInt = U>,
{
// OPT: replace this with a constant array of roots of unity.
fn root(order: usize) -> Option<Self> {
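// An `order`-th root of unity exists iff `order` divides the multiplicative group order
// `modulus() - 1`; in that case `generator^((modulus() - 1) / order)` is such a root.
// The fallback branch handles division by zero (`order == 0`), for which one is returned.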
let order = order as u64;
if let Some((q, rem)) = Self::order().div_rem(order) {
if rem.is_zero() {
Some(Self::generator().pow(&q))
} else {
None
}
} else {
Some(Self::one())
}
}
}
// TODO: Generalize over order type
// Lint has a false positive here
#[allow(single_use_lifetimes)]
impl<U, P> Root<&U> for PrimeField<P>
where
U: FieldUInt + Binary + for<'a> DivRem<&'a U, Quotient = U, Remainder = U>,
P: Parameters<UInt = U>,
{
// OPT: replace this with a constant array of roots of unity.
fn root(order: &P::UInt) -> Option<Self> {
if let Some((q, rem)) = Self::order().div_rem(order) {
if rem.is_zero() {
Some(Self::generator().pow(&q))
} else {
None
}
} else {
Some(Self::one())
}
}
}
impl<U, P> SquareRoot for PrimeField<P>
where
U: FieldUInt + Binary + Shr<usize, Output = U>,
P: Parameters<UInt = U>,
{
fn is_quadratic_residue(&self) -> bool {
self.pow(&(Self::MODULUS >> 1_usize)) != -Self::one()
}
// Tonelli-Shanks square root algorithm for prime fields
// See 'Handbook of Applied Cryptography' algorithm 3.34
// OPT: Use algorithm 3.39 for Proth primes.
fn square_root(&self) -> Option<Self> {
if self.is_zero() {
return Some(Self::zero());
}
if !self.is_quadratic_residue() {
return None;
}
// TODO: Provide as a constant parameter?
// Factor order as `signifcant` * 2 ^ `trailing_zeros`
let trailing_zeros = Self::order().trailing_zeros();
let signifcant = Self::order() >> trailing_zeros;
// The starting value of c in the Tonelli-Shanks algorithm. We use the preferred
// generator as the quadratic nonresidue required by the algorithm.
let c | fmt | identifier_name |
prime_field.rs | }
/// Convert to `UInt`.
#[inline(always)] // Simple wrapper for `from_montgomery`
pub fn to_uint(&self) -> P::UInt {
debug_assert!(self.uint < Self::modulus());
P::UInt::from_montgomery::<Montgomery<P>>(self.as_montgomery())
}
/// Construct from `UInt`
///
/// It performs the Montgomery conversion.
pub fn from_uint(uint: &P::UInt) -> Self {
debug_assert!(uint < &Self::modulus());
Self::from_montgomery(uint.to_montgomery::<Montgomery<P>>())
}
/// Reduce and construct from `UInt`
pub fn from_uint_reduce(uint: &P::UInt) -> Self {
let uint = P::UInt::redc_inline::<Montgomery<P>>(uint, &P::UInt::zero());
// UInt should not have interior mutability
#[allow(clippy::borrow_interior_mutable_const)]
let uint = uint.mul_redc_inline::<Montgomery<P>>(&P::R3);
Self::from_montgomery(uint)
}
#[inline(always)]
pub fn double(&self) -> Self {
// TODO: Optimize
self.clone() + self
}
#[inline(always)]
pub fn triple(&self) -> Self {
// TODO: Optimize
self.clone() + self + self
}
}
impl<P: Parameters> Clone for PrimeField<P> {
fn clone(&self) -> Self {
Self::from_montgomery(self.as_montgomery().clone())
}
}
impl<P: Parameters> PartialEq for PrimeField<P> {
fn eq(&self, other: &Self) -> bool {
self.as_montgomery() == other.as_montgomery()
}
}
impl<P: Parameters> Eq for PrimeField<P> {}
/// Implements [`Hash`] when `UInt` does.
impl<U, P> Hash for PrimeField<P>
where
U: FieldUInt + Hash,
P: Parameters<UInt = U>,
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.as_montgomery().hash::<H>(state)
}
}
impl<U, P> fmt::Debug for PrimeField<P>
where
U: FieldUInt + fmt::Debug,
P: Parameters<UInt = U>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "field_element!(\"{:?}\")", self.to_uint())
}
}
impl<P: Parameters> Zero for PrimeField<P> {
#[inline(always)]
fn zero() -> Self {
Self::from_montgomery(P::UInt::zero())
}
#[inline(always)]
fn is_zero(&self) -> bool {
self.as_montgomery().is_zero()
}
}
impl<P: Parameters> One for PrimeField<P> {
#[inline(always)]
fn one() -> Self {
Self::from_montgomery(P::R1)
}
// UInt should not have interior mutability
#[allow(clippy::borrow_interior_mutable_const)]
#[inline(always)]
fn is_one(&self) -> bool {
self.as_montgomery() == &P::R1
}
}
impl<P: Parameters> AddInline<&Self> for PrimeField<P> {
#[inline(always)]
fn add_inline(&self, rhs: &Self) -> Self {
let result = self.as_montgomery().add_inline(rhs.as_montgomery());
let result = result.reduce_1_inline::<Montgomery<P>>();
Self::from_montgomery(result)
}
}
impl<P: Parameters> SubInline<&Self> for PrimeField<P> {
#[inline(always)]
fn sub_inline(&self, rhs: &Self) -> Self {
let lhs = self.as_montgomery();
let rhs = rhs.as_montgomery();
let borrow = rhs > lhs;
let mut result = lhs.sub_inline(rhs);
if borrow {
result.add_assign_inline(&Self::modulus());
}
Self::from_montgomery(result)
}
}
impl<P: Parameters> NegInline for PrimeField<P> {
#[inline(always)]
fn neg_inline(&self) -> Self {
if self.is_zero() {
Self::zero()
} else {
Self::from_montgomery(Self::modulus().sub_inline(self.as_montgomery()))
}
}
}
impl<P: Parameters> SquareInline for PrimeField<P> {
#[inline(always)]
fn square_inline(&self) -> Self {
Self::from_montgomery(self.as_montgomery().square_redc_inline::<Montgomery<P>>())
}
}
impl<P: Parameters> MulInline<&Self> for PrimeField<P> {
#[inline(always)]
fn mul_inline(&self, rhs: &Self) -> Self {
Self::from_montgomery(
self.as_montgomery()
.mul_redc_inline::<Montgomery<P>>(rhs.as_montgomery()),
)
}
}
impl<P: Parameters> Inv for &PrimeField<P> {
type Output = Option<PrimeField<P>>;
#[inline(always)] // Simple wrapper
fn inv(self) -> Self::Output {
self.as_montgomery()
.inv_redc::<Montgomery<P>>()
.map(PrimeField::<P>::from_montgomery)
}
}
impl<P: Parameters> Pow<usize> for &PrimeField<P> {
type Output = PrimeField<P>;
fn pow(self, exponent: usize) -> Self::Output {
self.pow(&exponent)
}
}
impl<P: Parameters> Pow<isize> for &PrimeField<P> {
type Output = Option<PrimeField<P>>;
fn pow(self, exponent: isize) -> Self::Output {
let negative = exponent < 0;
let abs = exponent.abs() as usize;
if negative {
self.inv().map(|n| n.pow(&abs))
} else {
Some(self.pow(&abs))
}
}
}
impl<P: Parameters, Exponent> Pow<&Exponent> for &PrimeField<P>
where
Exponent: Binary,
{
type Output = PrimeField<P>;
fn pow(self, exponent: &Exponent) -> Self::Output {
if let Some(msb) = exponent.most_significant_bit() {
let mut result = Self::Output::one();
let mut square = self.clone();
for i in 0..=msb {
if exponent.bit(i) {
result *= □
}
if i < msb {
square.square_assign();
}
}
result
} else {
// exponent = 0
Self::Output::one()
}
}
}
impl<U, P> Root<usize> for PrimeField<P>
where
U: FieldUInt + Binary + DivRem<u64, Quotient = U, Remainder = u64>,
P: Parameters<UInt = U>,
{
// OPT: replace this with a constant array of roots of unity.
fn root(order: usize) -> Option<Self> {
let order = order as u64;
if let Some((q, rem)) = Self::order().div_rem(order) {
if rem.is_zero() | else {
None
}
} else {
Some(Self::one())
}
}
}
// TODO: Generalize over order type
// Lint has a false positive here
#[allow(single_use_lifetimes)]
impl<U, P> Root<&U> for PrimeField<P>
where
U: FieldUInt + Binary + for<'a> DivRem<&'a U, Quotient = U, Remainder = U>,
P: Parameters<UInt = U>,
{
// OPT: replace this with a constant array of roots of unity.
fn root(order: &P::UInt) -> Option<Self> {
if let Some((q, rem)) = Self::order().div_rem(order) {
if rem.is_zero() {
Some(Self::generator().pow(&q))
} else {
None
}
} else {
Some(Self::one())
}
}
}
impl<U, P> SquareRoot for PrimeField<P>
where
U: FieldUInt + Binary + Shr<usize, Output = U>,
P: Parameters<UInt = U>,
{
fn is_quadratic_residue(&self) -> bool {
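// Euler's criterion: a^((modulus - 1) / 2) is one for nonzero squares and minus one for
// non-squares; zero also passes, since 0^((modulus - 1) / 2) == 0 != -1.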
self.pow(&(Self::MODULUS >> 1_usize)) != -Self::one()
}
// Tonelli-Shanks square root algorithm for prime fields
// See 'Handbook of Applied Cryptography' algorithm 3.34
// OPT: Use algorithm 3.39 for Proth primes.
fn square_root(&self) -> Option<Self> {
if self.is_zero() {
return Some(Self::zero());
}
if !self.is_quadratic_residue() {
return None;
}
// TODO: Provide as a constant parameter?
// Factor order as `signifcant` * 2 ^ `trailing_zeros`
let trailing_zeros = Self::order().trailing_zeros();
let signifcant = Self::order() >> trailing_zeros;
// The starting value of c in the Tonelli-Shanks algorithm. We use the preferred
// generator as the quadratic nonresidue required by the algorithm.
let c | {
Some(Self::generator().pow(&q))
} | conditional_block |
prime_field.rs | : FieldElement =
field_element!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c");
assert_eq!(SMALL, FieldElement::from(15));
assert_eq!(
NUM,
u256h!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c").into()
);
}
#[test]
fn minus_zero_equals_zero() {
assert!(FieldElement::zero().is_zero());
assert!(field_element!("00").is_zero());
assert_eq!(FieldElement::zero(), FieldElement::zero());
assert_eq!(-FieldElement::zero(), FieldElement::zero());
}
#[test]
fn test_add() {
let a = field_element!("06eabe184aa9caca2e17f6073bcc10bb9714c0e3866ff00e0d386f4396392852");
let b = field_element!("0313000a764a9a5514efc99070de3f70586794f9bb0add62ac689763aadea7e8");
let c = field_element!("01fdbe22c0f4650e4307bf97acaa502bef7c55dd417acd70b9a106a74117d039");
assert_eq!(a + b, c);
}
#[test]
fn test_sub() {
let a = FieldElement::from_montgomery(u256h!(
"0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c"
));
let b = FieldElement::from_montgomery(u256h!(
"024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b"
));
let c = field_element!("03d7be0dd45f307519282c76caedd14b3ead2be9cb6512ab60cfd7dfeb5a806a");
assert_eq!(a - b, c);
}
#[test]
fn test_mul() {
let a = FieldElement::from_montgomery(u256h!(
"0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c"
));
let b = FieldElement::from_montgomery(u256h!(
"024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b"
));
let c = field_element!("0738900c5dcab24b419674df19d2cfeb9782eca6d1107be18577eb060390365b");
assert_eq!(a * b, c);
}
#[test]
fn test_div() {
let a = FieldElement::from_montgomery(u256h!(
"0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c"
));
let b = FieldElement::from_montgomery(u256h!(
"024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b"
));
let c = field_element!("003a9a346e7103c74dfcddd0eeb4e16ca71d8887c2bed3d4ee718b62015e87b2");
assert_eq!(a / b, c);
}
proptest!(
#[test]
fn from_as_isize(n: isize) {
prop_assert_eq!(FieldElement::from(n).to_isize().unwrap(), n)
}
#[test]
fn from_as_i128(n: i128) {
prop_assert_eq!(FieldElement::from(n).to_i128().unwrap(), n);
}
#[test]
fn add_identity(a: FieldElement) {
prop_assert_eq!(&a + FieldElement::zero(), a);
}
#[test]
fn mul_identity(a: FieldElement) {
prop_assert_eq!(&a * FieldElement::one(), a);
}
#[test]
fn commutative_add(a: FieldElement, b: FieldElement) {
prop_assert_eq!(&a + &b, b + a);
}
#[test]
fn commutative_mul(a: FieldElement, b: FieldElement) {
prop_assert_eq!(&a * &b, b * a);
}
#[test]
fn associative_add(a: FieldElement, b: FieldElement, c: FieldElement) {
prop_assert_eq!(&a + (&b + &c), (a + b) + c);
}
#[test]
fn associative_mul(a: FieldElement, b: FieldElement, c: FieldElement) {
prop_assert_eq!(&a * (&b * &c), (a * b) * c);
}
#[test]
fn inverse_add(a: FieldElement) {
prop_assert!((&a + a.neg()).is_zero());
}
#[test]
fn inverse_mul(a: FieldElement) {
let inverse = a.inv();
match inverse {
None => prop_assert!(a.is_zero()),
Some(ai) => prop_assert!((a * ai).is_one()),
}
}
#[test]
fn distributivity(a: FieldElement, b: FieldElement, c: FieldElement) {
prop_assert_eq!(&a * (&b + &c), (&a * b) + (a * c));
}
#[test]
fn square(a: FieldElement) {
prop_assert_eq!(a.square(), &a * &a);
}
#[test]
fn pow_0(a: FieldElement) {
prop_assert!(a.pow(0_usize).is_one());
}
#[test]
fn pow_1(a: FieldElement) {
prop_assert_eq!(a.pow(1_usize), a);
}
#[test]
fn pow_2(a: FieldElement) {
prop_assert_eq!(a.pow(2_usize), &a * &a);
}
#[test]
fn pow_n(a: FieldElement, n: usize) {
let exponent = n % 512;
prop_assert_eq!(a.pow(exponent), repeat_n(a, exponent).product());
}
#[test]
fn fermats_little_theorem(a: FieldElement) {
prop_assert_eq!(a.pow(&FieldElement::MODULUS), a);
}
#[test]
fn square_root(a: FieldElement) {
let s = a.square();
let r = s.square_root().unwrap();
prop_assert!(r == a || r == -a);
}
);
#[test]
fn zeroth_root_of_unity() {
assert_eq!(FieldElement::root(0).unwrap(), FieldElement::one());
}
#[test]
fn roots_of_unity_squared() | {
let powers_of_two = (0..193).map(|n| U256::ONE << n);
let roots_of_unity: Vec<_> = powers_of_two
.map(|n| FieldElement::root(&n).unwrap())
.collect();
for (smaller_root, larger_root) in roots_of_unity[1..].iter().zip(roots_of_unity.as_slice())
{
assert_eq!(smaller_root.square(), *larger_root);
assert!(!smaller_root.is_one());
}
} | identifier_body |
|
mod.rs | 34, 171, 176, 198, 42, 235, 172, 180, 214,
106, 235, 173, 184, 230, 170, 235, 174, 188, 246, 234, 235, 175, 192, 6, 43, 236, 176,
196, 22, 107, 236, 177, 200, 38, 171, 236, 178, 204, 54, 235, 236, 179, 208, 70, 43,
237, 180, 212, 86, 107, 237, 181, 216, 102, 171, 237, 182, 220, 118, 235, 237, 183,
224, 134, 43, 238, 184, 228, 150, 107, 238, 185, 232, 166, 171, 238, 186, 236, 182,
235, 238, 187, 240, 198, 43, 239, 188, 244, 214, 107, 239, 189, 248, 230, 171, 239,
190, 252, 246, 235, 239, 191, 0, 7, 44, 240, 192, 4, 23, 108, 240, 193, 8, 39, 172,
240, 194, 12, 55, 236, 240, 195, 16, 71, 44, 241, 196, 20, 87, 108, 241, 197, 24, 103,
172, 241, 198, 28, 119, 236, 241, 199, 32, 135, 44, 242, 200, 36, 151, 108, 242, 201,
40, 167, 172, 242, 202, 44, 183, 236, 242, 203, 48, 199, 44, 243, 204, 52, 215, 108,
243, 205, 56, 231, 172, 243, 206, 60, 247, 236, 243, 207, 64, 7, 45, 244, 208, 68, 23,
109, 244, 209, 72, 39, 173, 244, 210, 76, 55, 237, 244, 211, 80, 71, 45, 245, 212, 84,
87, 109, 245, 213, 88, 103, 173, 245, 214, 92, 119, 237, 245, 215, 96, 135, 45, 246,
216, 100, 151, 109, 246, 217, 104, 167, 173, 246, 218, 108, 183, 237, 246, 219, 112,
199, 45, 247, 220, 116, 215, 109, 247, 221, 120, 231, 173, 247, 222, 124, 247, 237,
247, 223, 128, 7, 46, 248, 224, 132, 23, 110, 248, 225, 136, 39, 174, 248, 226, 140,
55, 238, 248, 227, 144, 71, 46, 249, 228, 148, 87, 110, 249, 229, 152, 103, 174, 249,
230, 156, 119, 238, 249, 231, 160, 135, 46, 250, 232, 164, 151, 110, 250, 233, 168,
167, 174, 250, 234, 172, 183, 238, 250, 235, 176, 199, 46, 251, 236, 180, 215, 110,
251, 237, 184, 231, 174, 251, 238, 188, 247, 238, 251, 239, 192, 7, 47, 252, 240, 196,
23, 111, 252, 241, 200, 39, 175, 252, 242, 204, 55, 239, 252, 243, 208, 71, 47, 253,
244, 212, 87, 111, 253, 245, 216, 103, 175, 253, 246, 220, 119, 239, 253, 247, 224,
135, 47, 254, 248, 228, 151, 111, 254, 249,
];
let num_bits = 10;
let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1000);
let result = decoder.collect::<Vec<_>>();
assert_eq!(result, (0..1000).collect::<Vec<_>>());
}
#[test]
fn small() {
let data = vec![3, 2];
let num_bits = 3;
let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1);
let result = decoder.collect::<Vec<_>>();
assert_eq!(result, &[2]);
}
#[test]
fn | zero_bit_width | identifier_name |
|
mod.rs | }
enum State<'a> {
None,
Bitpacked(bitpacking::Decoder<'a>),
Rle(std::iter::Take<std::iter::Repeat<u32>>),
}
// Decoder of Hybrid-RLE encoded values.
pub struct HybridRleDecoder<'a> {
decoder: Decoder<'a>,
state: State<'a>,
remaining: usize,
}
#[inline]
fn read_next<'a, 'b>(decoder: &'b mut Decoder<'a>, remaining: usize) -> State<'a> {
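// Pull the next hybrid run. Bit-packed runs yield `length` values of `num_bits` bits each;
// RLE runs repeat a single little-endian value `additional` times. A bit width of zero
// means every remaining value is implicitly 0 (`State::None`).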
if decoder.num_bits() == 0 {
return State::None;
};
let state = decoder.next().unwrap();
match state {
HybridEncoded::Bitpacked(packed) => {
let num_bits = decoder.num_bits();
let length = std::cmp::min(packed.len() * 8 / num_bits as usize, remaining);
let decoder = bitpacking::Decoder::new(packed, num_bits as u8, length);
State::Bitpacked(decoder)
}
HybridEncoded::Rle(pack, additional) => {
let mut bytes = [0u8; std::mem::size_of::<u32>()];
pack.iter()
.enumerate()
.for_each(|(i, byte)| bytes[i] = *byte);
let value = u32::from_le_bytes(bytes);
State::Rle(std::iter::repeat(value).take(additional))
}
}
}
impl<'a> HybridRleDecoder<'a> {
pub fn new(data: &'a [u8], num_bits: u32, num_values: usize) -> Self {
let mut decoder = Decoder::new(data, num_bits);
let state = read_next(&mut decoder, num_values);
Self {
decoder,
state,
remaining: num_values,
}
}
}
impl<'a> Iterator for HybridRleDecoder<'a> {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
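// Yield from the current run; once it is exhausted, decode the next run and retry.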
if self.remaining == 0 {
return None;
};
let result = match &mut self.state {
State::Bitpacked(decoder) => decoder.next(),
State::Rle(iter) => iter.next(),
State::None => Some(0),
};
if let Some(result) = result {
self.remaining -= 1;
Some(result)
} else {
self.state = read_next(&mut self.decoder, self.remaining);
self.next()
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<'a> ExactSizeIterator for HybridRleDecoder<'a> {}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn roundtrip() {
let mut buffer = vec![];
let num_bits = 10;
let data = (0..1000).collect::<Vec<_>>();
encode_u32(&mut buffer, data.iter().cloned(), num_bits).unwrap();
let decoder = HybridRleDecoder::new(&buffer, num_bits as u32, data.len());
let result = decoder.collect::<Vec<_>>();
assert_eq!(result, data);
}
#[test]
fn pyarrow_integration() {
// data encoded from pyarrow representing (0..1000)
let data = vec![
127, 0, 4, 32, 192, 0, 4, 20, 96, 192, 1, 8, 36, 160, 192, 2, 12, 52, 224, 192, 3, 16,
68, 32, 193, 4, 20, 84, 96, 193, 5, 24, 100, 160, 193, 6, 28, 116, 224, 193, 7, 32,
132, 32, 194, 8, 36, 148, 96, 194, 9, 40, 164, 160, 194, 10, 44, 180, 224, 194, 11, 48,
196, 32, 195, 12, 52, 212, 96, 195, 13, 56, 228, 160, 195, 14, 60, 244, 224, 195, 15,
64, 4, 33, 196, 16, 68, 20, 97, 196, 17, 72, 36, 161, 196, 18, 76, 52, 225, 196, 19,
80, 68, 33, 197, 20, 84, 84, 97, 197, 21, 88, 100, 161, 197, 22, 92, 116, 225, 197, 23,
96, 132, 33, 198, 24, 100, 148, 97, 198, 25, 104, 164, 161, 198, 26, 108, 180, 225,
198, 27, 112, 196, 33, 199, 28, 116, 212, 97, 199, 29, 120, 228, 161, 199, 30, 124,
244, 225, 199, 31, 128, 4, 34, 200, 32, 132, 20, 98, 200, 33, 136, 36, 162, 200, 34,
140, 52, 226, 200, 35, 144, 68, 34, 201, 36, 148, 84, 98, 201, 37, 152, 100, 162, 201,
38, 156, 116, 226, 201, 39, 160, 132, 34, 202, 40, 164, 148, 98, 202, 41, 168, 164,
162, 202, 42, 172, 180, 226, 202, 43, 176, 196, 34, 203, 44, 180, 212, 98, 203, 45,
184, 228, 162, 203, 46, 188, 244, 226, 203, 47, 192, 4, 35, 204, 48, 196, 20, 99, 204,
49, 200, 36, 163, 204, 50, 204, 52, 227, 204, 51, 208, 68, 35, 205, 52, 212, 84, 99,
205, 53, 216, 100, 163, 205, 54, 220, 116, 227, 205 | Bitpacked(&'a [u8]),
/// An RLE-encoded slice. The first attribute corresponds to the slice (which can be interpreted
/// as a single little-endian value); the second attribute corresponds to the number of repetitions.
Rle(&'a [u8], usize), | random_line_split |
|
mod.rs | 208, 64, 4, 21, 100, 208, 65, 8, 37, 164, 208, 66, 12, 53, 228, 208, 67, 16, 69, 36,
209, 68, 20, 85, 100, 209, 69, 24, 101, 164, 209, 70, 28, 117, 228, 209, 71, 32, 133,
36, 210, 72, 36, 149, 100, 210, 73, 40, 165, 164, 210, 74, 44, 181, 228, 210, 75, 48,
197, 36, 211, 76, 52, 213, 100, 211, 77, 56, 229, 164, 211, 78, 60, 245, 228, 211, 79,
64, 5, 37, 212, 80, 68, 21, 101, 212, 81, 72, 37, 165, 212, 82, 76, 53, 229, 212, 83,
80, 69, 37, 213, 84, 84, 85, 101, 213, 85, 88, 101, 165, 213, 86, 92, 117, 229, 213,
87, 96, 133, 37, 214, 88, 100, 1 | {
// data encoded from pyarrow representing (0..1000)
let data = vec![
127, 0, 4, 32, 192, 0, 4, 20, 96, 192, 1, 8, 36, 160, 192, 2, 12, 52, 224, 192, 3, 16,
68, 32, 193, 4, 20, 84, 96, 193, 5, 24, 100, 160, 193, 6, 28, 116, 224, 193, 7, 32,
132, 32, 194, 8, 36, 148, 96, 194, 9, 40, 164, 160, 194, 10, 44, 180, 224, 194, 11, 48,
196, 32, 195, 12, 52, 212, 96, 195, 13, 56, 228, 160, 195, 14, 60, 244, 224, 195, 15,
64, 4, 33, 196, 16, 68, 20, 97, 196, 17, 72, 36, 161, 196, 18, 76, 52, 225, 196, 19,
80, 68, 33, 197, 20, 84, 84, 97, 197, 21, 88, 100, 161, 197, 22, 92, 116, 225, 197, 23,
96, 132, 33, 198, 24, 100, 148, 97, 198, 25, 104, 164, 161, 198, 26, 108, 180, 225,
198, 27, 112, 196, 33, 199, 28, 116, 212, 97, 199, 29, 120, 228, 161, 199, 30, 124,
244, 225, 199, 31, 128, 4, 34, 200, 32, 132, 20, 98, 200, 33, 136, 36, 162, 200, 34,
140, 52, 226, 200, 35, 144, 68, 34, 201, 36, 148, 84, 98, 201, 37, 152, 100, 162, 201,
38, 156, 116, 226, 201, 39, 160, 132, 34, 202, 40, 164, 148, 98, 202, 41, 168, 164,
162, 202, 42, 172, 180, 226, 202, 43, 176, 196, 34, 203, 44, 180, 212, 98, 203, 45,
184, 228, 162, 203, 46, 188, 244, 226, 203, 47, 192, 4, 35, 204, 48, 196, 20, 99, 204,
49, 200, 36, 163, 204, 50, 204, 52, 227, 204, 51, 208, 68, 35, 205, 52, 212, 84, 99,
205, 53, 216, 100, 163, 205, 54, 220, 116, 227, 205, 55, 224, 132, 35, 206, 56, 228,
148, 99, 206, 57, 232, 164, 163, 206, 58, 236, 180, 227, 206, 59, 240, 196, 35, 207,
60, 244, 212, 99, 207, 61, 248, 228, 163, 207, 62, 252, 244, 227, 207, 63, 0, 5, 36, | identifier_body |
|
mod.rs | else {
self.state = read_next(&mut self.decoder, self.remaining);
self.next()
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<'a> ExactSizeIterator for HybridRleDecoder<'a> {}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn roundtrip() {
let mut buffer = vec![];
let num_bits = 10;
let data = (0..1000).collect::<Vec<_>>();
encode_u32(&mut buffer, data.iter().cloned(), num_bits).unwrap();
let decoder = HybridRleDecoder::new(&buffer, num_bits as u32, data.len());
let result = decoder.collect::<Vec<_>>();
assert_eq!(result, data);
}
#[test]
fn pyarrow_integration() {
// data encoded from pyarrow representing (0..1000)
let data = vec![
127, 0, 4, 32, 192, 0, 4, 20, 96, 192, 1, 8, 36, 160, 192, 2, 12, 52, 224, 192, 3, 16,
68, 32, 193, 4, 20, 84, 96, 193, 5, 24, 100, 160, 193, 6, 28, 116, 224, 193, 7, 32,
132, 32, 194, 8, 36, 148, 96, 194, 9, 40, 164, 160, 194, 10, 44, 180, 224, 194, 11, 48,
196, 32, 195, 12, 52, 212, 96, 195, 13, 56, 228, 160, 195, 14, 60, 244, 224, 195, 15,
64, 4, 33, 196, 16, 68, 20, 97, 196, 17, 72, 36, 161, 196, 18, 76, 52, 225, 196, 19,
80, 68, 33, 197, 20, 84, 84, 97, 197, 21, 88, 100, 161, 197, 22, 92, 116, 225, 197, 23,
96, 132, 33, 198, 24, 100, 148, 97, 198, 25, 104, 164, 161, 198, 26, 108, 180, 225,
198, 27, 112, 196, 33, 199, 28, 116, 212, 97, 199, 29, 120, 228, 161, 199, 30, 124,
244, 225, 199, 31, 128, 4, 34, 200, 32, 132, 20, 98, 200, 33, 136, 36, 162, 200, 34,
140, 52, 226, 200, 35, 144, 68, 34, 201, 36, 148, 84, 98, 201, 37, 152, 100, 162, 201,
38, 156, 116, 226, 201, 39, 160, 132, 34, 202, 40, 164, 148, 98, 202, 41, 168, 164,
162, 202, 42, 172, 180, 226, 202, 43, 176, 196, 34, 203, 44, 180, 212, 98, 203, 45,
184, 228, 162, 203, 46, 188, 244, 226, 203, 47, 192, 4, 35, 204, 48, 196, 20, 99, 204,
49, 200, 36, 163, 204, 50, 204, 52, 227, 204, 51, 208, 68, 35, 205, 52, 212, 84, 99,
205, 53, 216, 100, 163, 205, 54, 220, 116, 227, 205, 55, 224, 132, 35, 206, 56, 228,
148, 99, 206, 57, 232, 164, 163, 206, 58, 236, 180, 227, 206, 59, 240, 196, 35, 207,
60, 244, 212, 99, 207, 61, 248, 228, 163, 207, 62, 252, 244, 227, 207, 63, 0, 5, 36,
208, 64, 4, 21, 100, 208, 65, 8, 37, 164, 208, 66, 12, 53, 228, 208, 67, 16, 69, 36,
209, 68, 20, 85, 100, 209, 69, 24, 101, 164, 209, 70, 28, 117, 228, 209, 71, 32, 133,
36, 210, 72, 36, 149, 100, 210, 73, 40, 165, 164, 210, 74, 44, 181, 228, 210, 75, 48,
197, 36, 211, 76, 52, 213, 100, 211, 77, 56, 229, 164, 211, 78, 60, 245, 228, 211, 7 | {
self.remaining -= 1;
Some(result)
} | conditional_block |
|
framebuffer_server.rs | ;
use fuchsia_component::{client::connect_channel_to_protocol, server::ServiceFs};
use fuchsia_framebuffer::{sysmem::BufferCollectionAllocator, FrameUsage};
use fuchsia_scenic::{BufferCollectionTokenPair, ViewRefPair};
use fuchsia_zircon as zx;
use futures::{StreamExt, TryStreamExt};
use std::sync::{mpsc::channel, Arc};
use crate::logging::log_warn;
use crate::types::*;
/// The width of the framebuffer image.
pub const IMAGE_WIDTH: u32 = 720;
/// The height of the framebuffer image.
pub const IMAGE_HEIGHT: u32 = 1200;
/// The offset at which the framebuffer will be placed. Assume a display width of 1920.
pub const TRANSLATION_X: i32 = 1920 / 2 - IMAGE_WIDTH as i32 / 2;
/// The Flatland identifier for the framebuffer image.
const IMAGE_ID: fuicomposition::ContentId = fuicomposition::ContentId { value: 2 };
/// The Flatland identifier for the transform associated with the framebuffer.
const TRANSFORM_ID: fuicomposition::TransformId = fuicomposition::TransformId { value: 3 };
/// The protocols that are exposed by the framebuffer server.
enum ExposedProtocols {
ViewProvider(fuiapp::ViewProviderRequestStream),
}
/// A `FramebufferServer` contains initialized proxies to Flatland, as well as a buffer collection
/// that is registered with Flatland.
pub struct FramebufferServer {
/// The Flatland proxy associated with this server.
flatland: fuicomposition::FlatlandSynchronousProxy,
/// The buffer collection that is registered with Flatland.
collection: fsysmem::BufferCollectionInfo2,
}
impl FramebufferServer {
/// Returns a `FramebufferServer` that has created a scene and registered a buffer with
/// Flatland.
pub fn new() -> Result<Self, Errno> {
let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?;
connect_channel_to_protocol::<fuicomposition::AllocatorMarker>(server_end)
.map_err(|_| errno!(ENOENT))?;
let allocator = fuicomposition::AllocatorSynchronousProxy::new(client_end);
let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?;
connect_channel_to_protocol::<fuicomposition::FlatlandMarker>(server_end)
.map_err(|_| errno!(ENOENT))?;
let flatland = fuicomposition::FlatlandSynchronousProxy::new(client_end);
let collection = init_scene(&flatland, &allocator).map_err(|_| errno!(EINVAL))?;
Ok(Self { flatland, collection })
}
/// Returns a clone of the VMO that is shared with Flatland.
pub fn get_vmo(&self) -> Result<zx::Vmo, Errno> {
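// The collection was allocated with a single CPU-mappable buffer; hand out a duplicate
// handle so the caller shares the same memory that Flatland displays.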
self.collection.buffers[0]
.vmo
.as_ref()
.ok_or_else(|| errno!(EINVAL))?
.duplicate_handle(zx::Rights::SAME_RIGHTS)
.map_err(|_| errno!(EINVAL))
}
}
/// Initializes the flatland scene, and returns the associated buffer collection.
///
/// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used long-term and
/// most of the failures would be unexpected and unrecoverable.
fn init_scene(
flatland: &fuicomposition::FlatlandSynchronousProxy,
allocator: &fuicomposition::AllocatorSynchronousProxy,
) -> Result<fsysmem::BufferCollectionInfo2, anyhow::Error> |
let sysmem_buffer_collection_token =
executor.run_singlethreaded(buffer_allocator.duplicate_token())?;
// Notify the async code that the sysmem buffer collection token is available.
collection_sender.send(sysmem_buffer_collection_token).expect("Failed to send collection");
let allocation = executor.run_singlethreaded(buffer_allocator.allocate_buffers(true))?;
// Notify the async code that the buffer allocation completed.
allocation_sender.send(allocation).expect("Failed to send allocation");
Ok(())
});
// Wait for the async code to generate the buffer collection token.
let sysmem_buffer_collection_token = collection_receiver
.recv()
.map_err(|_| anyhow!("Error receiving buffer collection token"))?;
let mut buffer_tokens = BufferCollectionTokenPair::new();
let args = fuicomposition::RegisterBufferCollectionArgs {
export_token: Some(buffer_tokens.export_token),
buffer_collection_token: Some(sysmem_buffer_collection_token),
..fuicomposition::RegisterBufferCollectionArgs::EMPTY
};
allocator
.register_buffer_collection(args, zx::Time::INFINITE)
.map_err(|_| anyhow!("FIDL error registering buffer collection"))?
.map_err(|_| anyhow!("Error registering buffer collection"))?;
// Now that the buffer collection is registered, wait for the buffer allocation to happen.
let allocation =
allocation_receiver.recv().map_err(|_| anyhow!("Error receiving buffer allocation"))?;
let image_props = fuicomposition::ImageProperties {
size: Some(fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }),
..fuicomposition::ImageProperties::EMPTY
};
flatland
.create_image(&mut IMAGE_ID.clone(), &mut buffer_tokens.import_token, 0, image_props)
.map_err(|_| anyhow!("FIDL error creating image"))?;
flatland
.create_transform(&mut TRANSFORM_ID.clone())
.map_err(|_| anyhow!("error creating transform"))?;
flatland
.set_root_transform(&mut TRANSFORM_ID.clone())
.map_err(|_| anyhow!("error setting root transform"))?;
flatland
.set_content(&mut TRANSFORM_ID.clone(), &mut IMAGE_ID.clone())
.map_err(|_| anyhow!("error setting content"))?;
flatland
.set_translation(&mut TRANSFORM_ID.clone(), &mut fmath::Vec_ { x: TRANSLATION_X, y: 0 })
.map_err(|_| anyhow!("error setting translation"))?;
Ok(allocation)
}
/// Spawns a thread to serve a `ViewProvider` in `outgoing_dir`.
///
/// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used long-term and
/// most of the failures would be unexpected and unrecoverable.
pub fn spawn_view_provider(
server: Arc<FramebufferServer>,
outgoing_dir: fidl::endpoints::ServerEnd<fidl_fuchsia_io::DirectoryMarker>,
) {
std::thread::spawn(|| {
let mut executor = fasync::LocalExecutor::new().expect("Failed to create executor");
executor.run_singlethreaded(async move {
let mut service_fs = ServiceFs::new_local();
service_fs.dir("svc").add_fidl_service(ExposedProtocols::ViewProvider);
service_fs.serve_connection(outgoing_dir).expect("Failed to serve connection");
while let Some(ExposedProtocols::ViewProvider(mut request_stream)) =
service_fs.next().await
{
while let Ok(Some(event)) = request_stream.try_next().await {
match event {
fuiapp::ViewProviderRequest::CreateView2 { args, control_handle: _ } => {
let mut view_creation_token = args.view_creation_token.unwrap();
let mut view_identity = fuiviews::ViewIdentityOnCreation::from(
ViewRefPair::new().expect("Failed to create ViewRefPair"),
);
let view_bound_protocols = fuicomposition::ViewBoundProtocols {
..fuicomposition::ViewBoundProtocols::EMPTY
};
// We don't actually care about the parent viewport at the moment, because we don't resize.
let (_parent_viewport_watcher, parent_viewport_watcher_request) =
create_proxy::<fuicomposition::ParentViewportWatcherMarker>()
.expect("failed to create ParentViewportWatcherProxy");
server
.flatland
.create_view2(
&mut view_creation_token,
&mut view_identity,
view_bound_protocols,
parent_viewport_watcher_request,
)
.expect("FIDL error");
server
.flatland
.set_image_destination_size(
&mut IMAGE_ID.clone(),
&mut fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT },
)
.expect("fidl error");
// Now that the view has been created, start presenting.
start_presenting(server.clone());
}
r => {
log_warn!("Got unexpected view provider request: {:?}", r | {
let (collection_sender, collection_receiver) = channel();
let (allocation_sender, allocation_receiver) = channel();
// This thread is spawned to deal with the mix of asynchronous and synchronous proxies.
// In particular, we want to keep Framebuffer creation synchronous, while still making use of
// BufferCollectionAllocator (which exposes an async api).
//
// The spawned thread will execute the futures and send results back to this thread via a
// channel.
std::thread::spawn(move || -> Result<(), anyhow::Error> {
let mut executor = fasync::LocalExecutor::new()?;
let mut buffer_allocator = BufferCollectionAllocator::new(
IMAGE_WIDTH,
IMAGE_HEIGHT,
fidl_fuchsia_sysmem::PixelFormatType::R8G8B8A8,
FrameUsage::Cpu,
1,
)?;
buffer_allocator.set_name(100, "Starnix ViewProvider")?; | identifier_body |
framebuffer_server.rs | async;
use fuchsia_component::{client::connect_channel_to_protocol, server::ServiceFs};
use fuchsia_framebuffer::{sysmem::BufferCollectionAllocator, FrameUsage};
use fuchsia_scenic::{BufferCollectionTokenPair, ViewRefPair};
use fuchsia_zircon as zx;
use futures::{StreamExt, TryStreamExt};
use std::sync::{mpsc::channel, Arc};
use crate::logging::log_warn;
use crate::types::*;
/// The width of the framebuffer image.
pub const IMAGE_WIDTH: u32 = 720;
/// The height of the framebuffer image.
pub const IMAGE_HEIGHT: u32 = 1200;
/// The offset at which the framebuffer will be placed. Assume a display width of 1920.
pub const TRANSLATION_X: i32 = 1920 / 2 - IMAGE_WIDTH as i32 / 2;
/// The Flatland identifier for the framebuffer image.
const IMAGE_ID: fuicomposition::ContentId = fuicomposition::ContentId { value: 2 };
/// The Flatland identifier for the transform associated with the framebuffer.
const TRANSFORM_ID: fuicomposition::TransformId = fuicomposition::TransformId { value: 3 };
/// The protocols that are exposed by the framebuffer server.
enum ExposedProtocols {
ViewProvider(fuiapp::ViewProviderRequestStream),
}
/// A `FramebufferServer` contains initialized proxies to Flatland, as well as a buffer collection
/// that is registered with Flatland.
pub struct | {
/// The Flatland proxy associated with this server.
flatland: fuicomposition::FlatlandSynchronousProxy,
/// The buffer collection that is registered with Flatland.
collection: fsysmem::BufferCollectionInfo2,
}
impl FramebufferServer {
/// Returns a `FramebufferServer` that has created a scene and registered a buffer with
/// Flatland.
pub fn new() -> Result<Self, Errno> {
let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?;
connect_channel_to_protocol::<fuicomposition::AllocatorMarker>(server_end)
.map_err(|_| errno!(ENOENT))?;
let allocator = fuicomposition::AllocatorSynchronousProxy::new(client_end);
let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?;
connect_channel_to_protocol::<fuicomposition::FlatlandMarker>(server_end)
.map_err(|_| errno!(ENOENT))?;
let flatland = fuicomposition::FlatlandSynchronousProxy::new(client_end);
let collection = init_scene(&flatland, &allocator).map_err(|_| errno!(EINVAL))?;
Ok(Self { flatland, collection })
}
/// Returns a clone of the VMO that is shared with Flatland.
pub fn get_vmo(&self) -> Result<zx::Vmo, Errno> {
self.collection.buffers[0]
.vmo
.as_ref()
.ok_or_else(|| errno!(EINVAL))?
.duplicate_handle(zx::Rights::SAME_RIGHTS)
.map_err(|_| errno!(EINVAL))
}
}
/// Initializes the flatland scene, and returns the associated buffer collection.
///
/// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used long-term and
/// most of the failures would be unexpected and unrecoverable.
fn init_scene(
flatland: &fuicomposition::FlatlandSynchronousProxy,
allocator: &fuicomposition::AllocatorSynchronousProxy,
) -> Result<fsysmem::BufferCollectionInfo2, anyhow::Error> {
let (collection_sender, collection_receiver) = channel();
let (allocation_sender, allocation_receiver) = channel();
// This thread is spawned to deal with the mix of asynchronous and synchronous proxies.
// In particular, we want to keep Framebuffer creation synchronous, while still making use of
// BufferCollectionAllocator (which exposes an async api).
//
// The spawned thread will execute the futures and send results back to this thread via a
// channel.
std::thread::spawn(move || -> Result<(), anyhow::Error> {
let mut executor = fasync::LocalExecutor::new()?;
let mut buffer_allocator = BufferCollectionAllocator::new(
IMAGE_WIDTH,
IMAGE_HEIGHT,
fidl_fuchsia_sysmem::PixelFormatType::R8G8B8A8,
FrameUsage::Cpu,
1,
)?;
buffer_allocator.set_name(100, "Starnix ViewProvider")?;
let sysmem_buffer_collection_token =
executor.run_singlethreaded(buffer_allocator.duplicate_token())?;
// Notify the async code that the sysmem buffer collection token is available.
collection_sender.send(sysmem_buffer_collection_token).expect("Failed to send collection");
let allocation = executor.run_singlethreaded(buffer_allocator.allocate_buffers(true))?;
// Notify the async code that the buffer allocation completed.
allocation_sender.send(allocation).expect("Failed to send allocation");
Ok(())
});
// Wait for the async code to generate the buffer collection token.
let sysmem_buffer_collection_token = collection_receiver
.recv()
.map_err(|_| anyhow!("Error receiving buffer collection token"))?;
let mut buffer_tokens = BufferCollectionTokenPair::new();
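// The export half of the token pair is registered with the Flatland allocator below;
// the import half is consumed later by `create_image`.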
let args = fuicomposition::RegisterBufferCollectionArgs {
export_token: Some(buffer_tokens.export_token),
buffer_collection_token: Some(sysmem_buffer_collection_token),
..fuicomposition::RegisterBufferCollectionArgs::EMPTY
};
allocator
.register_buffer_collection(args, zx::Time::INFINITE)
.map_err(|_| anyhow!("FIDL error registering buffer collection"))?
.map_err(|_| anyhow!("Error registering buffer collection"))?;
// Now that the buffer collection is registered, wait for the buffer allocation to happen.
let allocation =
allocation_receiver.recv().map_err(|_| anyhow!("Error receiving buffer allocation"))?;
let image_props = fuicomposition::ImageProperties {
size: Some(fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }),
..fuicomposition::ImageProperties::EMPTY
};
flatland
.create_image(&mut IMAGE_ID.clone(), &mut buffer_tokens.import_token, 0, image_props)
.map_err(|_| anyhow!("FIDL error creating image"))?;
flatland
.create_transform(&mut TRANSFORM_ID.clone())
.map_err(|_| anyhow!("error creating transform"))?;
flatland
.set_root_transform(&mut TRANSFORM_ID.clone())
.map_err(|_| anyhow!("error setting root transform"))?;
flatland
.set_content(&mut TRANSFORM_ID.clone(), &mut IMAGE_ID.clone())
.map_err(|_| anyhow!("error setting content"))?;
flatland
.set_translation(&mut TRANSFORM_ID.clone(), &mut fmath::Vec_ { x: TRANSLATION_X, y: 0 })
.map_err(|_| anyhow!("error setting translation"))?;
Ok(allocation)
}
/// Spawns a thread to serve a `ViewProvider` in `outgoing_dir`.
///
/// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used long-term and
/// most of the failures would be unexpected and unrecoverable.
pub fn spawn_view_provider(
server: Arc<FramebufferServer>,
outgoing_dir: fidl::endpoints::ServerEnd<fidl_fuchsia_io::DirectoryMarker>,
) {
std::thread::spawn(|| {
let mut executor = fasync::LocalExecutor::new().expect("Failed to create executor");
executor.run_singlethreaded(async move {
let mut service_fs = ServiceFs::new_local();
service_fs.dir("svc").add_fidl_service(ExposedProtocols::ViewProvider);
service_fs.serve_connection(outgoing_dir).expect("Failed to serve connection");
while let Some(ExposedProtocols::ViewProvider(mut request_stream)) =
service_fs.next().await
{
while let Ok(Some(event)) = request_stream.try_next().await {
match event {
fuiapp::ViewProviderRequest::CreateView2 { args, control_handle: _ } => {
let mut view_creation_token = args.view_creation_token.unwrap();
let mut view_identity = fuiviews::ViewIdentityOnCreation::from(
ViewRefPair::new().expect("Failed to create ViewRefPair"),
);
let view_bound_protocols = fuicomposition::ViewBoundProtocols {
..fuicomposition::ViewBoundProtocols::EMPTY
};
// We don't actually care about the parent viewport at the moment, because we don't resize.
let (_parent_viewport_watcher, parent_viewport_watcher_request) =
create_proxy::<fuicomposition::ParentViewportWatcherMarker>()
.expect("failed to create ParentViewportWatcherProxy");
server
.flatland
.create_view2(
&mut view_creation_token,
&mut view_identity,
view_bound_protocols,
parent_viewport_watcher_request,
)
.expect("FIDL error");
server
.flatland
.set_image_destination_size(
&mut IMAGE_ID.clone(),
&mut fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT },
)
.expect("fidl error");
// Now that the view has been created, start presenting.
start_presenting(server.clone());
}
r => {
log_warn!("Got unexpected view provider request: {:?}", r);
| FramebufferServer | identifier_name |
framebuffer_server.rs | _WIDTH: u32 = 720;
/// The height of the framebuffer image.
pub const IMAGE_HEIGHT: u32 = 1200;
/// The offset at which the framebuffer will be placed. Assume a display width of 1920.
pub const TRANSLATION_X: i32 = 1920 / 2 - IMAGE_WIDTH as i32 / 2;
/// The Flatland identifier for the framebuffer image.
const IMAGE_ID: fuicomposition::ContentId = fuicomposition::ContentId { value: 2 };
/// The Flatland identifier for the transform associated with the framebuffer.
const TRANSFORM_ID: fuicomposition::TransformId = fuicomposition::TransformId { value: 3 };
/// The protocols that are exposed by the framebuffer server.
enum ExposedProtocols {
ViewProvider(fuiapp::ViewProviderRequestStream),
}
/// A `FramebufferServer` contains initialized proxies to Flatland, as well as a buffer collection
/// that is registered with Flatland.
pub struct FramebufferServer {
/// The Flatland proxy associated with this server.
flatland: fuicomposition::FlatlandSynchronousProxy,
/// The buffer collection that is registered with Flatland.
collection: fsysmem::BufferCollectionInfo2,
}
impl FramebufferServer {
/// Returns a `FramebufferServer` that has created a scene and registered a buffer with
/// Flatland.
pub fn new() -> Result<Self, Errno> {
let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?;
connect_channel_to_protocol::<fuicomposition::AllocatorMarker>(server_end)
.map_err(|_| errno!(ENOENT))?;
let allocator = fuicomposition::AllocatorSynchronousProxy::new(client_end);
let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?;
connect_channel_to_protocol::<fuicomposition::FlatlandMarker>(server_end)
.map_err(|_| errno!(ENOENT))?;
let flatland = fuicomposition::FlatlandSynchronousProxy::new(client_end);
let collection = init_scene(&flatland, &allocator).map_err(|_| errno!(EINVAL))?;
Ok(Self { flatland, collection })
}
/// Returns a clone of the VMO that is shared with Flatland.
pub fn get_vmo(&self) -> Result<zx::Vmo, Errno> {
self.collection.buffers[0]
.vmo
.as_ref()
.ok_or_else(|| errno!(EINVAL))?
.duplicate_handle(zx::Rights::SAME_RIGHTS)
.map_err(|_| errno!(EINVAL))
}
}
/// Initializes the flatland scene, and returns the associated buffer collection.
///
/// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used long-term and
/// most of the failures would be unexpected and unrecoverable.
fn init_scene(
flatland: &fuicomposition::FlatlandSynchronousProxy,
allocator: &fuicomposition::AllocatorSynchronousProxy,
) -> Result<fsysmem::BufferCollectionInfo2, anyhow::Error> {
let (collection_sender, collection_receiver) = channel();
let (allocation_sender, allocation_receiver) = channel();
// This thread is spawned to deal with the mix of asynchronous and synchronous proxies.
// In particular, we want to keep Framebuffer creation synchronous, while still making use of
// BufferCollectionAllocator (which exposes an async api).
//
// The spawned thread will execute the futures and send results back to this thread via a
// channel.
std::thread::spawn(move || -> Result<(), anyhow::Error> {
let mut executor = fasync::LocalExecutor::new()?;
let mut buffer_allocator = BufferCollectionAllocator::new(
IMAGE_WIDTH,
IMAGE_HEIGHT,
fidl_fuchsia_sysmem::PixelFormatType::R8G8B8A8,
FrameUsage::Cpu,
1,
)?;
buffer_allocator.set_name(100, "Starnix ViewProvider")?;
let sysmem_buffer_collection_token =
executor.run_singlethreaded(buffer_allocator.duplicate_token())?;
// Notify the async code that the sysmem buffer collection token is available.
collection_sender.send(sysmem_buffer_collection_token).expect("Failed to send collection");
let allocation = executor.run_singlethreaded(buffer_allocator.allocate_buffers(true))?;
// Notify the async code that the buffer allocation completed.
allocation_sender.send(allocation).expect("Failed to send allocation");
Ok(())
});
// Wait for the async code to generate the buffer collection token.
let sysmem_buffer_collection_token = collection_receiver
.recv()
.map_err(|_| anyhow!("Error receiving buffer collection token"))?;
let mut buffer_tokens = BufferCollectionTokenPair::new();
let args = fuicomposition::RegisterBufferCollectionArgs {
export_token: Some(buffer_tokens.export_token),
buffer_collection_token: Some(sysmem_buffer_collection_token),
..fuicomposition::RegisterBufferCollectionArgs::EMPTY
};
allocator
.register_buffer_collection(args, zx::Time::INFINITE)
.map_err(|_| anyhow!("FIDL error registering buffer collection"))?
.map_err(|_| anyhow!("Error registering buffer collection"))?;
// Now that the buffer collection is registered, wait for the buffer allocation to happen.
let allocation =
allocation_receiver.recv().map_err(|_| anyhow!("Error receiving buffer allocation"))?;
let image_props = fuicomposition::ImageProperties {
size: Some(fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }),
..fuicomposition::ImageProperties::EMPTY
};
flatland
.create_image(&mut IMAGE_ID.clone(), &mut buffer_tokens.import_token, 0, image_props)
.map_err(|_| anyhow!("FIDL error creating image"))?;
flatland
.create_transform(&mut TRANSFORM_ID.clone())
.map_err(|_| anyhow!("error creating transform"))?;
flatland
.set_root_transform(&mut TRANSFORM_ID.clone())
.map_err(|_| anyhow!("error setting root transform"))?;
flatland
.set_content(&mut TRANSFORM_ID.clone(), &mut IMAGE_ID.clone())
.map_err(|_| anyhow!("error setting content"))?;
flatland
.set_translation(&mut TRANSFORM_ID.clone(), &mut fmath::Vec_ { x: TRANSLATION_X, y: 0 })
.map_err(|_| anyhow!("error setting translation"))?;
Ok(allocation)
}
/// Spawns a thread to serve a `ViewProvider` in `outgoing_dir`.
///
/// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used long-term and
/// most of the failures would be unexpected and unrecoverable.
pub fn spawn_view_provider(
server: Arc<FramebufferServer>,
outgoing_dir: fidl::endpoints::ServerEnd<fidl_fuchsia_io::DirectoryMarker>,
) {
std::thread::spawn(|| {
let mut executor = fasync::LocalExecutor::new().expect("Failed to create executor");
executor.run_singlethreaded(async move {
let mut service_fs = ServiceFs::new_local();
service_fs.dir("svc").add_fidl_service(ExposedProtocols::ViewProvider);
service_fs.serve_connection(outgoing_dir).expect("Failed to serve connection");
while let Some(ExposedProtocols::ViewProvider(mut request_stream)) =
service_fs.next().await
{
while let Ok(Some(event)) = request_stream.try_next().await {
match event {
fuiapp::ViewProviderRequest::CreateView2 { args, control_handle: _ } => {
let mut view_creation_token = args.view_creation_token.unwrap();
let mut view_identity = fuiviews::ViewIdentityOnCreation::from(
ViewRefPair::new().expect("Failed to create ViewRefPair"),
);
let view_bound_protocols = fuicomposition::ViewBoundProtocols {
..fuicomposition::ViewBoundProtocols::EMPTY
};
// We don't actually care about the parent viewport at the moment, because we don't resize.
let (_parent_viewport_watcher, parent_viewport_watcher_request) =
create_proxy::<fuicomposition::ParentViewportWatcherMarker>()
.expect("failed to create ParentViewportWatcherProxy");
server
.flatland
.create_view2(
&mut view_creation_token,
&mut view_identity,
view_bound_protocols,
parent_viewport_watcher_request,
)
.expect("FIDL error");
server
.flatland
.set_image_destination_size(
&mut IMAGE_ID.clone(),
&mut fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT },
)
.expect("fidl error");
// Now that the view has been created, start presenting.
start_presenting(server.clone());
}
r => {
log_warn!("Got unexpected view provider request: {:?}", r);
}
}
}
}
});
});
}
/// Starts a flatland presentation loop, using the flatland proxy in `server`.
fn start_presenting(server: Arc<FramebufferServer>) {
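// Drive Flatland from a local async task: wait for the scheduler's go-ahead, request the
// next frame, and submit a present on each iteration.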
fasync::Task::local(async move {
let sched_lib = ThroughputScheduler::new();
// Request an initial presentation.
sched_lib.request_present();
loop {
let present_parameters = sched_lib.wait_to_update().await;
sched_lib.request_present();
server
.flatland | random_line_split |
||
base.py | Sequence(sequence)
def lineReceived(self, line):
if not self.chanRequest:
# server sending random unrequested data.
self.transport.loseConnection()
return
self.setTimeout(None)
try:
self.chanRequest.lineReceived(line)
self.setTimeout(self.timeOut)
except Exception, err:
self.chanRequest.abortWithError(failure.Failure(err))
def rawDataReceived(self, data):
"""Handle incoming content."""
if not self.chanRequest:
# server sending random unrequested data.
self.transport.loseConnection()
return
self.setTimeout(None)
try:
self.chanRequest.rawDataReceived(data)
self.setTimeout(self.timeOut)
except Exception, err:
self.chanRequest.abortWithError(failure.Failure(err))
def createResponse(self, chanRequest):
raise NotImplementedError, "must be implemented in subclass"
def requestFinished(self, request):
"""Request done."""
if self.chanRequest is not None:
del self.chanRequest
self.setTimeout(None)
if self._requests:
self.chanRequest = self._requests.popleft()
self.chanRequest.submit()
return
if self.pool and not self.transport.disconnecting:
self.pool.freeProtocol(self)
def connectionLost(self, reason):
self.setTimeout(None)
# Tell all requests to abort.
if self.chanRequest is not None:
req = self.chanRequest
del self.chanRequest
req.connectionLost(reason)
while self._requests:
self._requests.popleft().connectionLost(reason)
if self.pool:
self.pool.protocolConnectionLost(self, reason)
def loseConnection(self):
self.transport.loseConnection()
def makeConnection(self, transport):
basic.LineReceiver.makeConnection(self, transport)
if self.pool:
self.pool.protocolCreated(self)
def registerProducer(self, producer, streaming):
"""Register a producer."""
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
self.transport.unregisterProducer()
class ClientProtocolPool(object):
def __init__(self, addr, factory, maxConn=50, maxIdleTime=600):
self.addr = addr
self.factory = factory
self.maxConn = maxConn
self.maxIdleTime = maxIdleTime
self._busy = []
self._idle = []
self._size = 0
self.dead = False
self.deferredRequests = deque()
def protocolCreated(self, protocol):
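# A successful connection clears the pool's dead flag, then either serves the oldest
# queued request right away or is parked on the idle list.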
if self.dead:
self.dead = False
self._size += 1
self.touch(protocol)
if self.deferredRequests: # if there are deferred requests, return this protocol
self._busy.append(protocol)
self.deferredRequests.popleft().callback(protocol)
else:
self._idle.append(protocol)
protocol.busy = False
def deferRequest(self):
d = defer.Deferred()
self.deferredRequests.append(d)
return d
def markDead(self, reason):
log.msg('Host[%s:%s] is dead' % self.addr)
if self.dead:
return
self.dead = True
while self.deferredRequests:
self.deferredRequests.popleft().errback(reason)
self._busy = []
self._idle = []
self._size = 0
def create(self):
self.factory.createProtocol(self.addr)
return self.deferRequest()
def get(self, wait=True):
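# Return an idle connection if one exists; otherwise open a new one while under maxConn,
# or queue a Deferred that fires once a busy connection is freed.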
try:
p = self._idle.pop(0)
self._busy.append(p)
self.touch(p)
return p
except IndexError:
if not wait:
return None
if self._size < self.maxConn:
return self.create()
elif self._busy:
# wait busy conn to be idle
return self.deferRequest()
return None # should not happen if maxConn > 0
def touch(self, p):
p.last_access = int(time.time())
p.busy = True
def free(self, protocol):
assert protocol.addr == self.addr
if self.deferredRequests: # if there are deferred requests, return this protocol
self.touch(protocol)
self.deferredRequests.popleft().callback(protocol)
return
try:
self._busy.remove(protocol)
except:
log.err()
self._idle.append(protocol)
protocol.busy = False
def remove(self, protocol):
assert protocol.addr == self.addr
if protocol.busy:
ls = (self._busy, self._idle)
else:
ls = (self._idle, self._busy)
try:
ls[0].remove(protocol)
self._size -= 1
except:
try:
ls[1].remove(protocol)
self._size -= 1
except: # already removed
pass
def maintain(self):
expire = int(time.time()) - self.maxIdleTime
idles = copy.copy(self._idle)
for p in idles:
if not p.connected:
log.msg('removing disconnected protocol %s from idle pool' % str(p))
self.remove(p)
elif p.last_access < expire:
log.msg('removing expired protocol %s' % str(p))
p.loseConnection()
self.remove(p)
busies = copy.copy(self._busy)
for p in busies:
if not p.connected:
log.msg('removing disconnected protocol %s from busy pool' % str(p))
self.remove(p)
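# --- Illustrative sketch (not part of the original module) -----------------
# How the pool above is typically driven; the flow is inferred from the
# methods defined in this class, and the addr/factory values are made up:
#
#   pool = ClientProtocolPool(('127.0.0.1', 4242), factory)  # hypothetical addr/factory
#   p = pool.get()        # an idle protocol, a Deferred (new or busy conn), or None
#   ...use the protocol, then...
#   pool.free(p)          # back to the idle list, or handed to a waiting Deferred
#   pool.maintain()       # periodically drops disconnected / expired idle protocols
# ----------------------------------------------------------------------------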
class PooledClientFactory(protocol.ClientFactory):
protocol = BaseClientProtocol
def __init__(self, pool):
self.pool = pool
def buildProtocol(self, addr):
p = protocol.ClientFactory.buildProtocol(self, addr)
p.addr = (addr.host, addr.port)
p.pool = self.pool
return p
def clientConnectionLost(self, connector, reason):
addr = (connector.host, connector.port)
self.pool.connectionLost(addr, reason)
def clientConnectionFailed(self, connector, reason):
addr = (connector.host, connector.port)
self.pool.connectionFailed(addr, reason)
class BaseClient(object):
FactoryClass = PooledClientFactory
def __init__(self, hosts=None, connector=None, connTimeout=30, maintTime=300, deadRetryTime=5, retry=0, **kwargs):
self.factory = self.FactoryClass(self)
self.connector = connector or reactor
self.connTimeout = connTimeout
self.maintTime = maintTime
self.deadRetryTime = deadRetryTime
self.retry = retry
self.hosts = []
self.hostsPool = {}
self.hostsDead = {}
if hosts is not None:
for host in hosts:
ip, port = host.split(":")
port = int(port)
self.addHost((ip, port))
self.maintID = reactor.callLater(self.maintTime, self._selfMaintain)
def addHost(self, addr):
pool = self.getPool(addr)
self.hosts.append(pool)
def protocolCreated(self, protocol):
addr = protocol.addr
pool = self.getPool(addr)
pool.protocolCreated(protocol)
if self.hostsDead.has_key(addr):
del self.hostsDead[addr] # hostsDead is a dict; dicts have no remove()
def getPool(self, addr):
if self.hostsPool.has_key(addr):
return self.hostsPool[addr]
pool = self.hostsPool[addr] = ClientProtocolPool(addr, self)
return pool
def protocolConnectionLost(self, protocol, reason):
addr = protocol.addr
pool = self.getPool(addr)
pool.remove(protocol)
def connectionLost(self, addr, reason):
self._maybeDead(addr, reason)
def connectionFailed(self, addr, reason):
self._maybeDead(addr, reason)
def _maybeDead(self, addr, reason):
if reason.check(error.ConnectionDone, error.ConnectionLost):
return
pool = self.getPool(addr)
if pool.dead:
return
#if reason.check(ConnectionRefusedError,...):
pool.markDead(reason)
def createProtocol(self, addr):
self.connector.connectTCP(addr[0], addr[1], self.factory, self.connTimeout)
def freeProtocol(self, protocol):
pool = self.getPool(protocol.addr)
pool.free(protocol)
def getProtocol(self, addr=None):
if addr is not None:
now = time.time()
# try dead hosts every 5 seconds
# if host is down and last down time is
# less than 5 seconds, ignore
if addr in self.hostsDead and self.hostsDead[addr] > now - self.deadRetryTime:
return None
return self.getPool(addr).get()
else:
p = self._getRandomProtocol(wait=False)
if p is not None:
return p
# no idle protocol found
return self._getRandomProtocol(wait=True)
def _getRandomProtocol(self, wait=True):
| size = len(self.hostsPool)
if size == 0:
return None
if size > 15:
tries = 15
else:
tries = size
pools = self.hostsPool.values()
idx = random.randint(1, size)
for t in xrange(tries):
pool = pools[idx % size]
idx += 1
p = pool.get(wait)
if p is not None: | identifier_body |
|
base.py | finished writing data."""
self.finishedWriting = True
def abortWithError(self, err):
if self.stream is not None:
self.stream.finish(err)
if self.responseDefer:
d = self.responseDefer
del self.responseDefer
d.errback(err)
self.finishRequest()
def connectionLost(self, reason):
if not self.finished:
self.abortWithError(reason)
def createResponse(self):
if self.length:
self.stream = stream_mod.ProducerStream()
self.response = self.channel.createResponse(self)
self.stream.registerProducer(self, True)
else:
self.response = self.channel.createResponse(self)
def processResponse(self, result=None):
if result is None:
result = self.response
if self.responseDefer:
d = self.responseDefer
del self.responseDefer
d.callback(result)
def handleContentChunk(self, data):
if self.stream:
self.stream.write(data)
def registerProducer(self, producer, streaming):
"""Register a producer.
"""
self.channel.registerProducer(producer, streaming)
def unregisterProducer(self):
self.channel.unregisterProducer()
# producer interface
def pauseProducing(self):
if not self.finishedReading:
self.channel.pauseProducing()
def resumeProducing(self):
if not self.finishedReading:
self.channel.resumeProducing()
def stopProducing(self):
if not self.finishedReading:
self.channel.stopProducing()
class BaseClientProtocol(basic.LineReceiver, policies.TimeoutMixin, object):
"""Base Client Protocol"""
timeOut = 60
chanRequest = None
ChannelRequest = BaseClientChannelRequest
pool = None
def __init__(self):
self._requests = deque()
def submitRequest(self, request, *args, **kwargs):
req = self.ChannelRequest(self, request, *args, **kwargs)
if self.chanRequest is not None:
self._requests.append(req)
else:
self.chanRequest = req
req.submit()
return req.responseDefer
def write(self, data):
self.setTimeout(self.timeOut)
self.transport.write(data)
def writeSequence(self, sequence):
self.setTimeout(self.timeOut)
self.transport.writeSequence(sequence)
def lineReceived(self, line):
if not self.chanRequest:
# server sending random unrequested data.
self.transport.loseConnection()
return
self.setTimeout(None)
try:
self.chanRequest.lineReceived(line)
self.setTimeout(self.timeOut)
except Exception, err:
self.chanRequest.abortWithError(failure.Failure(err))
def rawDataReceived(self, data):
"""Handle incoming content."""
if not self.chanRequest:
# server sending random unrequested data.
self.transport.loseConnection()
return
self.setTimeout(None)
try:
self.chanRequest.rawDataReceived(data)
self.setTimeout(self.timeOut)
except Exception, err:
self.chanRequest.abortWithError(failure.Failure(err))
def createResponse(self, chanRequest):
raise NotImplementedError, "must be implemented in subclass"
def requestFinished(self, request):
"""Request done."""
if self.chanRequest is not None:
del self.chanRequest
self.setTimeout(None)
if self._requests:
self.chanRequest = self._requests.popleft()
self.chanRequest.submit()
return
if self.pool and not self.transport.disconnecting:
self.pool.freeProtocol(self)
def connectionLost(self, reason):
self.setTimeout(None)
# Tell all requests to abort.
if self.chanRequest is not None:
req = self.chanRequest
del self.chanRequest
req.connectionLost(reason)
while self._requests:
self._requests.popleft().connectionLost(reason)
if self.pool:
self.pool.protocolConnectionLost(self, reason)
def loseConnection(self):
self.transport.loseConnection()
def makeConnection(self, transport):
basic.LineReceiver.makeConnection(self, transport)
if self.pool:
self.pool.protocolCreated(self)
def registerProducer(self, producer, streaming):
"""Register a producer."""
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
self.transport.unregisterProducer()
class ClientProtocolPool(object):
def __init__(self, addr, factory, maxConn=50, maxIdleTime=600):
self.addr = addr
self.factory = factory
self.maxConn = maxConn
self.maxIdleTime = maxIdleTime
self._busy = []
self._idle = []
self._size = 0
self.dead = False
self.deferredRequests = deque()
def protocolCreated(self, protocol):
if self.dead:
self.dead = False
self._size += 1
self.touch(protocol)
if self.deferredRequests: # if there are deferred requests, return this protocol
self._busy.append(protocol)
self.deferredRequests.popleft().callback(protocol)
else:
self._idle.append(protocol)
protocol.busy = False
def deferRequest(self):
d = defer.Deferred()
self.deferredRequests.append(d)
return d
def markDead(self, reason):
log.msg('Host[%s:%s] is dead' % self.addr)
if self.dead:
return
self.dead = True
while self.deferredRequests:
self.deferredRequests.popleft().errback(reason)
self._busy = []
self._idle = []
self._size = 0
def create(self):
self.factory.createProtocol(self.addr)
return self.deferRequest()
def get(self, wait=True):
try:
p = self._idle.pop(0)
self._busy.append(p)
self.touch(p)
return p
except IndexError:
if not wait:
return None
if self._size < self.maxConn:
return self.create()
elif self._busy:
# wait for a busy connection to become idle
return self.deferRequest()
return None # should not happen if maxConn > 0
def touch(self, p):
p.last_access = int(time.time())
p.busy = True
def free(self, protocol):
assert protocol.addr == self.addr
if self.deferredRequests: # if there are deferred requests, return this protocol
self.touch(protocol)
self.deferredRequests.popleft().callback(protocol)
return
try:
self._busy.remove(protocol)
except:
log.err()
self._idle.append(protocol)
protocol.busy = False
def remove(self, protocol):
assert protocol.addr == self.addr
if protocol.busy:
ls = (self._busy, self._idle)
else:
ls = (self._idle, self._busy)
try:
ls[0].remove(protocol)
self._size -= 1
except:
try:
ls[1].remove(protocol)
self._size -= 1
except: # already removed
pass
def maintain(self):
expire = int(time.time()) - self.maxIdleTime
idles = copy.copy(self._idle)
for p in idles:
if not p.connected:
log.msg('removing disconnected protocol %s from idle pool' % str(p))
self.remove(p)
elif p.last_access < expire:
log.msg('removing expired protocol %s' % str(p))
p.loseConnection()
self.remove(p)
busies = copy.copy(self._busy)
for p in busies:
if not p.connected:
log.msg('removing disconnected protocol %s from busy pool' % str(p))
self.remove(p)
class PooledClientFactory(protocol.ClientFactory):
protocol = BaseClientProtocol
def __init__(self, pool):
self.pool = pool
def buildProtocol(self, addr):
p = protocol.ClientFactory.buildProtocol(self, addr)
p.addr = (addr.host, addr.port)
p.pool = self.pool
return p
def clientConnectionLost(self, connector, reason):
addr = (connector.host, connector.port)
self.pool.connectionLost(addr, reason)
def clientConnectionFailed(self, connector, reason):
addr = (connector.host, connector.port)
self.pool.connectionFailed(addr, reason)
class BaseClient(object):
FactoryClass = PooledClientFactory
def __init__(self, hosts=None, connector=None, connTimeout=30, maintTime=300, deadRetryTime=5, retry=0, **kwargs):
self.factory = self.FactoryClass(self)
self.connector = connector or reactor
self.connTimeout = connTimeout
self.maintTime = maintTime
self.deadRetryTime = deadRetryTime
self.retry = retry
self.hosts = []
self.hostsPool = {}
self.hostsDead = {}
if hosts is not None:
for host in hosts:
ip, port = host.split(":")
port = int(port)
self.addHost((ip, port))
self.maintID = reactor.callLater(self.maintTime, self._selfMaintain)
def addHost(self, addr):
pool = self.getPool(addr)
self.hosts.append(pool)
def protocolCreated(self, protocol):
addr = protocol.addr
pool = self.getPool(addr)
pool.protocolCreated(protocol)
if self.hostsDead.has_key(addr):
| self.hostsDead.remove(addr) | conditional_block |
|
base.py | Request(self):
self.finished = True
self.channel.requestFinished(self)
def submit(self):
self.submitHeaders()
if self.request.stream:
d = stream_mod.StreamProducer(self.request.stream).beginProducing(self)
d.addCallback(self.finishWriting).addErrback(self.abortWithError)
else:
self.finishWriting(None)
def submitHeaders(self):
"""Write request headers"""
r = self.request
self.channel.write("%s %s\r\n" % (r.cmd, url.encode_url_string(r.headers)))
def write(self, data):
if not data:
return
self.channel.write(data)
def finishWriting(self, x=None):
"""We are finished writing data."""
self.finishedWriting = True
def abortWithError(self, err):
if self.stream is not None:
self.stream.finish(err)
if self.responseDefer:
d = self.responseDefer
del self.responseDefer
d.errback(err)
self.finishRequest()
def connectionLost(self, reason):
if not self.finished:
self.abortWithError(reason)
def createResponse(self):
if self.length:
self.stream = stream_mod.ProducerStream()
self.response = self.channel.createResponse(self)
self.stream.registerProducer(self, True)
else:
self.response = self.channel.createResponse(self)
def processResponse(self, result=None):
if result is None:
result = self.response
if self.responseDefer:
d = self.responseDefer
del self.responseDefer
d.callback(result)
def handleContentChunk(self, data):
if self.stream:
self.stream.write(data)
def registerProducer(self, producer, streaming):
"""Register a producer.
"""
self.channel.registerProducer(producer, streaming)
def unregisterProducer(self):
self.channel.unregisterProducer()
# producer interface
def pauseProducing(self):
if not self.finishedReading:
self.channel.pauseProducing()
def resumeProducing(self):
if not self.finishedReading:
self.channel.resumeProducing()
def stopProducing(self):
if not self.finishedReading:
self.channel.stopProducing()
class BaseClientProtocol(basic.LineReceiver, policies.TimeoutMixin, object):
"""Base Client Protocol"""
timeOut = 60
chanRequest = None
ChannelRequest = BaseClientChannelRequest
pool = None
def __init__(self):
self._requests = deque()
def submitRequest(self, request, *args, **kwargs):
req = self.ChannelRequest(self, request, *args, **kwargs)
if self.chanRequest is not None:
self._requests.append(req)
else:
self.chanRequest = req
req.submit()
return req.responseDefer
def write(self, data):
self.setTimeout(self.timeOut)
self.transport.write(data)
def writeSequence(self, sequence): | def lineReceived(self, line):
if not self.chanRequest:
# server sending random unrequested data.
self.transport.loseConnection()
return
self.setTimeout(None)
try:
self.chanRequest.lineReceived(line)
self.setTimeout(self.timeOut)
except Exception, err:
self.chanRequest.abortWithError(failure.Failure(err))
def rawDataReceived(self, data):
"""Handle incoming content."""
if not self.chanRequest:
# server sending random unrequested data.
self.transport.loseConnection()
return
self.setTimeout(None)
try:
self.chanRequest.rawDataReceived(data)
self.setTimeout(self.timeOut)
except Exception, err:
self.chanRequest.abortWithError(failure.Failure(err))
def createResponse(self, chanRequest):
raise NotImplementedError, "must be implemented in subclass"
def requestFinished(self, request):
"""Request done."""
if self.chanRequest is not None:
del self.chanRequest
self.setTimeout(None)
if self._requests:
self.chanRequest = self._requests.popleft()
self.chanRequest.submit()
return
if self.pool and not self.transport.disconnecting:
self.pool.freeProtocol(self)
def connectionLost(self, reason):
self.setTimeout(None)
# Tell all requests to abort.
if self.chanRequest is not None:
req = self.chanRequest
del self.chanRequest
req.connectionLost(reason)
while self._requests:
self._requests.popleft().connectionLost(reason)
if self.pool:
self.pool.protocolConnectionLost(self, reason)
def loseConnection(self):
self.transport.loseConnection()
def makeConnection(self, transport):
basic.LineReceiver.makeConnection(self, transport)
if self.pool:
self.pool.protocolCreated(self)
def registerProducer(self, producer, streaming):
"""Register a producer."""
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
self.transport.unregisterProducer()
class ClientProtocolPool(object):
def __init__(self, addr, factory, maxConn=50, maxIdleTime=600):
self.addr = addr
self.factory = factory
self.maxConn = maxConn
self.maxIdleTime = maxIdleTime
self._busy = []
self._idle = []
self._size = 0
self.dead = False
self.deferredRequests = deque()
def protocolCreated(self, protocol):
if self.dead:
self.dead = False
self._size += 1
self.touch(protocol)
if self.deferredRequests: # if there are deferred requests, return this protocol
self._busy.append(protocol)
self.deferredRequests.popleft().callback(protocol)
else:
self._idle.append(protocol)
protocol.busy = False
def deferRequest(self):
d = defer.Deferred()
self.deferredRequests.append(d)
return d
def markDead(self, reason):
log.msg('Host[%s:%s] is dead' % self.addr)
if self.dead:
return
self.dead = True
while self.deferredRequests:
self.deferredRequests.popleft().errback(reason)
self._busy = []
self._idle = []
self._size = 0
def create(self):
self.factory.createProtocol(self.addr)
return self.deferRequest()
def get(self, wait=True):
try:
p = self._idle.pop(0)
self._busy.append(p)
self.touch(p)
return p
except IndexError:
if not wait:
return None
if self._size < self.maxConn:
return self.create()
elif self._busy:
# wait for a busy connection to become idle
return self.deferRequest()
return None # should not happen if maxConn > 0
def touch(self, p):
p.last_access = int(time.time())
p.busy = True
def free(self, protocol):
assert protocol.addr == self.addr
if self.deferredRequests: # if there are deferred requests, return this protocol
self.touch(protocol)
self.deferredRequests.popleft().callback(protocol)
return
try:
self._busy.remove(protocol)
except:
log.err()
self._idle.append(protocol)
protocol.busy = False
def remove(self, protocol):
assert protocol.addr == self.addr
if protocol.busy:
ls = (self._busy, self._idle)
else:
ls = (self._idle, self._busy)
try:
ls[0].remove(protocol)
self._size -= 1
except:
try:
ls[1].remove(protocol)
self._size -= 1
except: # already removed
pass
def maintain(self):
expire = int(time.time()) - self.maxIdleTime
idles = copy.copy(self._idle)
for p in idles:
if not p.connected:
log.msg('removing disconnected protocol %s from idle pool' % str(p))
self.remove(p)
elif p.last_access < expire:
log.msg('removing expired protocol %s' % str(p))
p.loseConnection()
self.remove(p)
busies = copy.copy(self._busy)
for p in busies:
if not p.connected:
log.msg('removing disconnected protocol %s from busy pool' % str(p))
self.remove(p)
class PooledClientFactory(protocol.ClientFactory):
protocol = BaseClientProtocol
def __init__(self, pool):
self.pool = pool
def buildProtocol(self, addr):
p = protocol.ClientFactory.buildProtocol(self, addr)
p.addr = (addr.host, addr.port)
p.pool = self.pool
return p
def clientConnectionLost(self, connector, reason):
addr = (connector.host, connector.port)
self.pool.connectionLost(addr, reason)
def clientConnectionFailed(self, connector, reason):
addr = (connector.host, connector.port)
self.pool.connectionFailed(addr, reason)
class BaseClient(object):
FactoryClass = PooledClientFactory
def __init__(self, hosts=None, connector=None, connTimeout=30, maintTime=300, deadRetryTime=5, retry=0, **kwargs):
self.factory = self.FactoryClass(self)
self.connector = connector or reactor
self.connTimeout = connTimeout
self.maintTime = maintTime
self.deadRetryTime = deadRetryTime
self.retry = retry
| self.setTimeout(self.timeOut)
self.transport.writeSequence(sequence)
| random_line_split |
base.py | Request(self):
self.finished = True
self.channel.requestFinished(self)
def submit(self):
self.submitHeaders()
if self.request.stream:
d = stream_mod.StreamProducer(self.request.stream).beginProducing(self)
d.addCallback(self.finishWriting).addErrback(self.abortWithError)
else:
self.finishWriting(None)
def submitHeaders(self):
"""Write request headers"""
r = self.request
self.channel.write("%s %s\r\n" % (r.cmd, url.encode_url_string(r.headers)))
def write(self, data):
if not data:
return
self.channel.write(data)
def finishWriting(self, x=None):
"""We are finished writing data."""
self.finishedWriting = True
def abortWithError(self, err):
if self.stream is not None:
self.stream.finish(err)
if self.responseDefer:
d = self.responseDefer
del self.responseDefer
d.errback(err)
self.finishRequest()
def connectionLost(self, reason):
if not self.finished:
self.abortWithError(reason)
def createResponse(self):
if self.length:
self.stream = stream_mod.ProducerStream()
self.response = self.channel.createResponse(self)
self.stream.registerProducer(self, True)
else:
self.response = self.channel.createResponse(self)
def processResponse(self, result=None):
if result is None:
result = self.response
if self.responseDefer:
d = self.responseDefer
del self.responseDefer
d.callback(result)
def handleContentChunk(self, data):
if self.stream:
self.stream.write(data)
def registerProducer(self, producer, streaming):
"""Register a producer.
"""
self.channel.registerProducer(producer, streaming)
def unregisterProducer(self):
self.channel.unregisterProducer()
# producer interface
def pauseProducing(self):
if not self.finishedReading:
self.channel.pauseProducing()
def resumeProducing(self):
if not self.finishedReading:
self.channel.resumeProducing()
def stopProducing(self):
if not self.finishedReading:
self.channel.stopProducing()
class BaseClientProtocol(basic.LineReceiver, policies.TimeoutMixin, object):
"""Base Client Protocol"""
timeOut = 60
chanRequest = None
ChannelRequest = BaseClientChannelRequest
pool = None
def __init__(self):
self._requests = deque()
def submitRequest(self, request, *args, **kwargs):
req = self.ChannelRequest(self, request, *args, **kwargs)
if self.chanRequest is not None:
self._requests.append(req)
else:
self.chanRequest = req
req.submit()
return req.responseDefer
def write(self, data):
self.setTimeout(self.timeOut)
self.transport.write(data)
def writeSequence(self, sequence):
self.setTimeout(self.timeOut)
self.transport.writeSequence(sequence)
def | (self, line):
if not self.chanRequest:
# server sending random unrequested data.
self.transport.loseConnection()
return
self.setTimeout(None)
try:
self.chanRequest.lineReceived(line)
self.setTimeout(self.timeOut)
except Exception, err:
self.chanRequest.abortWithError(failure.Failure(err))
def rawDataReceived(self, data):
"""Handle incoming content."""
if not self.chanRequest:
# server sending random unrequested data.
self.transport.loseConnection()
return
self.setTimeout(None)
try:
self.chanRequest.rawDataReceived(data)
self.setTimeout(self.timeOut)
except Exception, err:
self.chanRequest.abortWithError(failure.Failure(err))
def createResponse(self, chanRequest):
raise NotImplementedError, "must be implemented in subclass"
def requestFinished(self, request):
"""Request done."""
if self.chanRequest is not None:
del self.chanRequest
self.setTimeout(None)
if self._requests:
self.chanRequest = self._requests.popleft()
self.chanRequest.submit()
return
if self.pool and not self.transport.disconnecting:
self.pool.freeProtocol(self)
def connectionLost(self, reason):
self.setTimeout(None)
# Tell all requests to abort.
if self.chanRequest is not None:
req = self.chanRequest
del self.chanRequest
req.connectionLost(reason)
while self._requests:
self._requests.popleft().connectionLost(reason)
if self.pool:
self.pool.protocolConnectionLost(self, reason)
def loseConnection(self):
self.transport.loseConnection()
def makeConnection(self, transport):
basic.LineReceiver.makeConnection(self, transport)
if self.pool:
self.pool.protocolCreated(self)
def registerProducer(self, producer, streaming):
"""Register a producer."""
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
self.transport.unregisterProducer()
class ClientProtocolPool(object):
def __init__(self, addr, factory, maxConn=50, maxIdleTime=600):
self.addr = addr
self.factory = factory
self.maxConn = maxConn
self.maxIdleTime = maxIdleTime
self._busy = []
self._idle = []
self._size = 0
self.dead = False
self.deferredRequests = deque()
def protocolCreated(self, protocol):
if self.dead:
self.dead = False
self._size += 1
self.touch(protocol)
if self.deferredRequests: # if there are deferred requests, return this protocol
self._busy.append(protocol)
self.deferredRequests.popleft().callback(protocol)
else:
self._idle.append(protocol)
protocol.busy = False
def deferRequest(self):
d = defer.Deferred()
self.deferredRequests.append(d)
return d
def markDead(self, reason):
log.msg('Host[%s:%s] is dead' % self.addr)
if self.dead:
return
self.dead = True
while self.deferredRequests:
self.deferredRequests.popleft().errback(reason)
self._busy = []
self._idle = []
self._size = 0
def create(self):
self.factory.createProtocol(self.addr)
return self.deferRequest()
def get(self, wait=True):
try:
p = self._idle.pop(0)
self._busy.append(p)
self.touch(p)
return p
except IndexError:
if not wait:
return None
if self._size < self.maxConn:
return self.create()
elif self._busy:
# wait for a busy connection to become idle
return self.deferRequest()
return None # should not happen if maxConn > 0
def touch(self, p):
p.last_access = int(time.time())
p.busy = True
def free(self, protocol):
assert protocol.addr == self.addr
if self.deferredRequests: # if there are deferred requests, return this protocol
self.touch(protocol)
self.deferredRequests.popleft().callback(protocol)
return
try:
self._busy.remove(protocol)
except:
log.err()
self._idle.append(protocol)
protocol.busy = False
def remove(self, protocol):
assert protocol.addr == self.addr
if protocol.busy:
ls = (self._busy, self._idle)
else:
ls = (self._idle, self._busy)
try:
ls[0].remove(protocol)
self._size -= 1
except:
try:
ls[1].remove(protocol)
self._size -= 1
except: # already removed
pass
def maintain(self):
expire = int(time.time()) - self.maxIdleTime
idles = copy.copy(self._idle)
for p in idles:
if not p.connected:
log.msg('removing disconnected protocol %s from idle pool' % str(p))
self.remove(p)
elif p.last_access < expire:
log.msg('removing expired protocol %s' % str(p))
p.loseConnection()
self.remove(p)
busies = copy.copy(self._busy)
for p in busies:
if not p.connected:
log.msg('removing disconnected protocol %s from busy pool' % str(p))
self.remove(p)
class PooledClientFactory(protocol.ClientFactory):
protocol = BaseClientProtocol
def __init__(self, pool):
self.pool = pool
def buildProtocol(self, addr):
p = protocol.ClientFactory.buildProtocol(self, addr)
p.addr = (addr.host, addr.port)
p.pool = self.pool
return p
def clientConnectionLost(self, connector, reason):
addr = (connector.host, connector.port)
self.pool.connectionLost(addr, reason)
def clientConnectionFailed(self, connector, reason):
addr = (connector.host, connector.port)
self.pool.connectionFailed(addr, reason)
class BaseClient(object):
FactoryClass = PooledClientFactory
def __init__(self, hosts=None, connector=None, connTimeout=30, maintTime=300, deadRetryTime=5, retry=0, **kwargs):
self.factory = self.FactoryClass(self)
self.connector = connector or reactor
self.connTimeout = connTimeout
self.maintTime = maintTime
self.deadRetryTime = deadRetryTime
self.retry = retry
| lineReceived | identifier_name |
temperature_driver.py | C ADDRESS/BITS/SETTINGS
# -----------------------------------------------------------------------
_BME280_ADDRESS = const(0x77)
_BME280_CHIPID = const(0x58)
_BME280_REGISTER_CHIPID = const(0xD0)
_BME280_REGISTER_DIG_T1 = const(0x88)
_BME280_REGISTER_DIG_H1 = const(0xA1)
_BME280_REGISTER_DIG_H2 = const(0xE1)
_BME280_REGISTER_DIG_H3 = const(0xE3)
_BME280_REGISTER_DIG_H4 = const(0xE4)
_BME280_REGISTER_DIG_H5 = const(0xE5)
_BME280_REGISTER_DIG_H6 = const(0xE7)
_BME280_REGISTER_SOFTRESET = const(0xE0)
_BME280_REGISTER_CTRL_HUM = const(0xF2)
_BME280_REGISTER_STATUS = const(0xF3)
_BME280_REGISTER_CTRL_MEAS = const(0xF4)
_BME280_REGISTER_CONFIG = const(0xF5)
_BME280_REGISTER_PRESSUREDATA = const(0xF7)
_BME280_REGISTER_TEMPDATA = const(0xFA)
_BME280_REGISTER_HUMIDDATA = const(0xFD)
_BME280_PRESSURE_MIN_HPA = const(300)
_BME280_PRESSURE_MAX_HPA = const(1100)
_BME280_HUMIDITY_MIN = const(0)
_BME280_HUMIDITY_MAX = const(100)
class Adafruit_BME280:
"""Driver from BME280 Temperature, Humidity and Barometic Pressure sensor"""
def __init__(self):
"""Check the BME280 was found, read the coefficients and enable the sensor for continuous
reads"""
# Check device ID.
chip_id = self._read_byte(_BME280_REGISTER_CHIPID)
if _BME280_CHIPID != chip_id:
raise RuntimeError('Failed to find BME280! Chip ID 0x%x' % chip_id)
self._write_register_byte(_BME280_REGISTER_SOFTRESET, 0xB6)
time.sleep(0.5)
self._read_coefficients()
self.sea_level_pressure = 1013.25
"""Pressure in hectoPascals at sea level. Used to calibrate `altitude`."""
# turn on humidity oversample 16x
self._write_register_byte(_BME280_REGISTER_CTRL_HUM, 0x03)
self._t_fine = None
def _read_temperature(self):
# perform one measurement
self._write_register_byte(_BME280_REGISTER_CTRL_MEAS, 0xFE) # high res, forced mode
# Wait for conversion to complete
while self._read_byte(_BME280_REGISTER_STATUS) & 0x08:
time.sleep(0.002)
raw_temperature = self._read24(_BME280_REGISTER_TEMPDATA) / 16 # lowest 4 bits get dropped
#print("raw temp: ", UT)
var1 = (raw_temperature / 16384.0 - self._temp_calib[0] / 1024.0) * self._temp_calib[1]
#print(var1)
var2 = ((raw_temperature / 131072.0 - self._temp_calib[0] / 8192.0) * (
raw_temperature / 131072.0 - self._temp_calib[0] / 8192.0)) * self._temp_calib[2]
#print(var2)
self._t_fine = int(var1 + var2)
#print("t_fine: ", self.t_fine)
@property
def temperature(self):
"""The compensated temperature in degrees celsius."""
self._read_temperature()
return self._t_fine / 5120.0
@property
def pressure(self):
"""The compensated pressure in hectoPascals."""
self._read_temperature()
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped
var1 = float(self._t_fine) / 2.0 - 64000.0
var2 = var1 * var1 * self._pressure_calib[5] / 32768.0
var2 = var2 + var1 * self._pressure_calib[4] * 2.0
var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0
var3 = self._pressure_calib[2] * var1 * var1 / 524288.0
var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]
if var1 == 0:
|
if var1:
pressure = 1048576.0 - adc
pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1
var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0
var2 = pressure * self._pressure_calib[7] / 32768.0
pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0
pressure /= 100
if pressure < _BME280_PRESSURE_MIN_HPA:
return _BME280_PRESSURE_MIN_HPA
if pressure > _BME280_PRESSURE_MAX_HPA:
return _BME280_PRESSURE_MAX_HPA
return pressure
else:
return _BME280_PRESSURE_MIN_HPA
@property
def humidity(self):
"""The relative humidity in RH %"""
self._read_temperature()
hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)
#print("Humidity data: ", hum)
adc = float(hum[0] << 8 | hum[1])
#print("adc:", adc)
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
var1 = float(self._t_fine) - 76800.0
#print("var1 ", var1)
var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)
#print("var2 ",var2)
var3 = adc - var2
#print("var3 ",var3)
var4 = self._humidity_calib[1] / 65536.0
#print("var4 ",var4)
var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)
#print("var5 ",var5)
var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5
#print("var6 ",var6)
var6 = var3 * var4 * (var5 * var6)
humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)
if humidity > _BME280_HUMIDITY_MAX:
return _BME280_HUMIDITY_MAX
if humidity < _BME280_HUMIDITY_MIN:
return _BME280_HUMIDITY_MIN
# else...
return humidity
@property
def altitude(self):
"""The altitude based on current ``pressure`` versus the sea level pressure
(``sea_level_pressure``) - which you must enter ahead of time."""
pressure = self.pressure # in Si units for hPascal
return 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903))
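# Note (added for clarity): this is the standard international barometric
# formula, altitude ~= 44330 * (1 - (p / p0)**0.1903) metres. As a rough,
# hand-computed example: with p = 1000 hPa and the default
# sea_level_pressure of 1013.25 hPa, the result is about 111 m.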
def _read_coefficients(self):
"""Read & save the calibration coefficients"""
coeff = self._read_register(_BME280_REGISTER_DIG | return 0 | conditional_block |
temperature_driver.py | 80_REGISTER_CTRL_MEAS, 0xFE) # high res, forced mode
# Wait for conversion to complete
while self._read_byte(_BME280_REGISTER_STATUS) & 0x08:
time.sleep(0.002)
raw_temperature = self._read24(_BME280_REGISTER_TEMPDATA) / 16 # lowest 4 bits get dropped
#print("raw temp: ", UT)
var1 = (raw_temperature / 16384.0 - self._temp_calib[0] / 1024.0) * self._temp_calib[1]
#print(var1)
var2 = ((raw_temperature / 131072.0 - self._temp_calib[0] / 8192.0) * (
raw_temperature / 131072.0 - self._temp_calib[0] / 8192.0)) * self._temp_calib[2]
#print(var2)
self._t_fine = int(var1 + var2)
#print("t_fine: ", self.t_fine)
@property
def temperature(self):
"""The compensated temperature in degrees celsius."""
self._read_temperature()
return self._t_fine / 5120.0
@property
def pressure(self):
"""The compensated pressure in hectoPascals."""
self._read_temperature()
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped
var1 = float(self._t_fine) / 2.0 - 64000.0
var2 = var1 * var1 * self._pressure_calib[5] / 32768.0
var2 = var2 + var1 * self._pressure_calib[4] * 2.0
var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0
var3 = self._pressure_calib[2] * var1 * var1 / 524288.0
var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]
if var1 == 0:
return 0
if var1:
pressure = 1048576.0 - adc
pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1
var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0
var2 = pressure * self._pressure_calib[7] / 32768.0
pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0
pressure /= 100
if pressure < _BME280_PRESSURE_MIN_HPA:
return _BME280_PRESSURE_MIN_HPA
if pressure > _BME280_PRESSURE_MAX_HPA:
return _BME280_PRESSURE_MAX_HPA
return pressure
else:
return _BME280_PRESSURE_MIN_HPA
@property
def humidity(self):
"""The relative humidity in RH %"""
self._read_temperature()
hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)
#print("Humidity data: ", hum)
adc = float(hum[0] << 8 | hum[1])
#print("adc:", adc)
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
var1 = float(self._t_fine) - 76800.0
#print("var1 ", var1)
var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)
#print("var2 ",var2)
var3 = adc - var2
#print("var3 ",var3)
var4 = self._humidity_calib[1] / 65536.0
#print("var4 ",var4)
var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)
#print("var5 ",var5)
var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5
#print("var6 ",var6)
var6 = var3 * var4 * (var5 * var6)
humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)
if humidity > _BME280_HUMIDITY_MAX:
return _BME280_HUMIDITY_MAX
if humidity < _BME280_HUMIDITY_MIN:
return _BME280_HUMIDITY_MIN
# else...
return humidity
@property
def altitude(self):
"""The altitude based on current ``pressure`` versus the sea level pressure
(``sea_level_pressure``) - which you must enter ahead of time."""
pressure = self.pressure # in Si units for hPascal
return 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903))
def _read_coefficients(self):
"""Read & save the calibration coefficients"""
coeff = self._read_register(_BME280_REGISTER_DIG_T1, 24)
coeff = list(struct.unpack('<HhhHhhhhhhhh', bytes(coeff)))
coeff = [float(i) for i in coeff]
self._temp_calib = coeff[:3]
self._pressure_calib = coeff[3:]
self._humidity_calib = [0]*6
self._humidity_calib[0] = self._read_byte(_BME280_REGISTER_DIG_H1)
coeff = self._read_register(_BME280_REGISTER_DIG_H2, 7)
coeff = list(struct.unpack('<hBBBBb', bytes(coeff)))
self._humidity_calib[1] = float(coeff[0])
self._humidity_calib[2] = float(coeff[1])
self._humidity_calib[3] = float((coeff[2] << 4) | (coeff[3] & 0xF))
self._humidity_calib[4] = float((coeff[4] << 4) | (coeff[3] >> 4))
self._humidity_calib[5] = float(coeff[5])
def _read_byte(self, register):
"""Read a byte register value and return it"""
return self._read_register(register, 1)[0]
def _read24(self, register):
"""Read an unsigned 24-bit value as a floating point and return it."""
ret = 0.0
for b in self._read_register(register, 3):
ret *= 256.0
ret += float(b & 0xFF)
return ret
def _read_register(self, register, length):
raise NotImplementedError()
def _write_register_byte(self, register, value):
raise NotImplementedError()
class Adafruit_BME280_I2C(Adafruit_BME280):
| """Driver for BME280 connected over I2C"""
def __init__(self, i2c, address=_BME280_ADDRESS):
import adafruit_bus_device.i2c_device as i2c_device
self._i2c = i2c_device.I2CDevice(i2c, address)
super().__init__()
def _read_register(self, register, length):
with self._i2c as i2c:
i2c.write(bytes([register & 0xFF]))
result = bytearray(length)
i2c.readinto(result)
#print("$%02X => %s" % (register, [hex(i) for i in result]))
return result
def _write_register_byte(self, register, value):
with self._i2c as i2c:
i2c.write(bytes([register & 0xFF, value & 0xFF]))
#print("$%02X <= 0x%02X" % (register, value))
| identifier_body |
|
temperature_driver.py | 8192.0)) * self._temp_calib[2]
#print(var2)
self._t_fine = int(var1 + var2)
#print("t_fine: ", self.t_fine)
@property
def temperature(self):
"""The compensated temperature in degrees celsius."""
self._read_temperature()
return self._t_fine / 5120.0
@property
def pressure(self):
"""The compensated pressure in hectoPascals."""
self._read_temperature()
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped
var1 = float(self._t_fine) / 2.0 - 64000.0
var2 = var1 * var1 * self._pressure_calib[5] / 32768.0
var2 = var2 + var1 * self._pressure_calib[4] * 2.0
var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0
var3 = self._pressure_calib[2] * var1 * var1 / 524288.0
var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]
if var1 == 0:
return 0
if var1:
pressure = 1048576.0 - adc
pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1
var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0
var2 = pressure * self._pressure_calib[7] / 32768.0
pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0
pressure /= 100
if pressure < _BME280_PRESSURE_MIN_HPA:
return _BME280_PRESSURE_MIN_HPA
if pressure > _BME280_PRESSURE_MAX_HPA:
return _BME280_PRESSURE_MAX_HPA
return pressure
else:
return _BME280_PRESSURE_MIN_HPA
@property
def humidity(self):
"""The relative humidity in RH %"""
self._read_temperature()
hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)
#print("Humidity data: ", hum)
adc = float(hum[0] << 8 | hum[1])
#print("adc:", adc)
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
var1 = float(self._t_fine) - 76800.0
#print("var1 ", var1)
var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)
#print("var2 ",var2)
var3 = adc - var2
#print("var3 ",var3)
var4 = self._humidity_calib[1] / 65536.0
#print("var4 ",var4)
var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)
#print("var5 ",var5)
var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5
#print("var6 ",var6)
var6 = var3 * var4 * (var5 * var6)
humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)
if humidity > _BME280_HUMIDITY_MAX:
return _BME280_HUMIDITY_MAX
if humidity < _BME280_HUMIDITY_MIN:
return _BME280_HUMIDITY_MIN
# else...
return humidity
@property
def altitude(self):
"""The altitude based on current ``pressure`` versus the sea level pressure
(``sea_level_pressure``) - which you must enter ahead of time."""
pressure = self.pressure # in Si units for hPascal
return 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903))
def _read_coefficients(self):
"""Read & save the calibration coefficients"""
coeff = self._read_register(_BME280_REGISTER_DIG_T1, 24)
coeff = list(struct.unpack('<HhhHhhhhhhhh', bytes(coeff)))
coeff = [float(i) for i in coeff]
self._temp_calib = coeff[:3]
self._pressure_calib = coeff[3:]
self._humidity_calib = [0]*6
self._humidity_calib[0] = self._read_byte(_BME280_REGISTER_DIG_H1)
coeff = self._read_register(_BME280_REGISTER_DIG_H2, 7)
coeff = list(struct.unpack('<hBBBBb', bytes(coeff)))
self._humidity_calib[1] = float(coeff[0])
self._humidity_calib[2] = float(coeff[1])
self._humidity_calib[3] = float((coeff[2] << 4) | (coeff[3] & 0xF))
self._humidity_calib[4] = float((coeff[4] << 4) | (coeff[3] >> 4))
self._humidity_calib[5] = float(coeff[5])
def _read_byte(self, register):
"""Read a byte register value and return it"""
return self._read_register(register, 1)[0]
def _read24(self, register):
"""Read an unsigned 24-bit value as a floating point and return it."""
ret = 0.0
for b in self._read_register(register, 3):
ret *= 256.0
ret += float(b & 0xFF)
return ret
def _read_register(self, register, length):
raise NotImplementedError()
def _write_register_byte(self, register, value):
raise NotImplementedError()
class Adafruit_BME280_I2C(Adafruit_BME280):
"""Driver for BME280 connected over I2C"""
def __init__(self, i2c, address=_BME280_ADDRESS):
import adafruit_bus_device.i2c_device as i2c_device
self._i2c = i2c_device.I2CDevice(i2c, address)
super().__init__()
def _read_register(self, register, length):
with self._i2c as i2c:
i2c.write(bytes([register & 0xFF]))
result = bytearray(length)
i2c.readinto(result)
#print("$%02X => %s" % (register, [hex(i) for i in result]))
return result
def _write_register_byte(self, register, value):
with self._i2c as i2c:
i2c.write(bytes([register & 0xFF, value & 0xFF]))
#print("$%02X <= 0x%02X" % (register, value))
class Adafruit_BME280_SPI(Adafruit_BME280):
"""Driver for BME280 connected over SPI"""
def __init__(self, spi, cs, baudrate=100000):
import adafruit_bus_device.spi_device as spi_device
self._spi = spi_device.SPIDevice(spi, cs, baudrate=baudrate)
super().__init__()
def _read_register(self, register, length):
register = (register | 0x80) & 0xFF # Read single, bit 7 high.
with self._spi as spi:
spi.write(bytearray([register])) #pylint: disable=no-member
result = bytearray(length)
spi.readinto(result) #pylint: disable=no-member
#print("$%02X => %s" % (register, [hex(i) for i in result]))
return result
def | _write_register_byte | identifier_name |
|
temperature_driver.py | C ADDRESS/BITS/SETTINGS
# -----------------------------------------------------------------------
_BME280_ADDRESS = const(0x77)
_BME280_CHIPID = const(0x58)
_BME280_REGISTER_CHIPID = const(0xD0)
_BME280_REGISTER_DIG_T1 = const(0x88)
_BME280_REGISTER_DIG_H1 = const(0xA1)
_BME280_REGISTER_DIG_H2 = const(0xE1)
_BME280_REGISTER_DIG_H3 = const(0xE3)
_BME280_REGISTER_DIG_H4 = const(0xE4)
_BME280_REGISTER_DIG_H5 = const(0xE5)
_BME280_REGISTER_DIG_H6 = const(0xE7)
_BME280_REGISTER_SOFTRESET = const(0xE0)
_BME280_REGISTER_CTRL_HUM = const(0xF2)
_BME280_REGISTER_STATUS = const(0xF3)
_BME280_REGISTER_CTRL_MEAS = const(0xF4)
_BME280_REGISTER_CONFIG = const(0xF5)
_BME280_REGISTER_PRESSUREDATA = const(0xF7)
_BME280_REGISTER_TEMPDATA = const(0xFA)
_BME280_REGISTER_HUMIDDATA = const(0xFD)
_BME280_PRESSURE_MIN_HPA = const(300)
_BME280_PRESSURE_MAX_HPA = const(1100)
_BME280_HUMIDITY_MIN = const(0)
_BME280_HUMIDITY_MAX = const(100)
class Adafruit_BME280:
"""Driver from BME280 Temperature, Humidity and Barometic Pressure sensor"""
def __init__(self):
"""Check the BME280 was found, read the coefficients and enable the sensor for continuous
reads"""
# Check device ID.
chip_id = self._read_byte(_BME280_REGISTER_CHIPID)
if _BME280_CHIPID != chip_id:
raise RuntimeError('Failed to find BME280! Chip ID 0x%x' % chip_id)
self._write_register_byte(_BME280_REGISTER_SOFTRESET, 0xB6)
time.sleep(0.5)
self._read_coefficients()
self.sea_level_pressure = 1013.25
"""Pressure in hectoPascals at sea level. Used to calibrate `altitude`."""
# turn on humidity oversampling (ctrl_hum = 0x03, i.e. x4)
self._write_register_byte(_BME280_REGISTER_CTRL_HUM, 0x03)
self._t_fine = None
def _read_temperature(self):
# perform one measurement
self._write_register_byte(_BME280_REGISTER_CTRL_MEAS, 0xFE) # high res, forced mode
# Wait for conversion to complete
while self._read_byte(_BME280_REGISTER_STATUS) & 0x08:
time.sleep(0.002)
raw_temperature = self._read24(_BME280_REGISTER_TEMPDATA) / 16 # lowest 4 bits get dropped
#print("raw temp: ", UT)
var1 = (raw_temperature / 16384.0 - self._temp_calib[0] / 1024.0) * self._temp_calib[1]
#print(var1)
var2 = ((raw_temperature / 131072.0 - self._temp_calib[0] / 8192.0) * (
raw_temperature / 131072.0 - self._temp_calib[0] / 8192.0)) * self._temp_calib[2]
#print(var2)
self._t_fine = int(var1 + var2)
#print("t_fine: ", self.t_fine)
@property
def temperature(self):
"""The compensated temperature in degrees celsius."""
self._read_temperature()
return self._t_fine / 5120.0
@property
def pressure(self):
"""The compensated pressure in hectoPascals."""
self._read_temperature()
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped
var1 = float(self._t_fine) / 2.0 - 64000.0
var2 = var1 * var1 * self._pressure_calib[5] / 32768.0
var2 = var2 + var1 * self._pressure_calib[4] * 2.0
var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0
var3 = self._pressure_calib[2] * var1 * var1 / 524288.0
var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]
if var1 == 0:
return 0
if var1:
pressure = 1048576.0 - adc
pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1
var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0
var2 = pressure * self._pressure_calib[7] / 32768.0
pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0
pressure /= 100
if pressure < _BME280_PRESSURE_MIN_HPA:
return _BME280_PRESSURE_MIN_HPA
if pressure > _BME280_PRESSURE_MAX_HPA:
return _BME280_PRESSURE_MAX_HPA
return pressure
else:
return _BME280_PRESSURE_MIN_HPA
@property
def humidity(self):
"""The relative humidity in RH %"""
self._read_temperature()
hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)
#print("Humidity data: ", hum)
adc = float(hum[0] << 8 | hum[1])
#print("adc:", adc)
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
var1 = float(self._t_fine) - 76800.0
#print("var1 ", var1)
var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)
#print("var2 ",var2)
var3 = adc - var2
#print("var3 ",var3)
var4 = self._humidity_calib[1] / 65536.0
#print("var4 ",var4)
var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)
#print("var5 ",var5)
var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5
#print("var6 ",var6)
var6 = var3 * var4 * (var5 * var6)
humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)
if humidity > _BME280_HUMIDITY_MAX:
return _BME280_HUMIDITY_MAX
if humidity < _BME280_HUMIDITY_MIN:
return _BME280_HUMIDITY_MIN
# else...
return humidity
@property
def altitude(self):
| def _read_coefficients(self):
"""Read & save the calibration coefficients"""
coeff = self._read_register(_BME280 | """The altitude based on current ``pressure`` versus the sea level pressure
(``sea_level_pressure``) - which you must enter ahead of time."""
pressure = self.pressure # in Si units for hPascal
return 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903))
| random_line_split |
main.go | .Manifest.Images {
imageMapping := ImageMapping{
ManifestName: manifestImage.FullName(),
}
imageMapping.Objects.Manifest = manifestImage
imageMapping.Objects.GitHubRepo = cache.GithubRepos[manifestImage.RepoPath()]
imageMapping.Objects.GitHubLastRef = cache.GithubLastRefs[manifestImage.RepoPath()]
manifestImageName := ImageCodeName(manifestImage.Name)
apiImages := c.Api.Images
for idx := range *apiImages {
apiImage := (*apiImages)[idx]
apiImageName := ImageCodeName(apiImage.Name)
if rankMatch := fuzzy.RankMatch(manifestImageName, apiImageName); rankMatch > -1 {
imageMapping.ApiUUID = apiImage.Identifier
imageMapping.RankMatch = rankMatch
imageMapping.Found++
imageMapping.Objects.Api = &apiImage
}
}
c.Mapping.Images = append(c.Mapping.Images, &imageMapping)
}
logrus.Infof("Images mapped")
}
type ImageMapping struct {
ApiUUID string `json:"api_uuid"`
ManifestName string `json:"manifest_name"`
RankMatch int `json:"rank_match"`
Found int `json:"found"`
Objects struct {
Api *api.ScalewayImage `json:"api"`
Manifest *scwImage.Image `json:"manifest"`
GitHubRepo *github.Repository `json:"github_repo"`
GitHubLastRef *github.Reference `json:"github_last_ref"`
} `json:"objects"`
}
func (i *ImageMapping) MatchName(input string) bool {
if input == i.ApiUUID {
return true
}
input = ImageCodeName(input)
if fuzzy.RankMatch(input, ImageCodeName(i.ManifestName)) > -1 {
return true
}
for _, tag := range i.Objects.Manifest.Tags {
nameWithTag := ImageCodeName(fmt.Sprintf("%s-%s", i.Objects.Manifest.Name, tag))
if fuzzy.RankMatch(input, nameWithTag) > -1 {
return true
}
}
return false
}
| name := strings.ToLower(inputName)
name = regexp.MustCompile(`[^a-z0-9-]`).ReplaceAllString(name, "-")
name = regexp.MustCompile(`--+`).ReplaceAllString(name, "-")
name = strings.Trim(name, "-")
return name
}
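// Example of the normalisation ImageCodeName performs (worked out from the
// regexps above; the input string itself is made up):
//
//	ImageCodeName("Ubuntu 16.04 (LTS)") // -> "ubuntu-16-04-lts"
//
// i.e. lower-case, every character outside [a-z0-9-] turned into a dash,
// dash runs collapsed, and leading/trailing dashes trimmed. MatchName above
// relies on this so that fuzzy matching compares like with like.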
func main() {
router := gin.Default()
router.StaticFile("/", "./static/index.html")
router.Static("/static", "./static")
router.Static("/bower_components", "./bower_components")
// FIXME: favicon
router.GET("/api/images", imagesEndpoint)
router.GET("/api/images/:name", imageEndpoint)
router.GET("/api/images/:name/dockerfile", imageDockerfileEndpoint)
router.GET("/api/images/:name/makefile", imageMakefileEndpoint)
router.GET("/api/bootscripts", bootscriptsEndpoint)
router.GET("/api/bootscripts/:id", bootscriptEndpoint)
router.GET("/api/cache", cacheEndpoint)
router.GET("/images/:name/new-server", newServerEndpoint)
router.GET("/badges/images/:name/scw-build.svg", badgeImageScwBuild)
// router.GET("/images/:name/badge", imageBadgeEndpoint)
Api, err := api.NewScalewayAPI("https://api.scaleway.com", "", os.Getenv("SCALEWAY_ORGANIZATION"), os.Getenv("SCALEWAY_TOKEN"))
if err != nil {
logrus.Fatalf("Failed to initialize Scaleway Api: %v", err)
}
if os.Getenv("GITHUB_TOKEN") != "" {
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")},
)
tc := oauth2.NewClient(oauth2.NoContext, ts)
gh := github.NewClient(tc)
go updateGitHub(gh, cache)
} else {
logrus.Errorf("GITHUB_TOKEN empty")
}
go updateManifestCron(cache)
go updateScwApiImages(Api, cache)
go updateScwApiBootscripts(Api, cache)
port := os.Getenv("PORT")
if port == "" {
port = "8000"
}
router.Run(fmt.Sprintf(":%s", port))
}
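// Runtime configuration note (inferred from the code above): the process
// reads SCALEWAY_ORGANIZATION and SCALEWAY_TOKEN for the Scaleway API,
// GITHUB_TOKEN to enable the GitHub poller, and PORT (default 8000) for the
// HTTP listener. A hypothetical invocation:
//
//	SCALEWAY_ORGANIZATION=... SCALEWAY_TOKEN=... GITHUB_TOKEN=... PORT=8000 ./image-hub
//
// The binary name here is an assumption; use whatever `go build` produces.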
func httpGetContent(url string) (string, error) {
httpMutex.Lock()
defer httpMutex.Unlock()
logrus.Warnf("Fetching HTTP content: %s", url)
request := gorequest.New()
_, body, errs := request.Get(url).End()
if len(errs) > 0 {
// FIXME: return all the errs
return "", errs[0]
}
return body, nil
}
func getBadge(left, right, color string) (string, error) {
badgeMutex.Lock()
defer badgeMutex.Unlock()
left = strings.Replace(left, "-", "--", -1)
right = strings.Replace(right, "-", "--", -1)
left = strings.Replace(left, " ", "_", -1)
right = strings.Replace(right, " ", "_", -1)
url := fmt.Sprintf("https://img.shields.io/badge/%s-%s-%s.svg", left, right, color)
body, err := memoize.Call(httpGetContent, url)
if err != nil {
return errBadge(left, fmt.Errorf("http error")), err
}
return body.(string), nil
}
func errBadge(left string, err error) string {
logrus.Warnf("Failed to get badge: %v", err)
// FIXME: remove the switch and compute the left width automatically
switch left {
case "build":
return `<svg xmlns="http://www.w3.org/2000/svg" width="115" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><mask id="a"><rect width="115" height="20" rx="3" fill="#fff"/></mask><g mask="url(#a)"><path fill="#555" d="M0 0h37v20H0z"/><path fill="#9f9f9f" d="M37 0h78v20H37z"/><path fill="url(#b)" d="M0 0h115v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="18.5" y="15" fill="#010101" fill-opacity=".3">build</text><text x="18.5" y="14">build</text><text x="75" y="15" fill="#010101" fill-opacity=".3">inaccessible</text><text x="75" y="14">inaccessible</text></g></svg>`
default:
return strings.Replace(`<svg xmlns="http://www.w3.org/2000/svg" width="133" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><mask id="a"><rect width="133" height="20" rx="3" fill="#fff"/></mask><g mask="url(#a)"><path fill="#555" d="M0 0h55v20H0z"/><path fill="#9f9f9f" d="M55 0h78v20H55z"/><path fill="url(#b)" d="M0 0h133v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="27.5" y="15" fill="#010101" fill-opacity=".3">{{ .Left }}</text><text x="27.5" y="14">{{ .Left }}</text><text x="93" y="15" fill="#010101" fill-opacity=".3">inaccessible</text><text x="93" y="14">inaccessible</text></g></svg>`, "{{ .Left }}", left, -1)
}
}
func badgeImageScwBuild(c *gin.Context) {
name := c.Param("name")
images := cache.GetImageByName(name)
left := "build"
c.Header("Content-Type", "image/svg+xml;charset=utf-8")
switch len(images) {
case 0:
c.String(http.StatusNotFound, errBadge(left, fmt.Errorf("no such image")))
case 1:
image := images[0]
if image.Objects.Api == nil {
c.String(http.StatusInternalServerError, errBadge(left, fmt.Errorf("invalid resource")))
return
}
creationDate, err := time.Parse(time.RFC3339, image.Objects.Api.CreationDate)
if err != nil {
c.String(http.StatusInternalServerError, errBadge(left, fmt.Errorf("invalid-date")))
return
}
humanTime := humanize.Time(creationDate)
humanTime = strings.Replace(humanTime, " ago", "", -1)
badge, err := getBadge(left, humanTime, "green")
| func ImageCodeName(inputName string) string { | random_line_split |
main.go | (#a)"><path fill="#555" d="M0 0h37v20H0z"/><path fill="#9f9f9f" d="M37 0h78v20H37z"/><path fill="url(#b)" d="M0 0h115v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="18.5" y="15" fill="#010101" fill-opacity=".3">build</text><text x="18.5" y="14">build</text><text x="75" y="15" fill="#010101" fill-opacity=".3">inaccessible</text><text x="75" y="14">inaccessible</text></g></svg>`
default:
return strings.Replace(`<svg xmlns="http://www.w3.org/2000/svg" width="133" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><mask id="a"><rect width="133" height="20" rx="3" fill="#fff"/></mask><g mask="url(#a)"><path fill="#555" d="M0 0h55v20H0z"/><path fill="#9f9f9f" d="M55 0h78v20H55z"/><path fill="url(#b)" d="M0 0h133v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="27.5" y="15" fill="#010101" fill-opacity=".3">{{ .Left }}</text><text x="27.5" y="14">{{ .Left }}</text><text x="93" y="15" fill="#010101" fill-opacity=".3">inaccessible</text><text x="93" y="14">inaccessible</text></g></svg>`, "{{ .Left }}", left, -1)
}
}
func badgeImageScwBuild(c *gin.Context) {
name := c.Param("name")
images := cache.GetImageByName(name)
left := "build"
c.Header("Content-Type", "image/svg+xml;charset=utf-8")
switch len(images) {
case 0:
c.String(http.StatusNotFound, errBadge(left, fmt.Errorf("no such image")))
case 1:
image := images[0]
if image.Objects.Api == nil {
c.String(http.StatusInternalServerError, errBadge(left, fmt.Errorf("invalid resource")))
return
}
creationDate, err := time.Parse(time.RFC3339, image.Objects.Api.CreationDate)
if err != nil {
c.String(http.StatusInternalServerError, errBadge(left, fmt.Errorf("invalid-date")))
return
}
humanTime := humanize.Time(creationDate)
humanTime = strings.Replace(humanTime, " ago", "", -1)
badge, err := getBadge(left, humanTime, "green")
if err != nil {
c.String(http.StatusInternalServerError, badge)
return
}
c.String(http.StatusOK, badge)
default:
c.String(http.StatusNotFound, errBadge(left, fmt.Errorf("ambiguous name")))
}
}
func cacheEndpoint(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{
"cache": cache,
})
}
func imagesEndpoint(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{
"images": cache.Mapping.Images,
})
}
func bootscriptsEndpoint(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{
"bootscripts": cache.Api.Bootscripts,
})
}
func imageDockerfileEndpoint(c *gin.Context) {
name := c.Param("name")
image := cache.Manifest.Images[name]
dockerfile, err := image.GetDockerfile()
if err != nil {
c.JSON(http.StatusNotFound, gin.H{
"error": fmt.Sprintf("%v", err),
})
}
c.String(http.StatusOK, dockerfile)
}
func imageMakefileEndpoint(c *gin.Context) {
name := c.Param("name")
image := cache.Manifest.Images[name]
makefile, err := image.GetMakefile()
if err != nil {
c.JSON(http.StatusNotFound, gin.H{
"error": fmt.Sprintf("%v", err),
})
}
c.String(http.StatusOK, makefile)
}
func imageEndpoint(c *gin.Context) {
name := c.Param("name")
images := cache.GetImageByName(name)
switch len(images) {
case 0:
c.JSON(http.StatusNotFound, gin.H{
"error": "No such image",
})
case 1:
c.JSON(http.StatusOK, gin.H{
"image": images[0],
})
default:
c.JSON(http.StatusNotFound, gin.H{
"error": "Too much images are matching your request",
"images": images,
})
}
}
func bootscriptEndpoint(c *gin.Context) {
id := c.Param("id")
bootscripts := cache.GetBootscriptById(id)
switch len(bootscripts) {
case 0:
c.JSON(http.StatusNotFound, gin.H{
"error": "No such bootscript",
})
case 1:
c.JSON(http.StatusOK, gin.H{
"bootscript": bootscripts[0],
})
default:
c.JSON(http.StatusNotFound, gin.H{
"error": "Too much bootscripts are matching your request",
"bootscripts": bootscripts,
})
}
}
func newServerEndpoint(c *gin.Context) {
name := c.Param("name")
images := cache.GetImageByName(name)
switch len(images) {
case 0:
c.JSON(http.StatusNotFound, gin.H{
"error": "No such image",
})
case 1:
c.Redirect(http.StatusFound, fmt.Sprintf("https://cloud.scaleway.com/#/servers/new?image=%s", images[0].ApiUUID))
default:
c.JSON(http.StatusNotFound, gin.H{
"error": "Too much images are matching your request",
"images": images,
})
}
}
func updateScwApiImages(Api *api.ScalewayAPI, cache *Cache) {
for {
logrus.Infof("Fetching images from the Api...")
images, err := Api.GetImages()
if err != nil {
logrus.Errorf("Failed to retrieve images list from the Api: %v", err)
} else {
cache.Api.Images = images
logrus.Infof("Images fetched: %d images", len(*images))
cache.MapImages()
}
time.Sleep(5 * time.Minute)
}
}
func updateGitHub(gh *github.Client, cache *Cache) {
for {
logrus.Infof("Fetching GitHub...")
for cache.Manifest == nil || len(cache.Manifest.Images) == 0 {
time.Sleep(time.Second)
}
changes := 0
for _, image := range cache.Manifest.Images {
if _, found := cache.GithubRepos[image.RepoPath()]; !found {
changes++
repo, err := image.GithubGetRepo(gh)
if err != nil {
logrus.Warnf("Failed to fetch repo %q: %v", image.RepoPath(), err)
} else {
cache.GithubRepos[image.RepoPath()] = repo
}
}
if _, found := cache.GithubLastRefs[image.RepoPath()]; !found {
changes++
ref, err := image.GithubGetLastRef(gh)
if err != nil {
logrus.Warnf("Failed to fetch repo last reference %q: %v", image.RepoPath(), err)
} else {
cache.GithubLastRefs[image.RepoPath()] = ref
}
}
}
if changes > 0 {
cache.MapImages()
}
time.Sleep(5 * time.Minute)
}
}
func updateScwApiBootscripts(Api *api.ScalewayAPI, cache *Cache) {
for {
logrus.Infof("Fetching bootscripts from the Api...")
bootscripts, err := Api.GetBootscripts()
if err != nil {
logrus.Errorf("Failed to retrieve bootscripts list from the Api: %v", err)
} else {
cache.Api.Bootscripts = bootscripts
logrus.Infof("Bootscripts fetched: %d bootscripts", len(*bootscripts))
}
time.Sleep(5 * time.Minute)
}
}
func updateManifestCron(cache *Cache) {
for | {
logrus.Infof("Fetching manifest...")
manifest, err := scwManifest.GetManifest()
if err != nil {
logrus.Errorf("Cannot get manifest: %v", err)
} else {
cache.Manifest = manifest
logrus.Infof("Manifest fetched: %d images", len(manifest.Images))
cache.MapImages()
}
time.Sleep(5 * time.Minute)
} | conditional_block |
|
main.go | .Images {
imageMapping := ImageMapping{
ManifestName: manifestImage.FullName(),
}
imageMapping.Objects.Manifest = manifestImage
imageMapping.Objects.GitHubRepo = cache.GithubRepos[manifestImage.RepoPath()]
imageMapping.Objects.GitHubLastRef = cache.GithubLastRefs[manifestImage.RepoPath()]
manifestImageName := ImageCodeName(manifestImage.Name)
apiImages := c.Api.Images
for idx := range *apiImages {
apiImage := (*apiImages)[idx]
apiImageName := ImageCodeName(apiImage.Name)
if rankMatch := fuzzy.RankMatch(manifestImageName, apiImageName); rankMatch > -1 {
imageMapping.ApiUUID = apiImage.Identifier
imageMapping.RankMatch = rankMatch
imageMapping.Found++
imageMapping.Objects.Api = &apiImage
}
}
c.Mapping.Images = append(c.Mapping.Images, &imageMapping)
}
logrus.Infof("Images mapped")
}
type ImageMapping struct {
ApiUUID string `json:"api_uuid"`
ManifestName string `json:"manifest_name"`
RankMatch int `json:"rank_match"`
Found int `json:"found"`
Objects struct {
Api *api.ScalewayImage `json:"api"`
Manifest *scwImage.Image `json:"manifest"`
GitHubRepo *github.Repository `json:"github_repo"`
GitHubLastRef *github.Reference `json:"github_last_ref"`
} `json:"objects"`
}
func (i *ImageMapping) MatchName(input string) bool {
if input == i.ApiUUID {
return true
}
input = ImageCodeName(input)
if fuzzy.RankMatch(input, ImageCodeName(i.ManifestName)) > -1 {
return true
}
for _, tag := range i.Objects.Manifest.Tags {
nameWithTag := ImageCodeName(fmt.Sprintf("%s-%s", i.Objects.Manifest.Name, tag))
if fuzzy.RankMatch(input, nameWithTag) > -1 {
return true
}
}
return false
}
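// Illustrative behaviour (derived from the checks above, with a hypothetical
// image name): an exact API UUID always matches, and a fuzzy needle such as
// "ubuntu" matches a manifest name that normalizes to "ubuntu-14-04", either
// directly or through one of its "<name>-<tag>" combinations.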
func ImageCodeName(inputName string) string {
name := strings.ToLower(inputName)
name = regexp.MustCompile(`[^a-z0-9-]`).ReplaceAllString(name, "-")
name = regexp.MustCompile(`--+`).ReplaceAllString(name, "-")
name = strings.Trim(name, "-")
return name
}
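// For example, ImageCodeName("Ubuntu 14.04 (Trusty)") yields
// "ubuntu-14-04-trusty": the name is lowercased, each run of characters
// outside [a-z0-9-] collapses to a single dash, and leading or trailing
// dashes are trimmed.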
func main() {
router := gin.Default()
router.StaticFile("/", "./static/index.html")
router.Static("/static", "./static")
router.Static("/bower_components", "./bower_components")
// FIXME: favicon
router.GET("/api/images", imagesEndpoint)
router.GET("/api/images/:name", imageEndpoint)
router.GET("/api/images/:name/dockerfile", imageDockerfileEndpoint)
router.GET("/api/images/:name/makefile", imageMakefileEndpoint)
router.GET("/api/bootscripts", bootscriptsEndpoint)
router.GET("/api/bootscripts/:id", bootscriptEndpoint)
router.GET("/api/cache", cacheEndpoint)
router.GET("/images/:name/new-server", newServerEndpoint)
router.GET("/badges/images/:name/scw-build.svg", badgeImageScwBuild)
// router.GET("/images/:name/badge", imageBadgeEndpoint)
Api, err := api.NewScalewayAPI("https://api.scaleway.com", "", os.Getenv("SCALEWAY_ORGANIZATION"), os.Getenv("SCALEWAY_TOKEN"))
if err != nil {
logrus.Fatalf("Failed to initialize Scaleway Api: %v", err)
}
if os.Getenv("GITHUB_TOKEN") != "" {
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")},
)
tc := oauth2.NewClient(oauth2.NoContext, ts)
gh := github.NewClient(tc)
go updateGitHub(gh, cache)
} else {
logrus.Errorf("GITHUB_TOKEN empty")
}
go updateManifestCron(cache)
go updateScwApiImages(Api, cache)
go updateScwApiBootscripts(Api, cache)
port := os.Getenv("PORT")
if port == "" {
port = "8000"
}
router.Run(fmt.Sprintf(":%s", port))
}
func httpGetContent(url string) (string, error) {
httpMutex.Lock()
defer httpMutex.Unlock()
logrus.Warnf("Fetching HTTP content: %s", url)
request := gorequest.New()
_, body, errs := request.Get(url).End()
if len(errs) > 0 {
// FIXME: return all the errs
return "", errs[0]
}
return body, nil
}
func getBadge(left, right, color string) (string, error) {
badgeMutex.Lock()
defer badgeMutex.Unlock()
left = strings.Replace(left, "-", "--", -1)
right = strings.Replace(right, "-", "--", -1)
left = strings.Replace(left, " ", "_", -1)
right = strings.Replace(right, " ", "_", -1)
url := fmt.Sprintf("https://img.shields.io/badge/%s-%s-%s.svg", left, right, color)
body, err := memoize.Call(httpGetContent, url)
if err != nil {
return errBadge(left, fmt.Errorf("http error")), err
}
return body.(string), nil
}
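// Example of the escaping above (hypothetical labels): getBadge("scw-build",
// "3 months", "green") fetches
// https://img.shields.io/badge/scw--build-3_months-green.svg, because dashes
// in either label are doubled and spaces become underscores before the
// shields.io URL is built.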
func | (left string, err error) string {
logrus.Warnf("Failed to get badge: %v", err)
// FIXME: remove the switch and compute the left width automatically
switch left {
case "build":
return `<svg xmlns="http://www.w3.org/2000/svg" width="115" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><mask id="a"><rect width="115" height="20" rx="3" fill="#fff"/></mask><g mask="url(#a)"><path fill="#555" d="M0 0h37v20H0z"/><path fill="#9f9f9f" d="M37 0h78v20H37z"/><path fill="url(#b)" d="M0 0h115v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="18.5" y="15" fill="#010101" fill-opacity=".3">build</text><text x="18.5" y="14">build</text><text x="75" y="15" fill="#010101" fill-opacity=".3">inaccessible</text><text x="75" y="14">inaccessible</text></g></svg>`
default:
return strings.Replace(`<svg xmlns="http://www.w3.org/2000/svg" width="133" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><mask id="a"><rect width="133" height="20" rx="3" fill="#fff"/></mask><g mask="url(#a)"><path fill="#555" d="M0 0h55v20H0z"/><path fill="#9f9f9f" d="M55 0h78v20H55z"/><path fill="url(#b)" d="M0 0h133v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="27.5" y="15" fill="#010101" fill-opacity=".3">{{ .Left }}</text><text x="27.5" y="14">{{ .Left }}</text><text x="93" y="15" fill="#010101" fill-opacity=".3">inaccessible</text><text x="93" y="14">inaccessible</text></g></svg>`, "{{ .Left }}", left, -1)
}
}
func badgeImageScwBuild(c *gin.Context) {
name := c.Param("name")
images := cache.GetImageByName(name)
left := "build"
c.Header("Content-Type", "image/svg+xml;charset=utf-8")
switch len(images) {
case 0:
c.String(http.StatusNotFound, errBadge(left, fmt.Errorf("no such image")))
case 1:
image := images[0]
if image.Objects.Api == nil {
c.String(http.StatusInternalServerError, errBadge(left, fmt.Errorf("invalid resource")))
return
}
creationDate, err := time.Parse(time.RFC3339, image.Objects.Api.CreationDate)
if err != nil {
c.String(http.StatusInternalServerError, errBadge(left, fmt.Errorf("invalid-date")))
return
}
humanTime := humanize.Time(creationDate)
humanTime = strings.Replace(humanTime, " ago", "", -1)
badge, err := getBadge(left, humanTime, "green")
| errBadge | identifier_name |
main.go | ImageName := ImageCodeName(manifestImage.Name)
apiImages := c.Api.Images
for idx := range *apiImages {
apiImage := (*apiImages)[idx]
apiImageName := ImageCodeName(apiImage.Name)
if rankMatch := fuzzy.RankMatch(manifestImageName, apiImageName); rankMatch > -1 {
imageMapping.ApiUUID = apiImage.Identifier
imageMapping.RankMatch = rankMatch
imageMapping.Found++
imageMapping.Objects.Api = &apiImage
}
}
c.Mapping.Images = append(c.Mapping.Images, &imageMapping)
}
logrus.Infof("Images mapped")
}
type ImageMapping struct {
ApiUUID string `json:"api_uuid"`
ManifestName string `json:"manifest_name"`
RankMatch int `json:"rank_match"`
Found int `json:"found"`
Objects struct {
Api *api.ScalewayImage `json:"api"`
Manifest *scwImage.Image `json:"manifest"`
GitHubRepo *github.Repository `json:"github_repo"`
GitHubLastRef *github.Reference `json:"github_last_ref"`
} `json:"objects"`
}
func (i *ImageMapping) MatchName(input string) bool {
if input == i.ApiUUID {
return true
}
input = ImageCodeName(input)
if fuzzy.RankMatch(input, ImageCodeName(i.ManifestName)) > -1 {
return true
}
for _, tag := range i.Objects.Manifest.Tags {
nameWithTag := ImageCodeName(fmt.Sprintf("%s-%s", i.Objects.Manifest.Name, tag))
if fuzzy.RankMatch(input, nameWithTag) > -1 {
return true
}
}
return false
}
func ImageCodeName(inputName string) string {
name := strings.ToLower(inputName)
name = regexp.MustCompile(`[^a-z0-9-]`).ReplaceAllString(name, "-")
name = regexp.MustCompile(`--+`).ReplaceAllString(name, "-")
name = strings.Trim(name, "-")
return name
}
func main() {
router := gin.Default()
router.StaticFile("/", "./static/index.html")
router.Static("/static", "./static")
router.Static("/bower_components", "./bower_components")
// FIXME: favicon
router.GET("/api/images", imagesEndpoint)
router.GET("/api/images/:name", imageEndpoint)
router.GET("/api/images/:name/dockerfile", imageDockerfileEndpoint)
router.GET("/api/images/:name/makefile", imageMakefileEndpoint)
router.GET("/api/bootscripts", bootscriptsEndpoint)
router.GET("/api/bootscripts/:id", bootscriptEndpoint)
router.GET("/api/cache", cacheEndpoint)
router.GET("/images/:name/new-server", newServerEndpoint)
router.GET("/badges/images/:name/scw-build.svg", badgeImageScwBuild)
// router.GET("/images/:name/badge", imageBadgeEndpoint)
Api, err := api.NewScalewayAPI("https://api.scaleway.com", "", os.Getenv("SCALEWAY_ORGANIZATION"), os.Getenv("SCALEWAY_TOKEN"))
if err != nil {
logrus.Fatalf("Failed to initialize Scaleway Api: %v", err)
}
if os.Getenv("GITHUB_TOKEN") != "" {
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")},
)
tc := oauth2.NewClient(oauth2.NoContext, ts)
gh := github.NewClient(tc)
go updateGitHub(gh, cache)
} else {
logrus.Errorf("GITHUB_TOKEN empty")
}
go updateManifestCron(cache)
go updateScwApiImages(Api, cache)
go updateScwApiBootscripts(Api, cache)
port := os.Getenv("PORT")
if port == "" {
port = "8000"
}
router.Run(fmt.Sprintf(":%s", port))
}
func httpGetContent(url string) (string, error) {
httpMutex.Lock()
defer httpMutex.Unlock()
logrus.Warnf("Fetching HTTP content: %s", url)
request := gorequest.New()
_, body, errs := request.Get(url).End()
if len(errs) > 0 {
// FIXME: return all the errs
return "", errs[0]
}
return body, nil
}
func getBadge(left, right, color string) (string, error) {
badgeMutex.Lock()
defer badgeMutex.Unlock()
left = strings.Replace(left, "-", "--", -1)
right = strings.Replace(right, "-", "--", -1)
left = strings.Replace(left, " ", "_", -1)
right = strings.Replace(right, " ", "_", -1)
url := fmt.Sprintf("https://img.shields.io/badge/%s-%s-%s.svg", left, right, color)
body, err := memoize.Call(httpGetContent, url)
if err != nil {
return errBadge(left, fmt.Errorf("http error")), err
}
return body.(string), nil
}
func errBadge(left string, err error) string {
logrus.Warnf("Failed to get badge: %v", err)
// FIXME: remove the switch and compute the left width automatically
switch left {
case "build":
return `<svg xmlns="http://www.w3.org/2000/svg" width="115" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><mask id="a"><rect width="115" height="20" rx="3" fill="#fff"/></mask><g mask="url(#a)"><path fill="#555" d="M0 0h37v20H0z"/><path fill="#9f9f9f" d="M37 0h78v20H37z"/><path fill="url(#b)" d="M0 0h115v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="18.5" y="15" fill="#010101" fill-opacity=".3">build</text><text x="18.5" y="14">build</text><text x="75" y="15" fill="#010101" fill-opacity=".3">inaccessible</text><text x="75" y="14">inaccessible</text></g></svg>`
default:
return strings.Replace(`<svg xmlns="http://www.w3.org/2000/svg" width="133" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><mask id="a"><rect width="133" height="20" rx="3" fill="#fff"/></mask><g mask="url(#a)"><path fill="#555" d="M0 0h55v20H0z"/><path fill="#9f9f9f" d="M55 0h78v20H55z"/><path fill="url(#b)" d="M0 0h133v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="27.5" y="15" fill="#010101" fill-opacity=".3">{{ .Left }}</text><text x="27.5" y="14">{{ .Left }}</text><text x="93" y="15" fill="#010101" fill-opacity=".3">inaccessible</text><text x="93" y="14">inaccessible</text></g></svg>`, "{{ .Left }}", left, -1)
}
}
func badgeImageScwBuild(c *gin.Context) {
name := c.Param("name")
images := cache.GetImageByName(name)
left := "build"
c.Header("Content-Type", "image/svg+xml;charset=utf-8")
switch len(images) {
case 0:
c.String(http.StatusNotFound, errBadge(left, fmt.Errorf("no such image")))
case 1:
image := images[0]
if image.Objects.Api == nil {
c.String(http.StatusInternalServerError, errBadge(left, fmt.Errorf("invalid resource")))
return
}
creationDate, err := time.Parse(time.RFC3339, image.Objects.Api.CreationDate)
if err != nil {
c.String(http.StatusInternalServerError, errBadge(left, fmt.Errorf("invalid-date")))
return
}
humanTime := humanize.Time(creationDate)
humanTime = strings.Replace(humanTime, " ago", "", -1)
badge, err := getBadge(left, humanTime, "green")
if err != nil {
c.String(http.StatusInternalServerError, badge)
return
}
c.String(http.StatusOK, badge)
default:
c.String(http.StatusNotFound, errBadge(left, fmt.Errorf("ambiguous name")))
}
}
func cacheEndpoint(c *gin.Context) | {
c.JSON(http.StatusOK, gin.H{
"cache": cache,
})
} | identifier_body |
|
timeline-custom-shiny.js | stringToDate(item.startDate);
obj.content = html;
processedDatas.push(obj);
}
this.parsedData = processedDatas;
return this;
};
timeline.prototype.drawVisualization = function() { // Create and populate a data table.
options = {
'height': "200px",
'width': "100%",
'start': new Date(visiblefirstdate, 0),
'end': new Date(visiblelastdate, 0),
'min': new Date(1850, 0, 0),
'max': new Date(2015, 0, 0),
'intervalMin': 1000 * 60 * 60 * 24 * 31 * 3,
'editable': false,
'animate': true,
'selectable': false,
'style': "box",
'showNavigation': true,
'showCurrentTime': false
};
// Instantiate our timeline object.
this.vis = new links.Timeline(document.getElementById('mytimeline'));
// Attach event listeners
links.events.addListener(this.vis, 'rangechange', this.onRangeChange.bind(this));
// Draw our timeline with the created data and options
this.vis.draw(this.parsedData, options);
//set the divs to the proper starting dates
document.getElementById('startDate').value = this.dateToString(this.vis.start);
document.getElementById('endDate').value = this.dateToString(this.vis.end);
};
timeline.prototype.setTime = function(startdate, enddate) {
if (!this.vis) return;
var newStartDate, newEndDate, sliderSet;
if (!startdate) {
newStartDate = new Date(this.stringToDate(document.getElementById('startDate').value));
newEndDate = new Date(this.stringToDate(document.getElementById('endDate').value));
sliderSet = false;
} else {
newStartDate = new Date(startdate, 0); //0 = working with years. should change this is parse
newEndDate = new Date(enddate, 0);
sliderSet = true;
}
this.vis.setVisibleChartRange(newStartDate, newEndDate);
this.onRangeChange(sliderSet);
};
timeline.prototype.onRangeChange = function(sliderSet) {
var range = this.vis.getVisibleChartRange()
, totalStartDate = range.start
, totalEndDate = range.end;
document.getElementById('startDate').value = this.dateToString(range.start);
document.getElementById('endDate').value = this.dateToString(range.end);
if (sliderSet !== true) {
$("#slider-range").dragslider("option", "values", [this.dateToString(range.start), this.dateToString(range.end)]);
}
};
timeline.prototype.stringToDate = function(input,format) {
var stringparts = input.split('-');
if (stringparts.length == 1) {
return new Date(input, 0);
} else {
format = format || 'yyyy-mm-dd'; // default format
var parts = input.match(/(\d+)/g),
i = 0,
fmt = {};
// extract date-part indexes from the format
format.replace(/(yyyy|dd|mm)/g, function(part) {
fmt[part] = i++;
});
return new Date(parts[fmt['yyyy']], parts[fmt['mm']] - 1, parts[fmt['dd']]);
}
};
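// For the dates used in the json entries below:
// stringToDate("1869") -> new Date(1869, 0), i.e. January 1st, 1869
// stringToDate("1866-2-20") -> new Date(1866, 1, 20) (months are zero-based)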
timeline.prototype.dateToString = function(date){
return date.getFullYear();
};
timeline.prototype.moveTimeline = function(degree){
this.vis.move(degree);
this.onRangeChange();
};
timeline.prototype.addCaption = function() {
$('a > img[style]').each(function() {
$el = $(this);
var style = $el.attr('style');
$el.attr('style', '');
$el.parent().attr('style', style);
}); //Moves the inline styles
$("img").each(function() {
var title = $(this).attr('alt');
$(this).after('<span class="caption">' + title + '</span>');
}); //Adds the dynamic captions.
};
timeline.prototype.drawSliderBackground = function() {
var wrapperWidth = $('#mytimelinewrapper').width();
$('#slider-range').css('width', wrapperWidth);
var numDivisions = 8
, pixelBlockSize = wrapperWidth / numDivisions
, yearBlockSize = ((endDate - startDate) / (numDivisions));
if ($('.slider-date').length > 0) {
$('.slider-date').remove();
}
for (var i = 1; i < numDivisions; i++) {
var pixelTick = pixelBlockSize * i
, yearTickDecimal = (yearBlockSize * i) + startDate
, yearTick = yearTickDecimal.toPrecision(4)
, element = document.createElement('div');
element.className = "slider-date";
element.style.position = "absolute";
element.style.left = pixelTick + 'px';
document.getElementById("slider").appendChild(element);
element.innerHTML = yearTick;
}
};
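// Worked example (assuming an 800px wrapper and the 1850-2015 range used in
// drawVisualization): pixelBlockSize = 800 / 8 = 100px and yearBlockSize =
// (2015 - 1850) / 8 = 20.625 years, so tick i is placed at i * 100px and
// labelled with 1850 + i * 20.625 trimmed to four significant digits.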
// Adds dragging feature to the jQuery UI Slider.
(function($, undefined) {
$.widget("ui.dragslider", $.ui.slider, {
options: $.extend({}, $.ui.slider.prototype.options, {
rangeDrag: false
}),
_create: function() {
$.ui.slider.prototype._create.apply(this, arguments);
this._rangeCapture = false;
},
_mouseCapture: function(event) {
var o = this.options;
if (o.disabled) return false;
if (event.target == this.range.get(0) && o.rangeDrag === true && o.range === true) {
this._rangeCapture = true;
this._rangeStart = null;
} else {
this._rangeCapture = false;
}
$.ui.slider.prototype._mouseCapture.apply(this, arguments);
if (this._rangeCapture == true) {
this.handles.removeClass("ui-state-active").blur();
}
return true;
},
_mouseStop: function(event) {
this._rangeStart = null;
return $.ui.slider.prototype._mouseStop.apply(this, arguments);
},
_slide: function(event, index, newVal) {
if (!this._rangeCapture) {
return $.ui.slider.prototype._slide.apply(this, arguments);
}
if (this._rangeStart == null) {
this._rangeStart = newVal;
}
var oldValLeft = this.options.values[0],
oldValRight = this.options.values[1],
slideDist = newVal - this._rangeStart,
newValueLeft = oldValLeft + slideDist,
newValueRight = oldValRight + slideDist,
allowed;
if (this.options.values && this.options.values.length) {
if (newValueRight > this._valueMax() && slideDist > 0) {
slideDist -= (newValueRight - this._valueMax());
newValueLeft = oldValLeft + slideDist;
newValueRight = oldValRight + slideDist;
}
if (newValueLeft < this._valueMin()) {
slideDist += (this._valueMin() - newValueLeft);
newValueLeft = oldValLeft + slideDist;
newValueRight = oldValRight + slideDist;
}
if (slideDist != 0) {
newValues = this.values();
newValues[0] = newValueLeft;
newValues[1] = newValueRight;
// A slide can be canceled by returning false from the slide callback
allowed = this._trigger("slide", event, {
handle: this.handles[index],
value: slideDist,
values: newValues
});
if (allowed !== false) {
this.values(0, newValueLeft, true);
this.values(1, newValueRight, true);
} | });
})(jQuery);
timeline.prototype.addSlider = function(){
var self = this;
this.drawSliderBackground();
$("#slider-range").dragslider({
range: true,
min: self.startDate,
max: self.endDate,
animate: true,
rangeDrag: true,
values: [self.visibleFirstDate, self.visibleLastDate],
slide: function(event, ui) {
var startingDate = ui.values[0]
, endingDate = ui.values[1];
self.setTime(startingDate, endingDate);
}
});
};
var json = [{
name: "Gregor Mendel Publishes His Paper on Heredity",
startDate: "1866-2-20",
description: 'Gregor Mendel, an Augustinian monk, publishes his paper, "Versuche über Pflanzenhybriden" ("Experiments on Plant Hybridization"), containing his findings on heredity in the journal Proceedings of the Natural History Society of Brunn. These findings, which demonstrate that inheritance follows particular laws, emerged from years of observations breeding pea plants at the experimental garden of the Augustinian Abbey of St. Thomas in Brno.',
type: 'text'
},{
name: "Francis Galton Publishes <em>Hereditary Genius.</em>",
startDate: "1869",
description: 'Francis Galton publishes his influential book Hereditary Genius. Within it, he attempts to understand the heritability of human intelligence from a social sciences perspective. This volume proved a cornerstone of the nascent eugenics movement.',
type: 'text'
},
{
name: "Gregor Mendel Publishes His Paper on Heredity",
startDate: "1866-2-20",
description: ' | this._rangeStart = newVal;
}
}
} | random_line_split |
timeline-custom-shiny.js |
obj.start = stringToDate(item.startDate);
obj.content = html;
processedDatas.push(obj);
}
this.parsedData = processedDatas;
return this;
};
timeline.prototype.drawVisualization = function() { // Create and populate a data table.
options = {
'height': "200px",
'width': "100%",
'start': new Date(visiblefirstdate, 0),
'end': new Date(visiblelastdate, 0),
'min': new Date(1850, 0, 0),
'max': new Date(2015, 0, 0),
'intervalMin': 1000 * 60 * 60 * 24 * 31 * 3,
'editable': false,
'animate': true,
'selectable': false,
'style': "box",
'showNavigation': true,
'showCurrentTime': false
};
// Instantiate our timeline object.
this.vis = new links.Timeline(document.getElementById('mytimeline'));
// Attach event listeners
links.events.addListener(this.vis, 'rangechange', this.onRangeChange.bind(this));
// Draw our timeline with the created data and options
this.vis.draw(this.parsedData, options);
//set the divs to the proper starting dates
document.getElementById('startDate').value = this.dateToString(this.vis.start);
document.getElementById('endDate').value = this.dateToString(this.vis.end);
};
timeline.prototype.setTime = function(startdate, enddate) {
if (!this.vis) return;
var newStartDate, newEndDate, sliderSet;
if (!startdate) {
newStartDate = new Date(this.stringToDate(document.getElementById('startDate').value));
newEndDate = new Date(this.stringToDate(document.getElementById('endDate').value));
sliderSet = false;
} else {
newStartDate = new Date(startdate, 0); //0 = working with years. should change this is parse
newEndDate = new Date(enddate, 0);
sliderSet = true;
}
this.vis.setVisibleChartRange(newStartDate, newEndDate);
this.onRangeChange(sliderSet);
};
timeline.prototype.onRangeChange = function(sliderSet) {
var range = this.vis.getVisibleChartRange()
, totalStartDate = range.start
, totalEndDate = range.end;
document.getElementById('startDate').value = this.dateToString(range.start);
document.getElementById('endDate').value = this.dateToString(range.end);
if (sliderSet !== true) {
$("#slider-range").dragslider("option", "values", [this.dateToString(range.start), this.dateToString(range.end)]);
}
};
timeline.prototype.stringToDate = function(input,format) {
var stringparts = input.split('-');
if (stringparts.length == 1) {
return new Date(input, 0);
} else {
format = format || 'yyyy-mm-dd'; // default format
var parts = input.match(/(\d+)/g),
i = 0,
fmt = {};
// extract date-part indexes from the format
format.replace(/(yyyy|dd|mm)/g, function(part) {
fmt[part] = i++;
});
return new Date(parts[fmt['yyyy']], parts[fmt['mm']] - 1, parts[fmt['dd']]);
}
};
timeline.prototype.dateToString = function(date){
return date.getFullYear();
};
timeline.prototype.moveTimeline = function(degree){
this.vis.move(degree);
this.onRangeChange();
};
timeline.prototype.addCaption = function() {
$('a > img[style]').each(function() {
$el = $(this);
var style = $el.attr('style');
$el.attr('style', '');
$el.parent().attr('style', style);
}); //Moves the inline styles
$("img").each(function() {
var title = $(this).attr('alt');
$(this).after('<span class="caption">' + title + '</span>');
}); //Adds the dynamic captions.
};
timeline.prototype.drawSliderBackground = function() {
var wrapperWidth = $('#mytimelinewrapper').width();
$('#slider-range').css('width', wrapperWidth);
var numDivisions = 8
, pixelBlockSize = wrapperWidth / numDivisions
, yearBlockSize = ((endDate - startDate) / (numDivisions));
if ($('.slider-date').length > 0) {
$('.slider-date').remove();
}
for (var i = 1; i < numDivisions; i++) {
var pixelTick = pixelBlockSize * i
, yearTickDecimal = (yearBlockSize * i) + startDate
, yearTick = yearTickDecimal.toPrecision(4)
, element = document.createElement('div');
element.className = "slider-date";
element.style.position = "absolute";
element.style.left = pixelTick + 'px';
document.getElementById("slider").appendChild(element);
element.innerHTML = yearTick;
}
};
// Adds dragging feature to the jQuery UI Slider.
(function($, undefined) {
$.widget("ui.dragslider", $.ui.slider, {
options: $.extend({}, $.ui.slider.prototype.options, {
rangeDrag: false
}),
_create: function() {
$.ui.slider.prototype._create.apply(this, arguments);
this._rangeCapture = false;
},
_mouseCapture: function(event) {
var o = this.options;
if (o.disabled) return false;
if (event.target == this.range.get(0) && o.rangeDrag === true && o.range === true) {
this._rangeCapture = true;
this._rangeStart = null;
} else {
this._rangeCapture = false;
}
$.ui.slider.prototype._mouseCapture.apply(this, arguments);
if (this._rangeCapture == true) {
this.handles.removeClass("ui-state-active").blur();
}
return true;
},
_mouseStop: function(event) {
this._rangeStart = null;
return $.ui.slider.prototype._mouseStop.apply(this, arguments);
},
_slide: function(event, index, newVal) {
if (!this._rangeCapture) {
return $.ui.slider.prototype._slide.apply(this, arguments);
}
if (this._rangeStart == null) {
this._rangeStart = newVal;
}
var oldValLeft = this.options.values[0],
oldValRight = this.options.values[1],
slideDist = newVal - this._rangeStart,
newValueLeft = oldValLeft + slideDist,
newValueRight = oldValRight + slideDist,
allowed;
if (this.options.values && this.options.values.length) {
if (newValueRight > this._valueMax() && slideDist > 0) {
slideDist -= (newValueRight - this._valueMax());
newValueLeft = oldValLeft + slideDist;
newValueRight = oldValRight + slideDist;
}
if (newValueLeft < this._valueMin()) {
slideDist += (this._valueMin() - newValueLeft);
newValueLeft = oldValLeft + slideDist;
newValueRight = oldValRight + slideDist;
}
if (slideDist != 0) {
newValues = this.values();
newValues[0] = newValueLeft;
newValues[1] = newValueRight;
// A slide can be canceled by returning false from the slide callback
allowed = this._trigger("slide", event, {
handle: this.handles[index],
value: slideDist,
values: newValues
});
if (allowed !== false) {
this.values(0, newValueLeft, true);
this.values(1, newValueRight, true);
}
this._rangeStart = newVal;
}
}
}
});
})(jQuery);
timeline.prototype.addSlider = function(){
var self = this;
this.drawSliderBackground();
$("#slider-range").dragslider({
range: true,
min: self.startDate,
max: self.endDate,
animate: true,
rangeDrag: true,
values: [self.visibleFirstDate, self.visibleLastDate],
slide: function(event, ui) {
var startingDate = ui.values[0]
, endingDate = ui.values[1];
self.setTime(startingDate, endingDate);
}
});
};
var json = [{
name: "Gregor Mendel Publishes His Paper on Heredity",
startDate: "1866-2-20",
description: 'Gregor Mendel, an Augustinian monk, publishes his paper, "Versuche über Pflanzenhybriden" ("Experiments on Plant Hybridization"), containing his findings on heredity in the journal Proceedings of the Natural History Society of Brunn. These findings, which demonstrate that inheritance follows particular laws, emerged from years of observations breeding pea plants at the experimental garden of the Augustinian Abbey of St. Thomas in Brno.',
type: 'text'
},{
name: "Francis Galton Publishes <em>Hereditary Genius.</em>",
startDate: "1869",
description: 'Francis Galton publishes his influential book Hereditary Genius. Within it, he attempts to understand the heritability of human intelligence from a social sciences perspective. This volume proved a cornerstone of the nascent eugenics movement.',
type: 'text'
},
{
name: "Gregor Mendel Publishes His Paper on Heredity",
| {
obj.end = stringToDate(item.endDate);
} | conditional_block |
|
api_op_CreateGovCloudAccount.go | and support purposes.
// The account in the commercial Region is automatically a member of the
// organization whose credentials made the request. Both accounts are associated
// with the same email address. A role is created in the new account in the
// commercial Region that allows the management account in the organization in the
// commercial Region to assume it. An Amazon Web Services GovCloud (US) account is
// then created and associated with the commercial account that you just created. A
// role is also created in the new Amazon Web Services GovCloud (US) account that
// can be assumed by the Amazon Web Services GovCloud (US) account that is
// associated with the management account of the commercial organization. For more
// information and to view a diagram that explains how account access works, see
// Organizations (https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html)
// in the Amazon Web Services GovCloud User Guide. For more information about
// creating accounts, see Creating a member account in your organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html)
// in the Organizations User Guide.
// - When you create an account in an organization using the Organizations
// console, API, or CLI commands, the information required for the account to
// operate as a standalone account is not automatically collected. This includes a
// payment method and signing the end user license agreement (EULA). If you must
// remove an account from your organization later, you can do so only after you
// provide the missing information. For more information, see Considerations
// before removing an account from an organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_account-before-remove.html)
// in the Organizations User Guide.
// - If you get an exception that indicates that you exceeded your account
// limits for the organization, contact Amazon Web Services Support (https://console.aws.amazon.com/support/home#/)
// .
// - If you get an exception that indicates that the operation failed because
// your organization is still initializing, wait one hour and then try again. If
// the error persists, contact Amazon Web Services Support (https://console.aws.amazon.com/support/home#/)
// .
// - Using CreateGovCloudAccount to create multiple temporary accounts isn't
// recommended. You can only close an account from the Amazon Web Services Billing
// and Cost Management console, and you must be signed in as the root user. For
// information on the requirements and process for closing an account, see
// Closing a member account in your organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html)
// in the Organizations User Guide.
//
// When you create a member account with this operation, you can choose whether to
// create the account with the IAM User and Role Access to Billing Information
// switch enabled. If you enable it, IAM users and roles that have appropriate
// permissions can view billing information for the account. If you disable it,
// only the account root user can access billing information. For information about
// how to disable this switch for an account, see Granting access to your billing
// information and tools (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html)
// .
func (c *Client) CreateGovCloudAccount(ctx context.Context, params *CreateGovCloudAccountInput, optFns ...func(*Options)) (*CreateGovCloudAccountOutput, error) {
if params == nil {
params = &CreateGovCloudAccountInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateGovCloudAccount", params, optFns, c.addOperationCreateGovCloudAccountMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateGovCloudAccountOutput)
out.ResultMetadata = metadata
return out, nil
}
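// Example usage (an illustrative sketch, not part of the generated SDK): a
// caller in a separate program would typically build a client from its own
// configuration, invoke the operation, and then inspect the returned
// CreateAccountStatus. The config and aws packages are the standard AWS SDK
// for Go v2 modules; the account name and email are placeholders, and imports
// are omitted for brevity.
//
//	cfg, err := config.LoadDefaultConfig(context.TODO())
//	if err != nil {
//		log.Fatal(err)
//	}
//	client := organizations.NewFromConfig(cfg)
//	out, err := client.CreateGovCloudAccount(context.TODO(), &organizations.CreateGovCloudAccountInput{
//		AccountName: aws.String("example-member"),
//		Email:       aws.String("sysadmin@example.com"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(out.CreateAccountStatus)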
type CreateGovCloudAccountInput struct {
// The friendly name of the member account. The account name can consist of only
// the characters [a-z],[A-Z],[0-9], hyphen (-), or dot (.) You can't separate
// characters with a dash (–).
//
// This member is required.
AccountName *string
// Specifies the email address of the owner to assign to the new member account in
// the commercial Region. This email address must not already be associated with
// another Amazon Web Services account. You must use a valid email address to
// complete account creation. The rules for a valid email address:
// - The address must be a minimum of 6 and a maximum of 64 characters long.
// - All characters must be 7-bit ASCII characters.
// - There must be one and only one @ symbol, which separates the local name
// from the domain name.
// - The local name can't contain any of the following characters: whitespace, "
// ' ( ) < > [ ] : ; , \ | % &
// - The local name can't begin with a dot (.)
// - The domain name can consist of only the characters [a-z],[A-Z],[0-9],
// hyphen (-), or dot (.)
// - The domain name can't begin or end with a hyphen (-) or dot (.)
// - The domain name must contain at least one dot
// You can't access the root user of the account or remove an account that was
// created with an invalid email address. Like all request parameters for
// CreateGovCloudAccount , the request for the email address for the Amazon Web
// Services GovCloud (US) account originates from the commercial Region, not from
// the Amazon Web Services GovCloud (US) Region.
//
// This member is required.
Email *string
// If set to ALLOW , the new linked account in the commercial Region enables IAM
// users to access account billing information if they have the required
// permissions. If set to DENY , only the root user of the new account can access
// account billing information. For more information, see About IAM access to the
// Billing and Cost Management console (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate)
// in the Amazon Web Services Billing and Cost Management User Guide. If you don't
// specify this parameter, the value defaults to ALLOW , and IAM users and roles
// with the required permissions can access billing information for the new
// account.
IamUserAccessToBilling types.IAMUserAccessToBilling
// (Optional) The name of an IAM role that Organizations automatically
// preconfigures in the new member accounts in both the Amazon Web Services
// GovCloud (US) Region and in the commercial Region. This role trusts the
// management account, allowing users in the management account to assume the role,
// as permitted by the management account administrator. The role has administrator
// permissions in the new member account. If you don't specify this parameter, the
// role name defaults to OrganizationAccountAccessRole . For more information about
// how to use this role to access the member account, see the following links:
// - Creating the OrganizationAccountAccessRole in an invited member account (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_access.html#orgs_manage_accounts_create-cross-account-role)
// in the Organizations User Guide
// - Steps 2 and 3 in IAM Tutorial: Delegate access across Amazon Web Services
// accounts using IAM roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html)
// in the IAM User Guide
// The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate
// this parameter. The pattern can include uppercase letters, lowercase letters,
// digits with no spaces, and any of the following characters: =,.@-
RoleName *string
// A list of tags that you want to attach to the newly created account. These tags
// are attached to the commercial account associated with the GovCloud account, and
// not to the GovCloud account itself. To add tags to the actual GovCloud account,
// call the TagResource operation in the GovCloud region after the new GovCloud
// account exists. For each tag in the list, you must specify both a tag key and a
// value. You can set the value to an empty string, but you can't set it to null .
// For more information about tagging, see Tagging Organizations resources (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html)
// in the Organizations User Guide. If any one of the tags is not valid or if you
// exceed the maximum allowed number of tags for an account, then the entire
// request fails and the account is not created.
Tags []types.Tag
noSmithyDocumentSerde
}
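// A fully specified input might also set the optional fields (values here are
// placeholders, and types.Tag is assumed to carry Key/Value string pointers as
// elsewhere in this package):
//
//	input := &CreateGovCloudAccountInput{
//		AccountName: aws.String("example-member"),
//		Email:       aws.String("sysadmin@example.com"),
//		RoleName:    aws.String("OrganizationAccountAccessRole"),
//		Tags: []types.Tag{
//			{Key: aws.String("team"), Value: aws.String("infrastructure")},
//		},
//	}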
type CreateGovCloudAccountOutput struct {
// Contains the status about a CreateAccount or CreateGovCloudAccount request to
// create an Amazon Web Services account or an Amazon Web Services GovCloud (US)
// account in an organization.
CreateAccountStatus *types.CreateAccountStatus
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) ad | dOperationCreateGovCloudAccountMiddlewares(s | identifier_name |
|
api_op_CreateGovCloudAccount.go | string
// Specifies the email address of the owner to assign to the new member account in
// the commercial Region. This email address must not already be associated with
// another Amazon Web Services account. You must use a valid email address to
// complete account creation. The rules for a valid email address:
// - The address must be a minimum of 6 and a maximum of 64 characters long.
// - All characters must be 7-bit ASCII characters.
// - There must be one and only one @ symbol, which separates the local name
// from the domain name.
// - The local name can't contain any of the following characters: whitespace, "
// ' ( ) < > [ ] : ; , \ | % &
// - The local name can't begin with a dot (.)
// - The domain name can consist of only the characters [a-z],[A-Z],[0-9],
// hyphen (-), or dot (.)
// - The domain name can't begin or end with a hyphen (-) or dot (.)
// - The domain name must contain at least one dot
// You can't access the root user of the account or remove an account that was
// created with an invalid email address. Like all request parameters for
// CreateGovCloudAccount , the request for the email address for the Amazon Web
// Services GovCloud (US) account originates from the commercial Region, not from
// the Amazon Web Services GovCloud (US) Region.
//
// This member is required.
Email *string
// If set to ALLOW , the new linked account in the commercial Region enables IAM
// users to access account billing information if they have the required
// permissions. If set to DENY , only the root user of the new account can access
// account billing information. For more information, see About IAM access to the
// Billing and Cost Management console (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate)
// in the Amazon Web Services Billing and Cost Management User Guide. If you don't
// specify this parameter, the value defaults to ALLOW , and IAM users and roles
// with the required permissions can access billing information for the new
// account.
IamUserAccessToBilling types.IAMUserAccessToBilling
// (Optional) The name of an IAM role that Organizations automatically
// preconfigures in the new member accounts in both the Amazon Web Services
// GovCloud (US) Region and in the commercial Region. This role trusts the
// management account, allowing users in the management account to assume the role,
// as permitted by the management account administrator. The role has administrator
// permissions in the new member account. If you don't specify this parameter, the
// role name defaults to OrganizationAccountAccessRole . For more information about
// how to use this role to access the member account, see the following links:
// - Creating the OrganizationAccountAccessRole in an invited member account (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_access.html#orgs_manage_accounts_create-cross-account-role)
// in the Organizations User Guide
// - Steps 2 and 3 in IAM Tutorial: Delegate access across Amazon Web Services
// accounts using IAM roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html)
// in the IAM User Guide
// The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate
// this parameter. The pattern can include uppercase letters, lowercase letters,
// digits with no spaces, and any of the following characters: =,.@-
RoleName *string
// A list of tags that you want to attach to the newly created account. These tags
// are attached to the commercial account associated with the GovCloud account, and
// not to the GovCloud account itself. To add tags to the actual GovCloud account,
// call the TagResource operation in the GovCloud region after the new GovCloud
// account exists. For each tag in the list, you must specify both a tag key and a
// value. You can set the value to an empty string, but you can't set it to null .
// For more information about tagging, see Tagging Organizations resources (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html)
// in the Organizations User Guide. If any one of the tags is not valid or if you
// exceed the maximum allowed number of tags for an account, then the entire
// request fails and the account is not created.
Tags []types.Tag
noSmithyDocumentSerde
}
type CreateGovCloudAccountOutput struct {
// Contains the status about a CreateAccount or CreateGovCloudAccount request to
// create an Amazon Web Services account or an Amazon Web Services GovCloud (US)
// account in an organization.
CreateAccountStatus *types.CreateAccountStatus
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateGovCloudAccountMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateGovCloudAccount{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateGovCloudAccount{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateGovCloudAccountResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateGovCloudAccountValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGovCloudAccount(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateGovCloudAccount(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "organizations",
OperationName: "CreateGovCloudAccount",
}
}
type opCreateGovCloudAccountResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateGovCloudAccountResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateGovCloudAccountResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k, | resolvedEndpoint.Headers.Get(k),
)
}
| random_line_split |
|
api_op_CreateGovCloudAccount.go | ://console.aws.amazon.com/support/home#/)
// .
// - Using CreateGovCloudAccount to create multiple temporary accounts isn't
// recommended. You can only close an account from the Amazon Web Services Billing
// and Cost Management console, and you must be signed in as the root user. For
// information on the requirements and process for closing an account, see
// Closing a member account in your organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html)
// in the Organizations User Guide.
//
// When you create a member account with this operation, you can choose whether to
// create the account with the IAM User and Role Access to Billing Information
// switch enabled. If you enable it, IAM users and roles that have appropriate
// permissions can view billing information for the account. If you disable it,
// only the account root user can access billing information. For information about
// how to disable this switch for an account, see Granting access to your billing
// information and tools (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html)
// .
func (c *Client) CreateGovCloudAccount(ctx context.Context, params *CreateGovCloudAccountInput, optFns ...func(*Options)) (*CreateGovCloudAccountOutput, error) {
if params == nil {
params = &CreateGovCloudAccountInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateGovCloudAccount", params, optFns, c.addOperationCreateGovCloudAccountMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateGovCloudAccountOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateGovCloudAccountInput struct {
// The friendly name of the member account. The account name can consist of only
// the characters [a-z],[A-Z],[0-9], hyphen (-), or dot (.) You can't separate
// characters with a dash (–).
//
// This member is required.
AccountName *string
// Specifies the email address of the owner to assign to the new member account in
// the commercial Region. This email address must not already be associated with
// another Amazon Web Services account. You must use a valid email address to
// complete account creation. The rules for a valid email address:
// - The address must be a minimum of 6 and a maximum of 64 characters long.
// - All characters must be 7-bit ASCII characters.
// - There must be one and only one @ symbol, which separates the local name
// from the domain name.
// - The local name can't contain any of the following characters: whitespace, "
// ' ( ) < > [ ] : ; , \ | % &
// - The local name can't begin with a dot (.)
// - The domain name can consist of only the characters [a-z],[A-Z],[0-9],
// hyphen (-), or dot (.)
// - The domain name can't begin or end with a hyphen (-) or dot (.)
// - The domain name must contain at least one dot
// You can't access the root user of the account or remove an account that was
// created with an invalid email address. Like all request parameters for
// CreateGovCloudAccount , the request for the email address for the Amazon Web
// Services GovCloud (US) account originates from the commercial Region, not from
// the Amazon Web Services GovCloud (US) Region.
//
// This member is required.
Email *string
// If set to ALLOW , the new linked account in the commercial Region enables IAM
// users to access account billing information if they have the required
// permissions. If set to DENY , only the root user of the new account can access
// account billing information. For more information, see About IAM access to the
// Billing and Cost Management console (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate)
// in the Amazon Web Services Billing and Cost Management User Guide. If you don't
// specify this parameter, the value defaults to ALLOW , and IAM users and roles
// with the required permissions can access billing information for the new
// account.
IamUserAccessToBilling types.IAMUserAccessToBilling
// (Optional) The name of an IAM role that Organizations automatically
// preconfigures in the new member accounts in both the Amazon Web Services
// GovCloud (US) Region and in the commercial Region. This role trusts the
// management account, allowing users in the management account to assume the role,
// as permitted by the management account administrator. The role has administrator
// permissions in the new member account. If you don't specify this parameter, the
// role name defaults to OrganizationAccountAccessRole . For more information about
// how to use this role to access the member account, see the following links:
// - Creating the OrganizationAccountAccessRole in an invited member account (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_access.html#orgs_manage_accounts_create-cross-account-role)
// in the Organizations User Guide
// - Steps 2 and 3 in IAM Tutorial: Delegate access across Amazon Web Services
// accounts using IAM roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html)
// in the IAM User Guide
// The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate
// this parameter. The pattern can include uppercase letters, lowercase letters,
// digits with no spaces, and any of the following characters: =,.@-
RoleName *string
// A list of tags that you want to attach to the newly created account. These tags
// are attached to the commercial account associated with the GovCloud account, and
// not to the GovCloud account itself. To add tags to the actual GovCloud account,
// call the TagResource operation in the GovCloud region after the new GovCloud
// account exists. For each tag in the list, you must specify both a tag key and a
// value. You can set the value to an empty string, but you can't set it to null .
// For more information about tagging, see Tagging Organizations resources (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html)
// in the Organizations User Guide. If any one of the tags is not valid or if you
// exceed the maximum allowed number of tags for an account, then the entire
// request fails and the account is not created.
Tags []types.Tag
noSmithyDocumentSerde
}
type CreateGovCloudAccountOutput struct {
// Contains the status about a CreateAccount or CreateGovCloudAccount request to
// create an Amazon Web Services account or an Amazon Web Services GovCloud (US)
// account in an organization.
CreateAccountStatus *types.CreateAccountStatus
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateGovCloudAccountMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateGovCloudAccount{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateGovCloudAccount{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateGovCloudAccountResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateGovCloudAccountValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGovCloudAccount(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
| return err
}
| conditional_block |
|
api_op_CreateGovCloudAccount.go | it, IAM users and roles that have appropriate
// permissions can view billing information for the account. If you disable it,
// only the account root user can access billing information. For information about
// how to disable this switch for an account, see Granting access to your billing
// information and tools (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html)
// .
func (c *Client) CreateGovCloudAccount(ctx context.Context, params *CreateGovCloudAccountInput, optFns ...func(*Options)) (*CreateGovCloudAccountOutput, error) {
if params == nil {
params = &CreateGovCloudAccountInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateGovCloudAccount", params, optFns, c.addOperationCreateGovCloudAccountMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateGovCloudAccountOutput)
out.ResultMetadata = metadata
return out, nil
}
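// Example usage (an illustrative sketch only, not part of the generated SDK):
// a minimal caller invoking this operation with just the two required fields.
// The import paths follow the usual aws-sdk-go-v2 layout and error handling is
// reduced to returning the error; adapt both for real code.
//
//	import (
//		"context"
//		"fmt"
//
//		"github.com/aws/aws-sdk-go-v2/aws"
//		"github.com/aws/aws-sdk-go-v2/config"
//		"github.com/aws/aws-sdk-go-v2/service/organizations"
//	)
//
//	func createGovCloudAccountExample(ctx context.Context) error {
//		cfg, err := config.LoadDefaultConfig(ctx)
//		if err != nil {
//			return err
//		}
//		client := organizations.NewFromConfig(cfg)
//		out, err := client.CreateGovCloudAccount(ctx, &organizations.CreateGovCloudAccountInput{
//			AccountName: aws.String("example-account"),
//			Email:       aws.String("owner@example.com"),
//		})
//		if err != nil {
//			return err
//		}
//		fmt.Printf("%+v\n", out.CreateAccountStatus)
//		return nil
//	}
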
type CreateGovCloudAccountInput struct {
// The friendly name of the member account. The account name can consist of only
// the characters [a-z],[A-Z],[0-9], hyphen (-), or dot (.) You can't separate
// characters with a dash (–).
//
// This member is required.
AccountName *string
// Specifies the email address of the owner to assign to the new member account in
// the commercial Region. This email address must not already be associated with
// another Amazon Web Services account. You must use a valid email address to
// complete account creation. The rules for a valid email address:
// - The address must be a minimum of 6 and a maximum of 64 characters long.
// - All characters must be 7-bit ASCII characters.
// - There must be one and only one @ symbol, which separates the local name
// from the domain name.
// - The local name can't contain any of the following characters: whitespace, "
// ' ( ) < > [ ] : ; , \ | % &
// - The local name can't begin with a dot (.)
// - The domain name can consist of only the characters [a-z],[A-Z],[0-9],
// hyphen (-), or dot (.)
// - The domain name can't begin or end with a hyphen (-) or dot (.)
// - The domain name must contain at least one dot
// You can't access the root user of the account or remove an account that was
// created with an invalid email address. Like all request parameters for
// CreateGovCloudAccount , the request for the email address for the Amazon Web
// Services GovCloud (US) account originates from the commercial Region, not from
// the Amazon Web Services GovCloud (US) Region.
//
// This member is required.
Email *string
// If set to ALLOW , the new linked account in the commercial Region enables IAM
// users to access account billing information if they have the required
// permissions. If set to DENY , only the root user of the new account can access
// account billing information. For more information, see About IAM access to the
// Billing and Cost Management console (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate)
// in the Amazon Web Services Billing and Cost Management User Guide. If you don't
// specify this parameter, the value defaults to ALLOW , and IAM users and roles
// with the required permissions can access billing information for the new
// account.
IamUserAccessToBilling types.IAMUserAccessToBilling
// (Optional) The name of an IAM role that Organizations automatically
// preconfigures in the new member accounts in both the Amazon Web Services
// GovCloud (US) Region and in the commercial Region. This role trusts the
// management account, allowing users in the management account to assume the role,
// as permitted by the management account administrator. The role has administrator
// permissions in the new member account. If you don't specify this parameter, the
// role name defaults to OrganizationAccountAccessRole . For more information about
// how to use this role to access the member account, see the following links:
// - Creating the OrganizationAccountAccessRole in an invited member account (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_access.html#orgs_manage_accounts_create-cross-account-role)
// in the Organizations User Guide
// - Steps 2 and 3 in IAM Tutorial: Delegate access across Amazon Web Services
// accounts using IAM roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html)
// in the IAM User Guide
// The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate
// this parameter. The pattern can include uppercase letters, lowercase letters,
// digits with no spaces, and any of the following characters: =,.@-
RoleName *string
// A list of tags that you want to attach to the newly created account. These tags
// are attached to the commercial account associated with the GovCloud account, and
// not to the GovCloud account itself. To add tags to the actual GovCloud account,
// call the TagResource operation in the GovCloud region after the new GovCloud
// account exists. For each tag in the list, you must specify both a tag key and a
// value. You can set the value to an empty string, but you can't set it to null .
// For more information about tagging, see Tagging Organizations resources (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html)
// in the Organizations User Guide. If any one of the tags is not valid or if you
// exceed the maximum allowed number of tags for an account, then the entire
// request fails and the account is not created.
Tags []types.Tag
noSmithyDocumentSerde
}
type CreateGovCloudAccountOutput struct {
// Contains the status about a CreateAccount or CreateGovCloudAccount request to
// create an Amazon Web Services account or an Amazon Web Services GovCloud (US)
// account in an organization.
CreateAccountStatus *types.CreateAccountStatus
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateGovCloudAccountMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateGovCloudAccount{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateGovCloudAccount{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateGovCloudAccountResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateGovCloudAccountValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGovCloudAccount(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateGovCloudAccount(region string) *awsmiddleware.RegisterServiceMetadata {
| return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "organizations",
OperationName: "CreateGovCloudAccount",
}
}
| identifier_body |
|
lib.rs | [`str::to_lowercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_lowercase
//! [`str::to_uppercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_uppercase
//!
//! <br>
//!
//! # Pasting documentation strings
//!
//! Within the `paste!` macro, arguments to a #\[doc ...\] attribute are
//! implicitly concatenated together to form a coherent documentation string.
//!
//! ```
//! use paste::paste;
//!
//! macro_rules! method_new {
//! ($ret:ident) => {
//! paste! {
//! #[doc = "Create a new `" $ret "` object."]
//! pub fn new() -> $ret { todo!() }
//! }
//! };
//! }
//!
//! pub struct Paste {}
//!
//! method_new!(Paste); // expands to #[doc = "Create a new `Paste` object"]
//! ```
#![doc(html_root_url = "https://docs.rs/paste/1.0.14")]
#![allow(
clippy::derive_partial_eq_without_eq,
clippy::doc_markdown,
clippy::match_same_arms,
clippy::module_name_repetitions,
clippy::needless_doctest_main,
clippy::too_many_lines
)]
extern crate proc_macro;
mod attr;
mod error;
mod segment;
use crate::attr::expand_attr;
use crate::error::{Error, Result};
use crate::segment::Segment;
use proc_macro::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree};
use std::char;
use std::iter;
use std::panic;
#[proc_macro]
pub fn paste(input: TokenStream) -> TokenStream {
let mut contains_paste = false;
let flatten_single_interpolation = true;
match expand(
input.clone(),
&mut contains_paste,
flatten_single_interpolation,
) {
Ok(expanded) => {
if contains_paste {
expanded
} else {
input
}
}
Err(err) => err.to_compile_error(),
}
}
#[doc(hidden)]
#[proc_macro]
pub fn item(input: TokenStream) -> TokenStream {
paste(input)
}
#[doc(hidden)]
#[proc_macro]
pub fn expr(input: TokenStream) -> TokenStream {
paste(input)
}
fn expand(
input: TokenStream,
contains_paste: &mut bool,
flatten_single_interpolation: bool,
) -> Result<TokenStream> {
let mut expanded = TokenStream::new();
let mut lookbehind = Lookbehind::Other;
let mut prev_none_group = None::<Group>;
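    // A `Delimiter::None` group is held back for one token of lookahead: if the
    // next tokens turn out to be `::`, its contents are spliced directly into the
    // output so pasted paths keep working; otherwise it is emitted unchanged.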
let mut tokens = input.into_iter().peekable();
loop {
let token = tokens.next();
if let Some(group) = prev_none_group.take() {
if match (&token, tokens.peek()) {
(Some(TokenTree::Punct(fst)), Some(TokenTree::Punct(snd))) => {
fst.as_char() == ':' && snd.as_char() == ':' && fst.spacing() == Spacing::Joint
}
_ => false,
} {
expanded.extend(group.stream());
*contains_paste = true;
} else {
expanded.extend(iter::once(TokenTree::Group(group)));
}
}
match token {
Some(TokenTree::Group(group)) => {
let delimiter = group.delimiter();
let content = group.stream();
let span = group.span();
if delimiter == Delimiter::Bracket && is_paste_operation(&content) {
let segments = parse_bracket_as_segments(content, span)?;
let pasted = segment::paste(&segments)?;
let tokens = pasted_to_tokens(pasted, span)?;
expanded.extend(tokens);
*contains_paste = true;
} else if flatten_single_interpolation
&& delimiter == Delimiter::None
&& is_single_interpolation_group(&content)
{
expanded.extend(content);
*contains_paste = true;
} else {
let mut group_contains_paste = false;
let is_attribute = delimiter == Delimiter::Bracket
&& (lookbehind == Lookbehind::Pound || lookbehind == Lookbehind::PoundBang);
let mut nested = expand(
content,
&mut group_contains_paste,
flatten_single_interpolation && !is_attribute,
)?;
if is_attribute {
nested = expand_attr(nested, span, &mut group_contains_paste)?;
}
let group = if group_contains_paste {
let mut group = Group::new(delimiter, nested);
group.set_span(span);
*contains_paste = true;
group
} else {
group.clone()
};
if delimiter != Delimiter::None {
expanded.extend(iter::once(TokenTree::Group(group)));
} else if lookbehind == Lookbehind::DoubleColon {
expanded.extend(group.stream());
*contains_paste = true;
} else {
prev_none_group = Some(group);
}
}
lookbehind = Lookbehind::Other;
}
Some(TokenTree::Punct(punct)) => {
lookbehind = match punct.as_char() {
':' if lookbehind == Lookbehind::JointColon => Lookbehind::DoubleColon,
':' if punct.spacing() == Spacing::Joint => Lookbehind::JointColon,
'#' => Lookbehind::Pound,
'!' if lookbehind == Lookbehind::Pound => Lookbehind::PoundBang,
_ => Lookbehind::Other,
};
expanded.extend(iter::once(TokenTree::Punct(punct)));
}
Some(other) => {
lookbehind = Lookbehind::Other;
expanded.extend(iter::once(other));
}
None => return Ok(expanded),
}
}
}
#[derive(PartialEq)]
enum Lookbehind {
JointColon,
DoubleColon,
Pound,
PoundBang,
Other,
}
// https://github.com/dtolnay/paste/issues/26
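// Returns true when a `Delimiter::None` group holds exactly one interpolated
// fragment (an identifier or `::`-separated path, a literal, or a lifetime),
// which the caller may then flatten into the surrounding token stream.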
fn is_single_interpolation_group(input: &TokenStream) -> bool {
#[derive(PartialEq)]
enum State {
Init,
Ident,
Literal,
Apostrophe,
Lifetime,
Colon1,
Colon2,
}
let mut state = State::Init;
for tt in input.clone() {
state = match (state, &tt) {
(State::Init, TokenTree::Ident(_)) => State::Ident,
(State::Init, TokenTree::Literal(_)) => State::Literal,
(State::Init, TokenTree::Punct(punct)) if punct.as_char() == '\'' => State::Apostrophe,
(State::Apostrophe, TokenTree::Ident(_)) => State::Lifetime,
(State::Ident, TokenTree::Punct(punct))
if punct.as_char() == ':' && punct.spacing() == Spacing::Joint =>
{
State::Colon1
}
(State::Colon1, TokenTree::Punct(punct))
if punct.as_char() == ':' && punct.spacing() == Spacing::Alone =>
{
State::Colon2
}
(State::Colon2, TokenTree::Ident(_)) => State::Ident,
_ => return false,
};
}
state == State::Ident || state == State::Literal || state == State::Lifetime
}
fn is_paste_operation(input: &TokenStream) -> bool {
let mut tokens = input.clone().into_iter();
match &tokens.next() {
Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {}
_ => return false,
}
let mut has_token = false;
loop {
match &tokens.next() {
Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => {
return has_token && tokens.next().is_none();
}
Some(_) => has_token = true,
None => return false,
}
}
}
fn parse_bracket_as_segments(input: TokenStream, scope: Span) -> Result<Vec<Segment>> {
let mut tokens = input.into_iter().peekable();
match &tokens.next() {
Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {}
Some(wrong) => return Err(Error::new(wrong.span(), "expected `<`")),
None => return Err(Error::new(scope, "expected `[< ... >]`")),
}
let mut segments = segment::parse(&mut tokens)?;
match &tokens.next() {
Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => {}
Some(wrong) => return Err(Error::new(wrong.span(), "expected `>`")),
None => return Err(Error::new(scope, "expected `[< ... >]`")),
}
if let Some(unexpected) = tokens.next() {
return Err(Error::new(
unexpected.span(),
"unexpected input, expected `[< ... >]`",
));
}
for segment in &mut segments {
if let Segment::String(string) = segment {
if string.value.starts_with("'\\u{") {
let hex = &string.value[4..string.value.len() - 2];
if let Ok(unsigned) = u32::from_str_radix(hex, 16) {
if let Some(ch) = char::from_u32(unsigned) {
string.value.clear();
string.value.push(ch); | continue;
}
}
} | random_line_split |
|
lib.rs | Tree};
use std::char;
use std::iter;
use std::panic;
#[proc_macro]
pub fn paste(input: TokenStream) -> TokenStream {
let mut contains_paste = false;
let flatten_single_interpolation = true;
match expand(
input.clone(),
&mut contains_paste,
flatten_single_interpolation,
) {
Ok(expanded) => {
if contains_paste {
expanded
} else {
input
}
}
Err(err) => err.to_compile_error(),
}
}
#[doc(hidden)]
#[proc_macro]
pub fn item(input: TokenStream) -> TokenStream {
paste(input)
}
#[doc(hidden)]
#[proc_macro]
pub fn expr(input: TokenStream) -> TokenStream {
paste(input)
}
fn expand(
input: TokenStream,
contains_paste: &mut bool,
flatten_single_interpolation: bool,
) -> Result<TokenStream> {
let mut expanded = TokenStream::new();
let mut lookbehind = Lookbehind::Other;
let mut prev_none_group = None::<Group>;
let mut tokens = input.into_iter().peekable();
loop {
let token = tokens.next();
if let Some(group) = prev_none_group.take() {
if match (&token, tokens.peek()) {
(Some(TokenTree::Punct(fst)), Some(TokenTree::Punct(snd))) => {
fst.as_char() == ':' && snd.as_char() == ':' && fst.spacing() == Spacing::Joint
}
_ => false,
} {
expanded.extend(group.stream());
*contains_paste = true;
} else {
expanded.extend(iter::once(TokenTree::Group(group)));
}
}
match token {
Some(TokenTree::Group(group)) => {
let delimiter = group.delimiter();
let content = group.stream();
let span = group.span();
if delimiter == Delimiter::Bracket && is_paste_operation(&content) {
let segments = parse_bracket_as_segments(content, span)?;
let pasted = segment::paste(&segments)?;
let tokens = pasted_to_tokens(pasted, span)?;
expanded.extend(tokens);
*contains_paste = true;
} else if flatten_single_interpolation
&& delimiter == Delimiter::None
&& is_single_interpolation_group(&content)
{
expanded.extend(content);
*contains_paste = true;
} else {
let mut group_contains_paste = false;
let is_attribute = delimiter == Delimiter::Bracket
&& (lookbehind == Lookbehind::Pound || lookbehind == Lookbehind::PoundBang);
let mut nested = expand(
content,
&mut group_contains_paste,
flatten_single_interpolation && !is_attribute,
)?;
if is_attribute {
nested = expand_attr(nested, span, &mut group_contains_paste)?;
}
let group = if group_contains_paste {
let mut group = Group::new(delimiter, nested);
group.set_span(span);
*contains_paste = true;
group
} else {
group.clone()
};
if delimiter != Delimiter::None {
expanded.extend(iter::once(TokenTree::Group(group)));
} else if lookbehind == Lookbehind::DoubleColon {
expanded.extend(group.stream());
*contains_paste = true;
} else {
prev_none_group = Some(group);
}
}
lookbehind = Lookbehind::Other;
}
Some(TokenTree::Punct(punct)) => {
lookbehind = match punct.as_char() {
':' if lookbehind == Lookbehind::JointColon => Lookbehind::DoubleColon,
':' if punct.spacing() == Spacing::Joint => Lookbehind::JointColon,
'#' => Lookbehind::Pound,
'!' if lookbehind == Lookbehind::Pound => Lookbehind::PoundBang,
_ => Lookbehind::Other,
};
expanded.extend(iter::once(TokenTree::Punct(punct)));
}
Some(other) => {
lookbehind = Lookbehind::Other;
expanded.extend(iter::once(other));
}
None => return Ok(expanded),
}
}
}
#[derive(PartialEq)]
enum Lookbehind {
JointColon,
DoubleColon,
Pound,
PoundBang,
Other,
}
// https://github.com/dtolnay/paste/issues/26
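// Returns true when a `Delimiter::None` group holds exactly one interpolated
// fragment (an identifier or `::`-separated path, a literal, or a lifetime),
// which the caller may then flatten into the surrounding token stream.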
fn is_single_interpolation_group(input: &TokenStream) -> bool {
#[derive(PartialEq)]
enum State {
Init,
Ident,
Literal,
Apostrophe,
Lifetime,
Colon1,
Colon2,
}
let mut state = State::Init;
for tt in input.clone() {
state = match (state, &tt) {
(State::Init, TokenTree::Ident(_)) => State::Ident,
(State::Init, TokenTree::Literal(_)) => State::Literal,
(State::Init, TokenTree::Punct(punct)) if punct.as_char() == '\'' => State::Apostrophe,
(State::Apostrophe, TokenTree::Ident(_)) => State::Lifetime,
(State::Ident, TokenTree::Punct(punct))
if punct.as_char() == ':' && punct.spacing() == Spacing::Joint =>
{
State::Colon1
}
(State::Colon1, TokenTree::Punct(punct))
if punct.as_char() == ':' && punct.spacing() == Spacing::Alone =>
{
State::Colon2
}
(State::Colon2, TokenTree::Ident(_)) => State::Ident,
_ => return false,
};
}
state == State::Ident || state == State::Literal || state == State::Lifetime
}
fn is_paste_operation(input: &TokenStream) -> bool {
let mut tokens = input.clone().into_iter();
match &tokens.next() {
Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {}
_ => return false,
}
let mut has_token = false;
loop {
match &tokens.next() {
Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => {
return has_token && tokens.next().is_none();
}
Some(_) => has_token = true,
None => return false,
}
}
}
fn parse_bracket_as_segments(input: TokenStream, scope: Span) -> Result<Vec<Segment>> {
let mut tokens = input.into_iter().peekable();
match &tokens.next() {
Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {}
Some(wrong) => return Err(Error::new(wrong.span(), "expected `<`")),
None => return Err(Error::new(scope, "expected `[< ... >]`")),
}
let mut segments = segment::parse(&mut tokens)?;
match &tokens.next() {
Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => {}
Some(wrong) => return Err(Error::new(wrong.span(), "expected `>`")),
None => return Err(Error::new(scope, "expected `[< ... >]`")),
}
if let Some(unexpected) = tokens.next() {
return Err(Error::new(
unexpected.span(),
"unexpected input, expected `[< ... >]`",
));
}
for segment in &mut segments {
if let Segment::String(string) = segment {
if string.value.starts_with("'\\u{") {
let hex = &string.value[4..string.value.len() - 2];
if let Ok(unsigned) = u32::from_str_radix(hex, 16) {
if let Some(ch) = char::from_u32(unsigned) {
string.value.clear();
string.value.push(ch);
continue;
}
}
}
if string.value.contains(&['#', '\\', '.', '+'][..])
|| string.value.starts_with("b'")
|| string.value.starts_with("b\"")
|| string.value.starts_with("br\"")
{
return Err(Error::new(string.span, "unsupported literal"));
}
let mut range = 0..string.value.len();
if string.value.starts_with("r\"") {
range.start += 2;
range.end -= 1;
} else if string.value.starts_with(&['"', '\''][..]) {
range.start += 1;
range.end -= 1;
}
string.value = string.value[range].replace('-', "_");
}
}
Ok(segments)
}
fn pasted_to_tokens(mut pasted: String, span: Span) -> Result<TokenStream> | {
let mut tokens = TokenStream::new();
#[cfg(not(no_literal_fromstr))]
{
use proc_macro::{LexError, Literal};
use std::str::FromStr;
if pasted.starts_with(|ch: char| ch.is_ascii_digit()) {
let literal = match panic::catch_unwind(|| Literal::from_str(&pasted)) {
Ok(Ok(literal)) => TokenTree::Literal(literal),
Ok(Err(LexError { .. })) | Err(_) => {
return Err(Error::new(
span,
&format!("`{:?}` is not a valid literal", pasted),
));
}
};
tokens.extend(iter::once(literal));
return Ok(tokens); | identifier_body |
|
lib.rs | elaborate example
//!
//! The next example shows a macro that generates accessor methods for some
//! struct fields. It demonstrates how you might find it useful to bundle a
//! paste invocation inside of a macro\_rules macro.
//!
//! ```
//! use paste::paste;
//!
//! macro_rules! make_a_struct_and_getters {
//! ($name:ident { $($field:ident),* }) => {
//! // Define a struct. This expands to:
//! //
//! // pub struct S {
//! // a: String,
//! // b: String,
//! // c: String,
//! // }
//! pub struct $name {
//! $(
//! $field: String,
//! )*
//! }
//!
//! // Build an impl block with getters. This expands to:
//! //
//! // impl S {
//! // pub fn get_a(&self) -> &str { &self.a }
//! // pub fn get_b(&self) -> &str { &self.b }
//! // pub fn get_c(&self) -> &str { &self.c }
//! // }
//! paste! {
//! impl $name {
//! $(
//! pub fn [<get_ $field>](&self) -> &str {
//! &self.$field
//! }
//! )*
//! }
//! }
//! }
//! }
//!
//! make_a_struct_and_getters!(S { a, b, c });
//!
//! fn call_some_getters(s: &S) -> bool {
//! s.get_a() == s.get_b() && s.get_c().is_empty()
//! }
//! #
//! # fn main() {}
//! ```
//!
//! <br><br>
//!
//! # Case conversion
//!
//! Use `$var:lower` or `$var:upper` in the segment list to convert an
//! interpolated segment to lower- or uppercase as part of the paste. For
//! example, `[<ld_ $reg:lower _expr>]` would paste to `ld_bc_expr` if invoked
//! with $reg=`Bc`.
//!
//! Use `$var:snake` to convert CamelCase input to snake\_case.
//! Use `$var:camel` to convert snake\_case to CamelCase.
//! These compose, so for example `$var:snake:upper` would give you SCREAMING\_CASE.
//!
//! The precise Unicode conversions are as defined by [`str::to_lowercase`] and
//! [`str::to_uppercase`].
//!
//! [`str::to_lowercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_lowercase
//! [`str::to_uppercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_uppercase
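//!
//! As an illustrative sketch (not part of the original crate docs), chaining
//! `:snake:upper` turns a CamelCase identifier into a SCREAMING_CASE name:
//!
//! ```
//! use paste::paste;
//!
//! macro_rules! define_flag {
//!     ($name:ident) => {
//!         paste! {
//!             const [<$name:snake:upper>]: &str = stringify!($name);
//!         }
//!     };
//! }
//!
//! define_flag!(MaxRetryCount);
//!
//! assert_eq!(MAX_RETRY_COUNT, "MaxRetryCount");
//! ```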
//!
//! <br>
//!
//! # Pasting documentation strings
//!
//! Within the `paste!` macro, arguments to a #\[doc ...\] attribute are
//! implicitly concatenated together to form a coherent documentation string.
//!
//! ```
//! use paste::paste;
//!
//! macro_rules! method_new {
//! ($ret:ident) => {
//! paste! {
//! #[doc = "Create a new `" $ret "` object."]
//! pub fn new() -> $ret { todo!() }
//! }
//! };
//! }
//!
//! pub struct Paste {}
//!
//! method_new!(Paste); // expands to #[doc = "Create a new `Paste` object"]
//! ```
#![doc(html_root_url = "https://docs.rs/paste/1.0.14")]
#![allow(
clippy::derive_partial_eq_without_eq,
clippy::doc_markdown,
clippy::match_same_arms,
clippy::module_name_repetitions,
clippy::needless_doctest_main,
clippy::too_many_lines
)]
extern crate proc_macro;
mod attr;
mod error;
mod segment;
use crate::attr::expand_attr;
use crate::error::{Error, Result};
use crate::segment::Segment;
use proc_macro::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree};
use std::char;
use std::iter;
use std::panic;
#[proc_macro]
pub fn paste(input: TokenStream) -> TokenStream {
let mut contains_paste = false;
let flatten_single_interpolation = true;
match expand(
input.clone(),
&mut contains_paste,
flatten_single_interpolation,
) {
Ok(expanded) => {
if contains_paste {
expanded
} else {
input
}
}
Err(err) => err.to_compile_error(),
}
}
#[doc(hidden)]
#[proc_macro]
pub fn item(input: TokenStream) -> TokenStream {
paste(input)
}
#[doc(hidden)]
#[proc_macro]
pub fn expr(input: TokenStream) -> TokenStream {
paste(input)
}
fn expand(
input: TokenStream,
contains_paste: &mut bool,
flatten_single_interpolation: bool,
) -> Result<TokenStream> {
let mut expanded = TokenStream::new();
let mut lookbehind = Lookbehind::Other;
let mut prev_none_group = None::<Group>;
let mut tokens = input.into_iter().peekable();
loop {
let token = tokens.next();
if let Some(group) = prev_none_group.take() {
if match (&token, tokens.peek()) {
(Some(TokenTree::Punct(fst)), Some(TokenTree::Punct(snd))) => {
fst.as_char() == ':' && snd.as_char() == ':' && fst.spacing() == Spacing::Joint
}
_ => false,
} {
expanded.extend(group.stream());
*contains_paste = true;
} else {
expanded.extend(iter::once(TokenTree::Group(group)));
}
}
match token {
Some(TokenTree::Group(group)) => {
let delimiter = group.delimiter();
let content = group.stream();
let span = group.span();
if delimiter == Delimiter::Bracket && is_paste_operation(&content) {
let segments = parse_bracket_as_segments(content, span)?;
let pasted = segment::paste(&segments)?;
let tokens = pasted_to_tokens(pasted, span)?;
expanded.extend(tokens);
*contains_paste = true;
} else if flatten_single_interpolation
&& delimiter == Delimiter::None
&& is_single_interpolation_group(&content)
{
expanded.extend(content);
*contains_paste = true;
} else {
let mut group_contains_paste = false;
let is_attribute = delimiter == Delimiter::Bracket
&& (lookbehind == Lookbehind::Pound || lookbehind == Lookbehind::PoundBang);
let mut nested = expand(
content,
&mut group_contains_paste,
flatten_single_interpolation && !is_attribute,
)?;
if is_attribute {
nested = expand_attr(nested, span, &mut group_contains_paste)?;
}
let group = if group_contains_paste {
let mut group = Group::new(delimiter, nested);
group.set_span(span);
*contains_paste = true;
group
} else {
group.clone()
};
if delimiter != Delimiter::None {
expanded.extend(iter::once(TokenTree::Group(group)));
} else if lookbehind == Lookbehind::DoubleColon {
expanded.extend(group.stream());
*contains_paste = true;
} else {
prev_none_group = Some(group);
}
}
lookbehind = Lookbehind::Other;
}
Some(TokenTree::Punct(punct)) => {
lookbehind = match punct.as_char() {
':' if lookbehind == Lookbehind::JointColon => Lookbehind::DoubleColon,
':' if punct.spacing() == Spacing::Joint => Lookbehind::JointColon,
'#' => Lookbehind::Pound,
'!' if lookbehind == Lookbehind::Pound => Lookbehind::PoundBang,
_ => Lookbehind::Other,
};
expanded.extend(iter::once(TokenTree::Punct(punct)));
}
Some(other) => {
lookbehind = Lookbehind::Other;
expanded.extend(iter::once(other));
}
None => return Ok(expanded),
}
}
}
#[derive(PartialEq)]
enum Lookbehind {
JointColon,
DoubleColon,
Pound,
PoundBang,
Other,
}
// https://github.com/dtolnay/paste/issues/26
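// Returns true when a `Delimiter::None` group holds exactly one interpolated
// fragment (an identifier or `::`-separated path, a literal, or a lifetime),
// which the caller may then flatten into the surrounding token stream.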
fn is_single_interpolation_group(input: &TokenStream) -> bool {
#[derive(PartialEq)]
enum | {
Init,
Ident,
Literal,
Apostrophe,
Lifetime,
Colon1,
Colon2,
}
let mut state = State::Init;
for tt in input.clone() {
state = match (state, &tt) {
(State::Init, TokenTree::Ident(_)) => State::Ident,
(State::Init, TokenTree::Literal(_)) => State::Literal,
(State::Init, TokenTree::Punct(punct)) if punct.as_char() == '\'' => State::Apostrophe,
(State::Apostrophe, TokenTree::Ident(_)) => State::Lifetime,
(State::Ident, TokenTree::Punct(punct))
if punct.as_char() == ':' && punct.spacing() == Spacing::Joint =>
{
State::Colon1
}
(State::Colon1, TokenTree::Punct(punct | State | identifier_name |
k_means_anchor_points.py | )
if avg_iou > best_avg_iou:
best_avg_iou = avg_iou
best_clusters = clusters
best_avg_iou_iteration = iter_count
print(f"\nIteration {iter_count}")
print(f"Average iou to closest centroid = {avg_iou}")
print(f"Sum of all distances (cost) = {np.sum(clusters_niou)}")
new_centroids = np.array([np.mean(c, axis=0) for c in clusters])
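        # Convergence check: each centroid's shift is measured as 1 - IoU between
        # its old and new (w, h) box, with the boxes assumed to share a corner.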
isect = np.prod(np.min(np.asarray([centroids, new_centroids]), axis=0), axis=1)
aa1 = np.prod(centroids, axis=1)
aa2 = np.prod(new_centroids, axis=1)
shifts = 1 - isect / (aa1 + aa2 - isect)
# for i, s in enumerate(shifts):
# print("{}: Cluster size: {}, Centroid distance shift: {}".format(i, len(clusters[i]), s))
if sum(shifts) == 0 or iter_count >= best_avg_iou_iteration + iteration_cutoff:
break
centroids = new_centroids
iter_count += 1
# Get anchor boxes from best clusters
anchors = np.asarray([np.mean(cluster, axis=0) for cluster in best_clusters])
anchors = anchors[anchors[:, 0].argsort()]
print(f"k-means clustering pascal anchor points (original coordinates) \
\nFound at iteration {best_avg_iou_iteration} with best average IoU: {best_avg_iou} \
\n{anchors*feature_size}")
return anchors
def plot_anchors(pascal_anchors, coco_anchors):
fig1 = plt.figure()
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_ylim([0, 500])
ax1.set_xlim([0, 900])
for i in range(len(pascal_anchors)):
if area(pascal_anchors[i]) > area(coco_anchors[i]):
bbox1 = pascal_anchors[i]
color1 = "green"
bbox2 = coco_anchors[i]
color2 = "blue"
else:
bbox1 = coco_anchors[i]
color1 = "blue"
bbox2 = pascal_anchors[i]
color2 = "green"
lrx = bbox1[0]-(bbox1[2]/2.0)
lry = bbox1[1]-(bbox1[3]/2.0)
ax1.add_patch(patches.Rectangle((lrx, lry), bbox1[2], bbox1[3], facecolor=color1))
lrx = bbox2[0]-(bbox2[2]/2.0)
lry = bbox2[1]-(bbox2[3]/2.0)
ax1.add_patch(patches.Rectangle((lrx, lry), bbox2[2], bbox2[3], facecolor=color2))
plt.show()
def load_fgvc_dataset():
name = 'fgvc-aircraft-2013b'
data = []
bboxes = {}
sizes = {}
source_dir = os.path.join(IO.data_source_dir, 'FGVC', name, 'data')
source_imgdir = os.path.join(source_dir, 'images')
with open(source_imgdir + '_box.txt') as ifs:
lines = ifs.read().strip().split('\n')
for line in lines:
image_id, bbox = line.split(' ', 1)
bboxes[image_id] = list(map(int, bbox.split()))
with open(source_imgdir + '_size.txt') as ifs:
lines = ifs.read().strip().split('\n')
for line in lines:
image_id, size = line.split(' ', 1)
sizes[image_id] = list(map(int, size.split()))
for key in bboxes.keys():
size = sizes[key]
bbox = bboxes[key]
bb = BoundingBox(size, bbox, 'fgvc').convert_to('darknet')
data.append(bb[2:])
return np.array(data)
def load_pascal_dataset():
name = 'pascal'
data = []
for year, image_set in datasets:
img_ids_filename = f'{source_dir}/{name}/VOCdevkit/VOC{year}/ImageSets/Main/{image_set}.txt'
ifs_img_ids = open(img_ids_filename)
img_ids = ifs_img_ids.read().strip().split()
for image_id in img_ids:
anno_filename = f'{source_dir}/{name}/VOCdevkit/VOC{year}/Annotations/{image_id}.xml'
ifs_anno = open(anno_filename)
tree = ET.parse(ifs_anno)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text),
float(xmlbox.find('xmax').text),
float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text))
bb = convert_bbox((w, h), b)
data.append(bb[2:])
ifs_anno.close()
ifs_img_ids.close()
return np.array(data)
def load_coco_dataset():
| h = img['height']
for ann in anns:
b = ann['bbox']
bb = convert_coco_bbox((w, h), b)
data.append(bb[2:])
return np.array(data)
if __name__ == "__main__":
# examples
# k, pascal, coco
# 1, 0.30933335617, 0.252004954777
# 2, 0.45787906725, 0.365835079771
# 3, 0.53198291772, 0.453180358467
# 4, 0.57562962803, 0.500282182136
# 5, 0.58694643198, 0.522010174068
# 6, 0.61789602056, 0.549904351137
# 7, 0.63443906479, 0.569485509501
# 8, 0.65114747974, 0.585718648162
# 9, 0.66393113546, 0.601564171461
# k-means picking the first k points as centroids
img_size = 416
k = 5
# random_data = np.random.random((1000, 2))
# centroids = np.random.random((k, 2))
# random_anchors = kmeans_iou(k, centroids, random_data)
source_dir = IO.data_source_dir
datasets = ('train', 'val', 'test')
fgvc_data = load_fgvc_dataset()
centroids = fgvc_data[np.random.choice(np.arange(len(fgvc_data)), k, replace=False)]
fgvc_anchors = kmeans_iou(k, centroids, fgvc_data, feature_size=img_size / 32)
# datasets = (('2007', 'train'), ('2007', 'val'), ('2012', 'train'), ('2012', 'val'))
# pascal_data = load_pascal_dataset()
# centroids = pascal_data[np.random.choice(np.arange(len(pascal_data)), k, replace=False)]
# # centroids = pascal_data[:k]
# pascal_anchors = kmeans_iou(k, centroids, pascal_data, feature_size=img_size / 32)
# datasets = ('train2014', 'val2014')
# # datasets = ('test2014', 'test2 | name = 'coco'
data = []
for dataset in datasets:
annfile = f'{source_dir}/{name}/annotations/instances_{dataset}.json'
coco = COCO(annfile)
cats = coco.loadCats(coco.getCatIds())
base_classes = {cat['id']: cat['name'] for cat in cats}
img_id_set = set()
for cat_ids in iter(base_classes.keys()):
img_ids = coco.getImgIds(catIds=cat_ids)
img_id_set = img_id_set.union(set(img_ids))
image_ids = list(img_id_set)
for image_id in image_ids:
annIds = coco.getAnnIds(imgIds=image_id)
anns = coco.loadAnns(annIds)
img = coco.loadImgs(image_id)[0]
w = img['width'] | identifier_body |
k_means_anchor_points.py | if avg_iou > best_avg_iou:
best_avg_iou = avg_iou
best_clusters = clusters
best_avg_iou_iteration = iter_count
print(f"\nIteration {iter_count}")
print(f"Average iou to closest centroid = {avg_iou}")
print(f"Sum of all distances (cost) = {np.sum(clusters_niou)}")
new_centroids = np.array([np.mean(c, axis=0) for c in clusters])
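        # Convergence check: each centroid's shift is measured as 1 - IoU between
        # its old and new (w, h) box, with the boxes assumed to share a corner.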
isect = np.prod(np.min(np.asarray([centroids, new_centroids]), axis=0), axis=1)
aa1 = np.prod(centroids, axis=1)
aa2 = np.prod(new_centroids, axis=1)
shifts = 1 - isect / (aa1 + aa2 - isect)
# for i, s in enumerate(shifts):
# print("{}: Cluster size: {}, Centroid distance shift: {}".format(i, len(clusters[i]), s))
if sum(shifts) == 0 or iter_count >= best_avg_iou_iteration + iteration_cutoff:
break
centroids = new_centroids
iter_count += 1
# Get anchor boxes from best clusters
anchors = np.asarray([np.mean(cluster, axis=0) for cluster in best_clusters])
anchors = anchors[anchors[:, 0].argsort()]
print(f"k-means clustering pascal anchor points (original coordinates) \
\nFound at iteration {best_avg_iou_iteration} with best average IoU: {best_avg_iou} \
\n{anchors*feature_size}")
return anchors
def plot_anchors(pascal_anchors, coco_anchors):
fig1 = plt.figure()
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_ylim([0, 500])
ax1.set_xlim([0, 900])
for i in range(len(pascal_anchors)):
if area(pascal_anchors[i]) > area(coco_anchors[i]):
bbox1 = pascal_anchors[i]
color1 = "green"
bbox2 = coco_anchors[i]
color2 = "blue"
else:
bbox1 = coco_anchors[i]
color1 = "blue"
bbox2 = pascal_anchors[i]
color2 = "green"
lrx = bbox1[0]-(bbox1[2]/2.0)
lry = bbox1[1]-(bbox1[3]/2.0)
ax1.add_patch(patches.Rectangle((lrx, lry), bbox1[2], bbox1[3], facecolor=color1))
lrx = bbox2[0]-(bbox2[2]/2.0)
lry = bbox2[1]-(bbox2[3]/2.0)
ax1.add_patch(patches.Rectangle((lrx, lry), bbox2[2], bbox2[3], facecolor=color2))
plt.show()
def load_fgvc_dataset():
name = 'fgvc-aircraft-2013b'
data = []
bboxes = {}
sizes = {}
source_dir = os.path.join(IO.data_source_dir, 'FGVC', name, 'data')
source_imgdir = os.path.join(source_dir, 'images')
with open(source_imgdir + '_box.txt') as ifs:
lines = ifs.read().strip().split('\n')
for line in lines:
image_id, bbox = line.split(' ', 1)
bboxes[image_id] = list(map(int, bbox.split()))
with open(source_imgdir + '_size.txt') as ifs:
lines = ifs.read().strip().split('\n')
for line in lines:
image_id, size = line.split(' ', 1)
sizes[image_id] = list(map(int, size.split()))
for key in bboxes.keys():
size = sizes[key]
bbox = bboxes[key]
bb = BoundingBox(size, bbox, 'fgvc').convert_to('darknet')
data.append(bb[2:])
return np.array(data)
def load_pascal_dataset():
name = 'pascal'
data = []
for year, image_set in datasets:
img_ids_filename = f'{source_dir}/{name}/VOCdevkit/VOC{year}/ImageSets/Main/{image_set}.txt'
ifs_img_ids = open(img_ids_filename)
img_ids = ifs_img_ids.read().strip().split()
for image_id in img_ids:
|
ifs_img_ids.close()
return np.array(data)
def load_coco_dataset():
name = 'coco'
data = []
for dataset in datasets:
annfile = f'{source_dir}/{name}/annotations/instances_{dataset}.json'
coco = COCO(annfile)
cats = coco.loadCats(coco.getCatIds())
base_classes = {cat['id']: cat['name'] for cat in cats}
img_id_set = set()
for cat_ids in iter(base_classes.keys()):
img_ids = coco.getImgIds(catIds=cat_ids)
img_id_set = img_id_set.union(set(img_ids))
image_ids = list(img_id_set)
for image_id in image_ids:
annIds = coco.getAnnIds(imgIds=image_id)
anns = coco.loadAnns(annIds)
img = coco.loadImgs(image_id)[0]
w = img['width']
h = img['height']
for ann in anns:
b = ann['bbox']
bb = convert_coco_bbox((w, h), b)
data.append(bb[2:])
return np.array(data)
if __name__ == "__main__":
# examples
# k, pascal, coco
# 1, 0.30933335617, 0.252004954777
# 2, 0.45787906725, 0.365835079771
# 3, 0.53198291772, 0.453180358467
# 4, 0.57562962803, 0.500282182136
# 5, 0.58694643198, 0.522010174068
# 6, 0.61789602056, 0.549904351137
# 7, 0.63443906479, 0.569485509501
# 8, 0.65114747974, 0.585718648162
# 9, 0.66393113546, 0.601564171461
# k-means picking the first k points as centroids
img_size = 416
k = 5
# random_data = np.random.random((1000, 2))
# centroids = np.random.random((k, 2))
# random_anchors = kmeans_iou(k, centroids, random_data)
source_dir = IO.data_source_dir
datasets = ('train', 'val', 'test')
fgvc_data = load_fgvc_dataset()
centroids = fgvc_data[np.random.choice(np.arange(len(fgvc_data)), k, replace=False)]
fgvc_anchors = kmeans_iou(k, centroids, fgvc_data, feature_size=img_size / 32)
# datasets = (('2007', 'train'), ('2007', 'val'), ('2012', 'train'), ('2012', 'val'))
# pascal_data = load_pascal_dataset()
# centroids = pascal_data[np.random.choice(np.arange(len(pascal_data)), k, replace=False)]
# # centroids = pascal_data[:k]
# pascal_anchors = kmeans_iou(k, centroids, pascal_data, feature_size=img_size / 32)
# datasets = ('train2014', 'val2014')
# # datasets = ('test2014', 'test2 | anno_filename = f'{source_dir}/{name}/VOCdevkit/VOC{year}/Annotations/{image_id}.xml'
ifs_anno = open(anno_filename)
tree = ET.parse(ifs_anno)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text),
float(xmlbox.find('xmax').text),
float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text))
bb = convert_bbox((w, h), b)
data.append(bb[2:])
ifs_anno.close() | conditional_block |
k_means_anchor_points.py | if avg_iou > best_avg_iou:
best_avg_iou = avg_iou
best_clusters = clusters
best_avg_iou_iteration = iter_count
print(f"\nIteration {iter_count}")
print(f"Average iou to closest centroid = {avg_iou}")
print(f"Sum of all distances (cost) = {np.sum(clusters_niou)}")
new_centroids = np.array([np.mean(c, axis=0) for c in clusters])
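        # Convergence check: each centroid's shift is measured as 1 - IoU between
        # its old and new (w, h) box, with the boxes assumed to share a corner.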
isect = np.prod(np.min(np.asarray([centroids, new_centroids]), axis=0), axis=1)
aa1 = np.prod(centroids, axis=1)
aa2 = np.prod(new_centroids, axis=1)
shifts = 1 - isect / (aa1 + aa2 - isect)
# for i, s in enumerate(shifts):
# print("{}: Cluster size: {}, Centroid distance shift: {}".format(i, len(clusters[i]), s))
if sum(shifts) == 0 or iter_count >= best_avg_iou_iteration + iteration_cutoff:
break
centroids = new_centroids
iter_count += 1
# Get anchor boxes from best clusters
anchors = np.asarray([np.mean(cluster, axis=0) for cluster in best_clusters])
anchors = anchors[anchors[:, 0].argsort()]
print(f"k-means clustering pascal anchor points (original coordinates) \
\nFound at iteration {best_avg_iou_iteration} with best average IoU: {best_avg_iou} \
\n{anchors*feature_size}")
return anchors
def plot_anchors(pascal_anchors, coco_anchors):
fig1 = plt.figure()
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_ylim([0, 500])
ax1.set_xlim([0, 900])
for i in range(len(pascal_anchors)):
if area(pascal_anchors[i]) > area(coco_anchors[i]):
bbox1 = pascal_anchors[i]
color1 = "green"
bbox2 = coco_anchors[i]
color2 = "blue"
else:
bbox1 = coco_anchors[i]
color1 = "blue"
bbox2 = pascal_anchors[i]
color2 = "green"
lrx = bbox1[0]-(bbox1[2]/2.0)
lry = bbox1[1]-(bbox1[3]/2.0)
ax1.add_patch(patches.Rectangle((lrx, lry), bbox1[2], bbox1[3], facecolor=color1))
lrx = bbox2[0]-(bbox2[2]/2.0)
lry = bbox2[1]-(bbox2[3]/2.0)
ax1.add_patch(patches.Rectangle((lrx, lry), bbox2[2], bbox2[3], facecolor=color2))
plt.show()
def load_fgvc_dataset():
name = 'fgvc-aircraft-2013b'
data = []
bboxes = {}
sizes = {}
source_dir = os.path.join(IO.data_source_dir, 'FGVC', name, 'data')
source_imgdir = os.path.join(source_dir, 'images')
with open(source_imgdir + '_box.txt') as ifs:
lines = ifs.read().strip().split('\n')
for line in lines:
image_id, bbox = line.split(' ', 1)
bboxes[image_id] = list(map(int, bbox.split()))
with open(source_imgdir + '_size.txt') as ifs:
lines = ifs.read().strip().split('\n')
for line in lines:
image_id, size = line.split(' ', 1)
sizes[image_id] = list(map(int, size.split()))
for key in bboxes.keys():
size = sizes[key]
bbox = bboxes[key]
bb = BoundingBox(size, bbox, 'fgvc').convert_to('darknet')
data.append(bb[2:])
return np.array(data)
def | ():
name = 'pascal'
data = []
for year, image_set in datasets:
img_ids_filename = f'{source_dir}/{name}/VOCdevkit/VOC{year}/ImageSets/Main/{image_set}.txt'
ifs_img_ids = open(img_ids_filename)
img_ids = ifs_img_ids.read().strip().split()
for image_id in img_ids:
anno_filename = f'{source_dir}/{name}/VOCdevkit/VOC{year}/Annotations/{image_id}.xml'
ifs_anno = open(anno_filename)
tree = ET.parse(ifs_anno)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text),
float(xmlbox.find('xmax').text),
float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text))
bb = convert_bbox((w, h), b)
data.append(bb[2:])
ifs_anno.close()
ifs_img_ids.close()
return np.array(data)
def load_coco_dataset():
name = 'coco'
data = []
for dataset in datasets:
annfile = f'{source_dir}/{name}/annotations/instances_{dataset}.json'
coco = COCO(annfile)
cats = coco.loadCats(coco.getCatIds())
base_classes = {cat['id']: cat['name'] for cat in cats}
img_id_set = set()
for cat_ids in iter(base_classes.keys()):
img_ids = coco.getImgIds(catIds=cat_ids)
img_id_set = img_id_set.union(set(img_ids))
image_ids = list(img_id_set)
for image_id in image_ids:
annIds = coco.getAnnIds(imgIds=image_id)
anns = coco.loadAnns(annIds)
img = coco.loadImgs(image_id)[0]
w = img['width']
h = img['height']
for ann in anns:
b = ann['bbox']
bb = convert_coco_bbox((w, h), b)
data.append(bb[2:])
return np.array(data)
if __name__ == "__main__":
# examples
# k, pascal, coco
# 1, 0.30933335617, 0.252004954777
# 2, 0.45787906725, 0.365835079771
# 3, 0.53198291772, 0.453180358467
# 4, 0.57562962803, 0.500282182136
# 5, 0.58694643198, 0.522010174068
# 6, 0.61789602056, 0.549904351137
# 7, 0.63443906479, 0.569485509501
# 8, 0.65114747974, 0.585718648162
# 9, 0.66393113546, 0.601564171461
# k-means picking the first k points as centroids
img_size = 416
k = 5
# random_data = np.random.random((1000, 2))
# centroids = np.random.random((k, 2))
# random_anchors = kmeans_iou(k, centroids, random_data)
source_dir = IO.data_source_dir
datasets = ('train', 'val', 'test')
fgvc_data = load_fgvc_dataset()
centroids = fgvc_data[np.random.choice(np.arange(len(fgvc_data)), k, replace=False)]
fgvc_anchors = kmeans_iou(k, centroids, fgvc_data, feature_size=img_size / 32)
# datasets = (('2007', 'train'), ('2007', 'val'), ('2012', 'train'), ('2012', 'val'))
# pascal_data = load_pascal_dataset()
# centroids = pascal_data[np.random.choice(np.arange(len(pascal_data)), k, replace=False)]
# # centroids = pascal_data[:k]
# pascal_anchors = kmeans_iou(k, centroids, pascal_data, feature_size=img_size / 32)
# datasets = ('train2014', 'val2014')
# # datasets = ('test2014', 'test2 | load_pascal_dataset | identifier_name |
k_means_anchor_points.py | import IO
# Original code @ferada http://codereview.stackexchange.com/questions/128315/k-means-clustering-algorithm-implementation
def area(x):
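    # Area (w * h) of a single (w, h) pair or of each row of an (n, 2) array.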
if len(x.shape) == 1:
return x[0] * x[1]
else:
return x[:, 0] * x[:, 1]
def kmeans_iou(k, centroids, points, iter_count=0, iteration_cutoff=25, feature_size=13):
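    # k-means over (width, height) pairs using 1 - IoU as the distance, with the
    # boxes compared as if they shared a corner (the usual anchor-box clustering
    # trick). Stops when the centroids stop moving or when the average IoU has
    # not improved for `iteration_cutoff` iterations.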
best_clusters = []
best_avg_iou = 0
best_avg_iou_iteration = 0
npoi = points.shape[0]
area_p = area(points) # (npoi, 2) -> (npoi,)
while True:
cen2 = centroids.repeat(npoi, axis=0).reshape(k, npoi, 2)
cdiff = points - cen2
cidx = np.where(cdiff < 0)
cen2[cidx] = points[cidx[1], cidx[2]]
wh = cen2.prod(axis=2).T # (k, npoi, 2) -> (npoi, k)
dist = 1. - (wh / (area_p[:, np.newaxis] + area(centroids) - wh)) # -> (npoi, k)
belongs_to_cluster = np.argmin(dist, axis=1) # (npoi, k) -> (npoi,)
clusters_niou = np.min(dist, axis=1) # (npoi, k) -> (npoi,)
clusters = [points[belongs_to_cluster == i] for i in range(k)]
avg_iou = np.mean(1. - clusters_niou)
if avg_iou > best_avg_iou:
best_avg_iou = avg_iou
best_clusters = clusters
best_avg_iou_iteration = iter_count
print(f"\nIteration {iter_count}")
print(f"Average iou to closest centroid = {avg_iou}")
print(f"Sum of all distances (cost) = {np.sum(clusters_niou)}")
new_centroids = np.array([np.mean(c, axis=0) for c in clusters])
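        # Convergence check: each centroid's shift is measured as 1 - IoU between
        # its old and new (w, h) box, with the boxes assumed to share a corner.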
isect = np.prod(np.min(np.asarray([centroids, new_centroids]), axis=0), axis=1)
aa1 = np.prod(centroids, axis=1)
aa2 = np.prod(new_centroids, axis=1)
shifts = 1 - isect / (aa1 + aa2 - isect)
# for i, s in enumerate(shifts):
# print("{}: Cluster size: {}, Centroid distance shift: {}".format(i, len(clusters[i]), s))
if sum(shifts) == 0 or iter_count >= best_avg_iou_iteration + iteration_cutoff:
break
centroids = new_centroids
iter_count += 1
# Get anchor boxes from best clusters
anchors = np.asarray([np.mean(cluster, axis=0) for cluster in best_clusters])
anchors = anchors[anchors[:, 0].argsort()]
print(f"k-means clustering pascal anchor points (original coordinates) \
\nFound at iteration {best_avg_iou_iteration} with best average IoU: {best_avg_iou} \
\n{anchors*feature_size}")
return anchors
def plot_anchors(pascal_anchors, coco_anchors):
fig1 = plt.figure()
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_ylim([0, 500])
ax1.set_xlim([0, 900])
for i in range(len(pascal_anchors)):
if area(pascal_anchors[i]) > area(coco_anchors[i]):
bbox1 = pascal_anchors[i]
color1 = "green"
bbox2 = coco_anchors[i]
color2 = "blue"
else:
bbox1 = coco_anchors[i]
color1 = "blue"
bbox2 = pascal_anchors[i]
color2 = "green"
lrx = bbox1[0]-(bbox1[2]/2.0)
lry = bbox1[1]-(bbox1[3]/2.0)
ax1.add_patch(patches.Rectangle((lrx, lry), bbox1[2], bbox1[3], facecolor=color1))
lrx = bbox2[0]-(bbox2[2]/2.0)
lry = bbox2[1]-(bbox2[3]/2.0)
ax1.add_patch(patches.Rectangle((lrx, lry), bbox2[2], bbox2[3], facecolor=color2))
plt.show()
def load_fgvc_dataset():
name = 'fgvc-aircraft-2013b'
data = []
bboxes = {}
sizes = {}
source_dir = os.path.join(IO.data_source_dir, 'FGVC', name, 'data')
source_imgdir = os.path.join(source_dir, 'images')
with open(source_imgdir + '_box.txt') as ifs:
lines = ifs.read().strip().split('\n')
for line in lines:
image_id, bbox = line.split(' ', 1)
bboxes[image_id] = list(map(int, bbox.split()))
with open(source_imgdir + '_size.txt') as ifs:
lines = ifs.read().strip().split('\n')
for line in lines:
image_id, size = line.split(' ', 1)
sizes[image_id] = list(map(int, size.split()))
for key in bboxes.keys():
size = sizes[key]
bbox = bboxes[key]
bb = BoundingBox(size, bbox, 'fgvc').convert_to('darknet')
data.append(bb[2:])
return np.array(data)
def load_pascal_dataset():
name = 'pascal'
data = []
for year, image_set in datasets:
img_ids_filename = f'{source_dir}/{name}/VOCdevkit/VOC{year}/ImageSets/Main/{image_set}.txt'
ifs_img_ids = open(img_ids_filename)
img_ids = ifs_img_ids.read().strip().split()
for image_id in img_ids:
anno_filename = f'{source_dir}/{name}/VOCdevkit/VOC{year}/Annotations/{image_id}.xml'
ifs_anno = open(anno_filename)
tree = ET.parse(ifs_anno)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text),
float(xmlbox.find('xmax').text),
float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text))
bb = convert_bbox((w, h), b)
data.append(bb[2:])
ifs_anno.close()
ifs_img_ids.close()
return np.array(data)
def load_coco_dataset():
name = 'coco'
data = []
for dataset in datasets:
annfile = f'{source_dir}/{name}/annotations/instances_{dataset}.json'
coco = COCO(annfile)
cats = coco.loadCats(coco.getCatIds())
base_classes = {cat['id']: cat['name'] for cat in cats}
img_id_set = set()
for cat_ids in iter(base_classes.keys()):
img_ids = coco.getImgIds(catIds=cat_ids)
img_id_set = img_id_set.union(set(img_ids))
image_ids = list(img_id_set)
for image_id in image_ids:
annIds = coco.getAnnIds(imgIds=image_id)
anns = coco.loadAnns(annIds)
img = coco.loadImgs(image_id)[0]
w = img['width']
h = img['height']
for ann in anns:
b = ann['bbox']
bb = convert_coco_bbox((w, h), b)
data.append(bb[2:])
return np.array(data)
if __name__ == "__main__":
# examples
# k, pascal, coco
# 1, 0.30933335617, 0.252004954777
# 2, 0.45787906725, 0.365835079771
# 3, 0.53198291772, 0.453180358467
# 4, 0.57562962803, 0.500282182136
# 5, 0.58694643198, 0.522010174068
# 6, 0.61789602056, 0.5499043 | import matplotlib.patches as patches
import xml.etree.ElementTree as ET
from pycocotools.coco import COCO
from utils import convert_bbox, convert_coco_bbox, BoundingBox | random_line_split |
|
Titanic_RandomForest_0526.py | ['Age']<=40) & (train['Sex']=="male")
train['Young_f'] = (train['Age']>=18) & (train['Age']<=40) & (train['Sex']=="female")
train['Cabin_known'] = train['Cabin'].isnull() == False
train['Age_known'] = train['Age'].isnull() == False
train['Family'] = train['SibSp'] + train['Parch']
train['Alone'] = (train['SibSp'] + train['Parch']) == 0
train['Large Family'] = (train['SibSp']>2) | (train['Parch']>3)
train['Deck'] = train['Cabin'].str[0] # first character of the cabin number
train['Deck'] = train['Deck'].fillna(value='U')
train['Ttype'] = train['Ticket'].str[0] # first character of the ticket number
train['Title'] = train['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
test['Child'] = test['Age']<=10
test['Young'] = (test['Age']>=18) & (test['Age']<=40)
test['Young_m'] = (test['Age']>=18) & (test['Age']<=40) & (test['Sex']=="male")
test['Young_f'] = (test['Age']>=18) & (test['Age']<=40) & (test['Sex']=="female")
test['Cabin_known'] = test['Cabin'].isnull() == False
test['Age_known'] = test['Age'].isnull() == False
test['Family'] = test['SibSp'] + test['Parch']
test['Alone'] = (test['SibSp'] + test['Parch']) == 0
test['Large Family'] = (test['SibSp']>2) | (test['Parch']>3)
test['Deck'] = test['Cabin'].str[0]
test['Deck'] = test['Deck'].fillna(value='U')
test['Ttype'] = test['Ticket'].str[0]
test['Title'] = test['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
# In[17]:
train['Title'].value_counts()
# In[18]:
train2 = train[train['Title'].isin(['Mr','Miss','Mrs','Master'])]
foo = train2['Age'].hist(by=train2['Title'], bins=np.arange(0,81,1))
# Determine the age range from the passenger's title:
# In[19]:
train['Young'] = (train['Age']<=30) | (train['Title'].isin(['Master','Miss','Mlle','Mme']))
test['Young'] = (test['Age']<=30) | (test['Title'].isin(['Master','Miss','Mlle','Mme']))
# Encode categorical features as integer codes
# In[20]:
train["Sex"] = train["Sex"].astype("category")
train["Sex"].cat.categories = [0,1]
train["Sex"] = train["Sex"].astype("int")
train["Embarked"] = train["Embarked"].astype("category")
train["Embarked"].cat.categories = [0,1,2]
train["Embarked"] = train["Embarked"].astype("int")
train["Deck"] = train["Deck"].astype("category")
train["Deck"].cat.categories = [0,1,2,3,4,5,6,7,8]
train["Deck"] = train["Deck"].astype("int")
test["Sex"] = test["Sex"].astype("category")
test["Sex"].cat.categories = [0,1]
test["Sex"] = test["Sex"].astype("int")
test["Embarked"] = test["Embarked"].astype("category")
test["Embarked"].cat.categories = [0,1,2]
test["Embarked"] = test["Embarked"].astype("int")
test["Deck"] = test["Deck"].astype("category")
test["Deck"].cat.categories = [0,1,2,3,4,5,6,7]
test["Deck"] = test["Deck"].astype("int")
# In[21]:
fig, ax = plt.subplots(figsize=(12, 10))
foo = sns.heatmap(train.drop('PassengerId',axis=1).corr(), vmax=1.0, square=True, annot=True)
# In[22]:
print(train.isnull().sum())
# In[23]:
train.columns
# In[24]:
train = train.drop(['PassengerId','Name','Age','SibSp','Parch','Ticket','Cabin','Age_known','Family','Title','Fare','Ttype'],axis=1)
# # Build the model
# In[25]:
from sklearn.model_selection import train_test_split
training, testing = train_test_split(train, test_size=0.2, random_state=0)
print("Total sample size = %i; training sample size = %i, testing sample size = %i" %(train.shape[0],training.shape[0],testing.shape[0]))
# In[26]:
y_test = testing['Survived']
testing = testing.drop('Survived',1)
# In[27]:
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
clf = clf.fit(training.drop('Survived',1),training['Survived'])
predict1 = clf.predict(testing)
predict1
# In[28]:
pd.Series(predict1).value_counts()
# In[29]:
from ml.classify import RandomForest
from pipeline import Pipeline
from feature_engineer.randomForestImportance import RandomForestImportance
alg1 = RandomForest(labelColName='Survived', featureColNames=list(testing.columns),treeNum=100,maxTreeDeep=None,randomColNum='log2',minNumObj=3)
imp = RandomForestImportance(alg1, labelColName='Survived')
p1 = Pipeline([alg1])
model1 = p1.fit(training)
#predict = model1.transform(testing.drop('Survived',1))
predict = model1.transform(testing)
# In[30]:
predict.head()
# In[31]:
predict.label.value_counts()
# ### Cross-validation
# In[32]:
from sklearn.model_selection import cross_val_score
print(alg1.n_estimators)
print(alg1)
score = cross_val_score(alg1, train.drop('Survived',1), train['Survived'], cv=5) # k-fold cross-validation
# In[33]:
score
# In[34]:
score.mean()
# ### Hyperparameter tuning
# In[35]:
# use a grid search algorithm to find the best parameters to run the classifier.
from sklearn.model_selection import GridSearchCV
param_grid = { 'treeNum':[50,100,300,500,700,1000],
'minNumObj':[1,3,5,10,30],
'randomColNum':['auto','sqrt','log2']} # max_features=n_features.
gs = GridSearchCV(estimator=alg1, param_grid=param_grid, scoring='accuracy', cv=5)
gs = gs.fit(train.drop('Survived',1), train['Survived'])
print(gs.best_score_)
print(gs.best_params_)
# ### Confusion matrix
# In[36]:
from sklearn.metrics import confusion_matrix
predict['Survived'] = y_test
x = confusion_matrix(predict['Survived'], predict['label'],labels=[0,1] )
x
# In[37]:
from evaluate.ConfusionMatrix import ConfusionMatrix
predict['Survived'] = y_test
cm = ConfusionMatrix(labelColName='Survived',predictionColName='label') #y_true,y_predict
cmatrix = cm.transform(predict)
# In[38]:
cmatrix
# In[39]:
class_names = ['Survived','UnSurvived']
def show_confusion_matrix(cmatrix, class_labels):
plt.matshow(cmatrix,cmap=plt.cm.YlGn,alpha=0.7)
ax = plt.gca()
ax.set_xlabel('Predicted Label', fontsize=16)
ax.set_xticks(range(0,len(class_labels)))
ax.set_xticklabels(class_labels,rotation=45)
ax.set_ylabel('Actual Label', fontsize=16, rotation=90)
ax.set_yticks(range(0,len(class_labels)))
ax.set_yticklabels(class_labels)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
for row in range(len(cmatrix)):
for col in range(len(cmatrix[row])):
ax.text(col, row, cmatrix[row][col], va='center', ha='center', fontsize=16)
show_confusion_matrix(cmatrix, class_names)
# From the plot: 8 passengers who actually survived were predicted as dead, 22 who died were predicted as survivors, and the remaining counts were classified correctly.
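# Sketch (added for illustration): the same reading can be checked numerically
# from the sklearn confusion matrix `x` computed above, where rows are actual
# labels and columns are predicted labels for classes [0, 1].
tn, fp, fn, tp = x.ravel()
print((tp + tn) / float(x.sum()))   # accuracy
print(tp / float(tp + fp))          # precision for the positive class
print(tp / float(tp + fn))          # recall for the positive class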
# In[40]:
from evaluate.ClassificationEvaluate import ClassificationEvaluate
ce = ClassificationEvaluate(labelColName='Survived',scoreColName='predict_score')
ce.transform(predict)
# fpr is on the x-axis and tpr on the y-axis; a lower fpr and a higher tpr are better. AUC is the area under the ROC curve.
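# Sketch (assumes `predict` holds the true labels in 'Survived' and the
# positive-class scores in 'predict_score', as used by ClassificationEvaluate above):
from sklearn.metrics import roc_curve, auc
fpr, tpr, thresholds = roc_curve(predict['Survived'], predict['predict_score'])
print(auc(fpr, tpr))  # area under the ROC curve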
# In[ ]:
| conditional_block |
||
Titanic_RandomForest_0526.py | [152])
# # Feature engineering
# Generate new feature columns
# In[16]:
train = train_data
test = test_data
train['Child'] = train['Age']<=10
train['Young'] = (train['Age']>=18) & (train['Age']<=40)
train['Young_m'] = (train['Age']>=18) & (train['Age']<=40) & (train['Sex']=="male")
train['Young_f'] = (train['Age']>=18) & (train['Age']<=40) & (train['Sex']=="female")
train['Cabin_known'] = train['Cabin'].isnull() == False
train['Age_known'] = train['Age'].isnull() == False
train['Family'] = train['SibSp'] + train['Parch']
train['Alone'] = (train['SibSp'] + train['Parch']) == 0
train['Large Family'] = (train['SibSp']>2) | (train['Parch']>3)
train['Deck'] = train['Cabin'].str[0] # first letter of the cabin number
train['Deck'] = train['Deck'].fillna(value='U')
train['Ttype'] = train['Ticket'].str[0] # first character of the ticket number
train['Title'] = train['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
test['Child'] = test['Age']<=10
test['Young'] = (test['Age']>=18) & (test['Age']<=40)
test['Young_m'] = (test['Age']>=18) & (test['Age']<=40) & (test['Sex']=="male")
test['Young_f'] = (test['Age']>=18) & (test['Age']<=40) & (test['Sex']=="female")
test['Cabin_known'] = test['Cabin'].isnull() == False
test['Age_known'] = test['Age'].isnull() == False
test['Family'] = test['SibSp'] + test['Parch']
test['Alone'] = (test['SibSp'] + test['Parch']) == 0
test['Large Family'] = (test['SibSp']>2) | (test['Parch']>3)
test['Deck'] = test['Cabin'].str[0]
test['Deck'] = test['Deck'].fillna(value='U')
test['Ttype'] = test['Ticket'].str[0]
test['Title'] = test['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
# In[17]:
train['Title'].value_counts()
# In[18]:
train2 = train[train['Title'].isin(['Mr','Miss','Mrs','Master'])]
foo = train2['Age'].hist(by=train2['Title'], bins=np.arange(0,81,1))
# Use the passenger's title to infer an age range:
# In[19]:
train['Young'] = (train['Age']<=30) | (train['Title'].isin(['Master','Miss','Mlle','Mme']))
test['Young'] = (test['Age']<=30) | (test['Title'].isin(['Master','Miss','Mlle','Mme']))
# Encode categorical features as integers
# In[20]:
train["Sex"] = train["Sex"].astype("category")
train["Sex"].cat.categories = [0,1]
train["Sex"] = train["Sex"].astype("int")
train["Embarked"] = train["Embarked"].astype("category")
train["Embarked"].cat.categories = [0,1,2]
train["Embarked"] = train["Embarked"].astype("int")
train["Deck"] = train["Deck"].astype("category")
train["Deck"].cat.categories = [0,1,2,3,4,5,6,7,8]
train["Deck"] = train["Deck"].astype("int")
test["Sex"] = test["Sex"].astype("category")
test["Sex"].cat.categories = [0,1]
test["Sex"] = test["Sex"].astype("int")
test["Embarked"] = test["Embarked"].astype("category")
test["Embarked"].cat.categories = [0,1,2]
test["Embarked"] = test["Embarked"].astype("int")
test["Deck"] = test["Deck"].astype("category")
test["Deck"].cat.categories = [0,1,2,3,4,5,6,7]
test["Deck"] = test["Deck"].astype("int")
# In[21]:
ax = plt.subplots( figsize =( 12 , 10 ) )
foo = sns.heatmap(train.drop('PassengerId',axis=1).corr(), vmax=1.0, square=True, annot=True)
# In[22]:
print(train.isnull().sum())
# In[23]:
train.columns
# In[24]:
train = train.drop(['PassengerId','Name','Age','SibSp','Parch','Ticket','Cabin','Age_known','Family','Title','Fare','Ttype'],axis=1)
# # Build the model
# In[25]:
from sklearn.model_selection import train_test_split
training, testing = train_test_split(train, test_size=0.2, random_state=0)
print("Total sample size = %i; training sample size = %i, testing sample size = %i" %(train.shape[0],training.shape[0],testing.shape[0]))
# In[26]:
y_test = testing['Survived']
testing = testing.drop('Survived',1)
# In[27]:
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
clf = clf.fit(training.drop('Survived',1),training['Survived'])
predict1 = clf.predict(testing)
predict1
# In[28]:
pd.Series(predict1).value_counts()
# In[29]:
from ml.classify import RandomForest
from pipeline import Pipeline
from feature_engineer.randomForestImportance import RandomForestImportance
alg1 = RandomForest(labelColName='Survived', featureColNames=list(testing.columns),treeNum=100,maxTreeDeep=None,randomColNum='log2',minNumObj=3)
imp = RandomForestImportance(alg1, labelColName='Survived')
p1 = Pipeline([alg1])
model1 = p1.fit(training)
#predict = model1.transform(testing.drop('Survived',1))
predict = model1.transform(testing)
# In[30]:
predict.head()
# In[31]:
predict.label.value_counts()
# ### Cross-validation
# In[32]:
from sklearn.model_selection import cross_val_score
print(alg1.n_estimators)
print(alg1)
score = cross_val_score(alg1, train.drop('Survived',1), train['Survived'], cv=5) # k-fold cross-validation (k=5)
# In[33]:
score
# In[34]:
score.mean()
# ### Hyperparameter tuning
# In[35]:
# use a grid search algorithm to find the best parameters to run the classifier.
from sklearn.model_selection import GridSearchCV
param_grid = { 'treeNum':[50,100,300,500,700,1000],
'minNumObj':[1,3,5,10,30],
'randomColNum':['auto','sqrt','log2']} # max_features=n_features.
gs = GridSearchCV(estimator=alg1, param_grid=param_grid, scoring='accuracy', cv=5)
gs = gs.fit(train.drop('Survived',1), train['Survived'])
print(gs.best_score_)
print(gs.best_params_)
# ### Confusion matrix
# In[36]:
from sklearn.metrics import confusion_matrix
predict['Survived'] = y_test
x = confusion_matrix(predict['Survived'], predict['label'],labels=[0,1] )
x
# In[37]:
from evaluate.ConfusionMatrix import ConfusionMatrix
predict['Survived'] = y_test
cm = ConfusionMatrix(labelColName='Survived',predictionColName='label') #y_true,y_predict
cmatrix = cm.transform(predict)
# In[38]:
cmatrix
# In[39]:
class_names = ['Survived','UnSurvived']
def show_confusion_matrix(cmatrix, class_labels):
plt.matshow(cmatrix,cmap=plt.cm.YlGn,alpha=0.7)
ax = plt.gca()
ax.set_xlabel('Predicted Label', fontsize=16)
ax.set_xticks(range(0,len(class_labels)))
ax.set_xticklabels(class_labels,rotation=45)
ax.set_ylabel('Actual Label', fontsize=16, rotation=90)
ax.set_yticks(range(0,len(class_labels)))
ax.set_yticklabels(class_labels)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
for row in range(len(cmatrix)):
for col in range(len(cmatrix[row])):
ax.text(col, row, cmatrix[row][col], va='center', ha='center', fontsize=16)
show_confusion_matrix(cmatrix, class_names)
# From the plot we can see: | 8 passengers who actually survived were predicted as dead, and 22 | identifier_name
|
Titanic_RandomForest_0526.py | )
plt.subplot(337)
sns.distplot(surv['Fare'].dropna().values,bins = range(0,513,1),kde=False, color='blue')
sns.distplot(nosurv['Fare'].dropna().values,bins = range(0,513,1),kde=False, color='red', axlabel='Fare')
plt.subplot(338)
sns.distplot(np.log10(surv['Fare'].dropna().values+1), kde=False, color='blue')
sns.distplot(np.log10(nosurv['Fare'].dropna().values+1), kde=False, color='red',axlabel='Fare')
plt.subplots_adjust(top=0.92, bottom=0.07, left=0.10, right=0.95, hspace=0.25,wspace=0.35)
import numpy as np
print("Median age survivors: %.1f, Median age non-survivers: %.1f" %(np.median(surv['Age'].dropna()), np.median(nosurv['Age'].dropna())))
# The figure above visualizes the distributions of the different features: numeric features are shown as histograms and categorical features as bar charts.
#
# From the figure: children under 10 have a higher survival rate, women survive more often than men, 1st-class passengers survive more often than the others, passengers travelling with 1-3 relatives survive more often than those travelling alone or with a large family, passengers who embarked at port C have a higher survival rate, and the cheaper the cabin the lower the survival rate.
# The black vertical lines on the bar charts are confidence-interval markers (shown with ci=1, hidden with ci=0); their length indicates the size of the confidence interval around the estimated value.
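# Sketch (illustration only): the survival patterns described above can be
# double-checked with simple group-by rates.
print(train_data.groupby('Sex')['Survived'].mean())
print(train_data.groupby('Pclass')['Survived'].mean())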
# ## Relationships between features
# In[8]:
plt.figure(figsize=(12,10))
foo = sns.heatmap(train_data.drop(['PassengerId','Name'],axis=1).corr(),vmax=0.4,square=True, annot=True)
# The heatmap shows that Pclass and Fare are strongly related to Survived.
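# Sketch (illustration only): the same relationship can be read off directly by
# sorting the correlations with 'Survived'.
print(train_data.drop(['PassengerId','Name'], axis=1).corr()['Survived'].sort_values())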
# # Data preprocessing
# ### Missing Values
# In[9]:
print(train_data.isnull().sum())
# In[10]:
print(test_data.isnull().sum())
# #### Embarked
# In[11]:
print(train_data[train_data['Embarked'].isnull()])
# Both passengers are women in 1st class who survived, so based on the Embarked distribution of survivors we can fill the missing values with 'C'.
# In[12]:
#train_data['Embarked'].iloc[61]='C'
#train_data['Embarked'].iloc[829]='C'
from preprocess.FillMissingValues import FillMissingValues
FillMissingValues('Embarked',alter_method='ud',user_defined='C')
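# Sketch: a plain-pandas equivalent of the intended fill (assuming the custom
# FillMissingValues transform above fills the two missing 'Embarked' values with 'C'):
train_data['Embarked'] = train_data['Embarked'].fillna('C')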
# In[13]:
print (train_data.iloc[61])
# #### Fare
# In[14]:
print(test_data[test_data['Fare'].isnull()])
# In[15]:
combine = pd.concat([train_data.drop('Survived',1),test_data])
test_data['Fare'].iloc[152] = combine['Fare'][combine['Pclass'] == 3].dropna().median()
print(test_data['Fare'].iloc[152])
# # Feature engineering
# Generate new feature columns
# In[16]:
train = train_data
test = test_data
train['Child'] = train['Age']<=10
train['Young'] = (train['Age']>=18) & (train['Age']<=40)
train['Young_m'] = (train['Age']>=18) & (train['Age']<=40) & (train['Sex']=="male")
train['Young_f'] = (train['Age']>=18) & (train['Age']<=40) & (train['Sex']=="female")
train['Cabin_known'] = train['Cabin'].isnull() == False
train['Age_known'] = train['Age'].isnull() == False
train['Family'] = train['SibSp'] + train['Parch']
train['Alone'] = (train['SibSp'] + train['Parch']) == 0
train['Large Family'] = (train['SibSp']>2) | (train['Parch']>3)
train['Deck'] = train['Cabin'].str[0] # first letter of the cabin number
train['Deck'] = train['Deck'].fillna(value='U')
train['Ttype'] = train['Ticket'].str[0] # first character of the ticket number
train['Title'] = train['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
test['Child'] = test['Age']<=10
test['Young'] = (test['Age']>=18) & (test['Age']<=40)
test['Young_m'] = (test['Age']>=18) & (test['Age']<=40) & (test['Sex']=="male")
test['Young_f'] = (test['Age']>=18) & (test['Age']<=40) & (test['Sex']=="female")
test['Cabin_known'] = test['Cabin'].isnull() == False
test['Age_known'] = test['Age'].isnull() == False
test['Family'] = test['SibSp'] + test['Parch']
test['Alone'] = (test['SibSp'] + test['Parch']) == 0
test['Large Family'] = (test['SibSp']>2) | (test['Parch']>3)
test['Deck'] = test['Cabin'].str[0]
test['Deck'] = test['Deck'].fillna(value='U')
test['Ttype'] = test['Ticket'].str[0]
test['Title'] = test['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
# In[17]:
train['Title'].value_counts()
# In[18]:
train2 = train[train['Title'].isin(['Mr','Miss','Mrs','Master'])]
foo = train2['Age'].hist(by=train2['Title'], bins=np.arange(0,81,1))
# Use the passenger's title to infer an age range:
# In[19]:
train['Young'] = (train['Age']<=30) | (train['Title'].isin(['Master','Miss','Mlle','Mme']))
test['Young'] = (test['Age']<=30) | (test['Title'].isin(['Master','Miss','Mlle','Mme']))
# Encode categorical features as integers
# In[20]:
train["Sex"] = train["Sex"].astype("category")
train["Sex"].cat.categories = [0,1]
train["Sex"] = train["Sex"].astype("int")
train["Embarked"] = train["Embarked"].astype("category")
train["Embarked"].cat.categories = [0,1,2]
train["Embarked"] = train["Embarked"].astype("int")
train["Deck"] = train["Deck"].astype("category")
train["Deck"].cat.categories = [0,1,2,3,4,5,6,7,8]
train["Deck"] = train["Deck"].astype("int")
test["Sex"] = test["Sex"].astype("category")
test["Sex"].cat.categories = [0,1]
test["Sex"] = test["Sex"].astype("int")
test["Embarked"] = test["Embarked"].astype("category")
test["Embarked"].cat.categories = [0,1,2]
test["Embarked"] = test["Embarked"].astype("int")
test["Deck"] = test["Deck"].astype("category")
test["Deck"].cat.categories = [0,1,2,3,4,5,6,7]
test["Deck"] = test["Deck"].astype("int")
# In[21]:
ax = plt.subplots( figsize =( 12 , 10 ) )
foo = sns.heatmap(train.drop('PassengerId',axis=1).corr(), vmax=1.0, square=True, annot=True)
# In[22]:
print(train.isnull().sum())
# In[23]:
train.columns
# In[24]:
train = train.drop(['PassengerId','Name','Age','SibSp','Parch','Ticket','Cabin','Age_known','Family','Title','Fare','Ttype'],axis=1)
# # Build the model
# In[25]:
from sklearn.model_selection import train_test_split
training, testing = train_test_split(train, test_size=0.2, random_state=0)
print("Total sample size = %i; training sample size = %i, testing sample size = %i" %(train.shape[0],training.shape[0],testing.shape[0]))
# In[26]:
y_test = testing['Survived']
testing = testing.drop('Survived',1)
# In[27]:
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
clf = clf.fit(training.drop('Survived',1),training['Survived'])
predict1 = clf.predict(testing)
predict1
| # In[28]:
pd.Series(predict1).value_counts() | random_line_split |
|
Titanic_RandomForest_0526.py | ['Age']<=40) & (train['Sex']=="male")
train['Young_f'] = (train['Age']>=18) & (train['Age']<=40) & (train['Sex']=="female")
train['Cabin_known'] = train['Cabin'].isnull() == False
train['Age_known'] = train['Age'].isnull() == False
train['Family'] = train['SibSp'] + train['Parch']
train['Alone'] = (train['SibSp'] + train['Parch']) == 0
train['Large Family'] = (train['SibSp']>2) | (train['Parch']>3)
train['Deck'] = train['Cabin'].str[0] # first letter of the cabin number
train['Deck'] = train['Deck'].fillna(value='U')
train['Ttype'] = train['Ticket'].str[0] # first character of the ticket number
train['Title'] = train['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
test['Child'] = test['Age']<=10
test['Young'] = (test['Age']>=18) & (test['Age']<=40)
test['Young_m'] = (test['Age']>=18) & (test['Age']<=40) & (test['Sex']=="male")
test['Young_f'] = (test['Age']>=18) & (test['Age']<=40) & (test['Sex']=="female")
test['Cabin_known'] = test['Cabin'].isnull() == False
test['Age_known'] = test['Age'].isnull() == False
test['Family'] = test['SibSp'] + test['Parch']
test['Alone'] = (test['SibSp'] + test['Parch']) == 0
test['Large Family'] = (test['SibSp']>2) | (test['Parch']>3)
test['Deck'] = test['Cabin'].str[0]
test['Deck'] = test['Deck'].fillna(value='U')
test['Ttype'] = test['Ticket'].str[0]
test['Title'] = test['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
# In[17]:
train['Title'].value_counts()
# In[18]:
train2 = train[train['Title'].isin(['Mr','Miss','Mrs','Master'])]
foo = train2['Age'].hist(by=train2['Title'], bins=np.arange(0,81,1))
# Use the passenger's title to infer an age range:
# In[19]:
train['Young'] = (train['Age']<=30) | (train['Title'].isin(['Master','Miss','Mlle','Mme']))
test['Young'] = (test['Age']<=30) | (test['Title'].isin(['Master','Miss','Mlle','Mme']))
# Encode categorical features as integers
# In[20]:
train["Sex"] = train["Sex"].astype("category")
train["Sex"].cat.categories = [0,1]
train["Sex"] = train["Sex"].astype("int")
train["Embarked"] = train["Embarked"].astype("category")
train["Embarked"].cat.categories = [0,1,2]
train["Embarked"] = train["Embarked"].astype("int")
train["Deck"] = train["Deck"].astype("category")
train["Deck"].cat.categories = [0,1,2,3,4,5,6,7,8]
train["Deck"] = train["Deck"].astype("int")
test["Sex"] = test["Sex"].astype("category")
test["Sex"].cat.categories = [0,1]
test["Sex"] = test["Sex"].astype("int")
test["Embarked"] = test["Embarked"].astype("category")
test["Embarked"].cat.categories = [0,1,2]
test["Embarked"] = test["Embarked"].astype("int")
test["Deck"] = test["Deck"].astype("category")
test["Deck"].cat.categories = [0,1,2,3,4,5,6,7]
test["Deck"] = test["Deck"].astype("int")
# In[21]:
ax = plt.subplots( figsize =( 12 , 10 ) )
foo = sns.heatmap(train.drop('PassengerId',axis=1).corr(), vmax=1.0, square=True, annot=True)
# In[22]:
print(train.isnull().sum())
# In[23]:
train.columns
# In[24]:
train = train.drop(['PassengerId','Name','Age','SibSp','Parch','Ticket','Cabin','Age_known','Family','Title','Fare','Ttype'],axis=1)
# # Build the model
# In[25]:
from sklearn.model_selection import train_test_split
training, testing = train_test_split(train, test_size=0.2, random_state=0)
print("Total sample size = %i; training sample size = %i, testing sample size = %i" %(train.shape[0],training.shape[0],testing.shape[0]))
# In[26]:
y_test = testing['Survived']
testing = testing.drop('Survived',1)
# In[27]:
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
clf = clf.fit(training.drop('Survived',1),training['Survived'])
predict1 = clf.predict(testing)
predict1
# In[28]:
pd.Series(predict1).value_counts()
# In[29]:
from ml.classify import RandomForest
from pipeline import Pipeline
from feature_engineer.randomForestImportance import RandomForestImportance
alg1 = RandomForest(labelColName='Survived', featureColNames=list(testing.columns),treeNum=100,maxTreeDeep=None,randomColNum='log2',minNumObj=3)
imp = RandomForestImportance(alg1, labelColName='Survived')
p1 = Pipeline([alg1])
model1 = p1.fit(training)
#predict = model1.transform(testing.drop('Survived',1))
predict = model1.transform(testing)
# In[30]:
predict.head()
# In[31]:
predict.label.value_counts()
# ### Cross-validation
# In[32]:
from sklearn.model_selection import cross_val_score
print(alg1.n_estimators)
print(alg1)
score = cross_val_score(alg1, train.drop('Survived',1), train['Survived'], cv=5) # k-fold cross-validation (k=5)
# In[33]:
score
# In[34]:
score.mean()
# ### Hyperparameter tuning
# In[35]:
# use a grid search algorithm to find the best parameters to run the classifier.
from sklearn.model_selection import GridSearchCV
param_grid = { 'treeNum':[50,100,300,500,700,1000],
'minNumObj':[1,3,5,10,30],
'randomColNum':['auto','sqrt','log2']} # max_features=n_features.
gs = GridSearchCV(estimator=alg1, param_grid=param_grid, scoring='accuracy', cv=5)
gs = gs.fit(train.drop('Survived',1), train['Survived'])
print(gs.best_score_)
print(gs.best_params_)
# ### Confusion matrix
# In[36]:
from sklearn.metrics import confusion_matrix
predict['Survived'] = y_test
x = confusion_matrix(predict['Survived'], predict['label'],labels=[0,1] )
x
# In[37]:
from evaluate.ConfusionMatrix import ConfusionMatrix
predict['Survived'] = y_test
cm = ConfusionMatrix(labelColName='Survived',predictionColName='label') #y_true,y_predict
cmatrix = cm.transform(predict)
# In[38]:
cmatrix
# In[39]:
class_names = ['Survived','UnSurvived']
def show_confusion_matrix(cmatrix, class_labels):
plt.matshow(cmatrix,cmap=plt.cm.YlGn,alpha=0.7)
ax = plt.gca()
ax.set_xlabel('Predicted Label', fontsize=16)
ax.set_xticks(range(0,len(class_labels)))
ax.set_xticklabels(class_labels,rotation=45)
ax.set_ylabel('Actual Label', fontsize=16, rotation=90)
ax.set_yticks(range(0,len(class_labels)))
ax.set_yticklabels(class_labels)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
for row in range(len(cmatrix)):
for col in range(len(cmatrix[row])):
ax.text(col, row, cmatrix[row][col], va='center', ha='center', fontsize=16)
show_confusion_matrix(cmatrix, class_names)
# From the plot: 8 passengers who actually survived were predicted as dead, 22 who died were predicted as survivors, and the remaining counts were classified correctly.
# In[ | 40]:
from evaluate.ClassificationEvaluate import ClassificationEvaluate
ce = ClassificationEvaluate(labelColName='Survived',scoreColName='predict_score')
ce.transform(predict)
# fpr is on the x-axis and tpr on the y-axis; a lower fpr and a higher tpr are better. AUC is the area under the ROC curve.
# In[ ]:
| identifier_body |
|
kube.go | d)", apierrors.FromObject(obj).Error(), r.StatusCode)
}
if s, ok := obj.(*metav1.Status); ok {
d := s.Details
if d == nil {
return obj, s.Message, nil
}
return obj, fmt.Sprintf("%s%s `%s", d.Kind, d.Group, d.Name), nil
}
if in, ok := obj.(metav1.Object); ok {
return obj, fmt.Sprintf("%s%s `%s'", strings.ToLower(gvk.Kind), maybeCore(gvk.Group), maybeNamespaced(in.GetName(), in.GetNamespace())), nil
}
if _, ok := obj.(metav1.ListInterface); ok {
return obj, fmt.Sprintf("%s%s'", strings.ToLower(gvk.Kind), maybeCore(gvk.Group)), nil
}
return nil, "", fmt.Errorf("returned object does not implement `metav1.Object` or `metav1.ListInterface`: %v", obj)
}
// kubePeek checks if object by url exists in Kubernetes.
func (m *kubePackage) kubePeek(ctx context.Context, url string) (obj runtime.Object, found bool, err error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, false, err
}
log.V(1).Infof("GET to %s", url)
resp, err := m.httpClient.Do(req.WithContext(ctx))
if err != nil {
return nil, false, err
}
if resp.StatusCode == http.StatusNotFound {
return nil, false, nil
}
obj, _, err = parseHTTPResponse(resp)
if err != nil {
return nil, false, err
}
return obj, true, nil
}
var ErrUpdateImmutable = errors.New("cannot update immutable. Use -force to delete and recreate")
func ErrImmutableRessource(attribute string, obj runtime.Object) error {
return fmt.Errorf("failed to update %s of resource %s: %w", attribute, obj.GetObjectKind().GroupVersionKind().String(), ErrUpdateImmutable)
}
// mergeObjects merges the fields from the live object to the new
// object such as resource version and clusterIP.
// TODO(jon.yucel): Instead of selectively picking fields, holistically
// solving this problem requires three-way merge implementation.
func mergeObjects(live, obj runtime.Object) error {
// Service's clusterIP needs to be re-set to the value provided
// by controller or mutation will be denied.
if liveSvc, ok := live.(*corev1.Service); ok {
svc := obj.(*corev1.Service)
svc.Spec.ClusterIP = liveSvc.Spec.ClusterIP
gotPort := liveSvc.Spec.HealthCheckNodePort
wantPort := svc.Spec.HealthCheckNodePort
// If port is set (non-zero) and doesn't match the existing port (also non-zero), error out.
if wantPort != 0 && gotPort != 0 && wantPort != gotPort {
return ErrImmutableRessource(".spec.healthCheckNodePort", obj)
}
svc.Spec.HealthCheckNodePort = gotPort
}
if liveClusterRoleBinding, ok := live.(*rbacv1.ClusterRoleBinding); ok {
clusterRoleBinding := obj.(*rbacv1.ClusterRoleBinding)
if liveClusterRoleBinding.RoleRef.APIGroup != clusterRoleBinding.RoleRef.APIGroup ||
liveClusterRoleBinding.RoleRef.Kind != clusterRoleBinding.RoleRef.Kind ||
liveClusterRoleBinding.RoleRef.Name != clusterRoleBinding.RoleRef.Name {
return ErrImmutableRessource("roleRef", obj)
}
}
// Set metadata.resourceVersion for updates as required by
// Kubernetes API (http://go/k8s-concurrency).
if gotRV := live.(metav1.Object).GetResourceVersion(); gotRV != "" {
obj.(metav1.Object).SetResourceVersion(gotRV)
}
return nil
}
// maybeRecreate can be called to check if a resource can be updated or
// is immutable and needs recreation.
// It evaluates if resource should be forcefully recreated. In that case
// the resource will be deleted and recreated. If the -force flag is not
// enabled and an immutable resource should be updated, an error is thrown
// and no resources will get deleted.
func maybeRecreate(ctx context.Context, live, obj runtime.Object, m *kubePackage, r *apiResource) error {
err := mergeObjects(live, obj)
if errors.Is(errors.Unwrap(err), ErrUpdateImmutable) && m.force {
if m.dryRun {
fmt.Fprintf(os.Stdout, "\n\n**WARNING** %s %s is immutable and will be deleted and recreated.\n", strings.ToLower(r.GVK.Kind), maybeNamespaced(r.Name, r.Namespace))
}
// kubeDelete() already properly handles a dry run, so the resource won't be deleted if -force is set, but in dry run mode
if err := m.kubeDelete(ctx, r, true); err != nil {
return err
}
} else if err != nil {
return err
}
return nil
}
// kubeUpdate creates or overwrites object in Kubernetes.
// Path is computed based on msg type, name and (optional) namespace (these must
// not conflict with name and namespace set in object metadata).
func (m *kubePackage) kubeUpdate(ctx context.Context, r *apiResource, msg proto.Message) error {
uri := r.PathWithName()
live, found, err := m.kubePeek(ctx, m.Master+uri)
if err != nil {
return err
}
method := http.MethodPut
if found {
// Reset uri in case subresource update is requested.
uri = r.PathWithSubresource()
if err := maybeRecreate(ctx, live, msg.(runtime.Object), m, r); err != nil {
return err
}
} else { // Object doesn't exist so create it.
if r.Subresource != "" {
return errors.New("parent resource does not exist")
}
method = http.MethodPost
uri = r.Path()
}
bs, err := marshal(msg, r.GVK)
if err != nil {
return err
}
url := m.Master + uri
// Set body type as marshaled Protobuf.
// TODO(dmitry-ilyevskiy): Will not work for CRDs (only json encoding
// is supported) so the user will have to indicate this is a
// non-standard type (or we could deduce that ourselves).
contentType := "application/vnd.kubernetes.protobuf"
req, err := http.NewRequest(method, url, bytes.NewReader(bs))
if err != nil {
return err
}
req.Header.Set("Content-Type", contentType)
log.V(1).Infof("%s to %s", method, url)
if log.V(2) {
s, err := renderObj(msg.(runtime.Object), &r.GVK, bool(log.V(3)) /* If --v=3, only return JSON. */, m.diffFilters)
if err != nil {
return fmt.Errorf("failed to render :live object for %s: %v", r.String(), err)
}
log.Infof("%s:\n%s", r.String(), s)
}
if m.diff {
if err := printUnifiedDiff(os.Stdout, live, msg.(runtime.Object), r.GVK, maybeNamespaced(r.Name, r.Namespace), m.diffFilters); err != nil {
return err
}
}
if m.dryRun {
return printUnifiedDiff(os.Stdout, live, msg.(runtime.Object), r.GVK, maybeNamespaced(r.Name, r.Namespace), m.diffFilters)
}
resp, err := m.httpClient.Do(req.WithContext(ctx))
if err != nil {
return err
}
_, rMsg, err := parseHTTPResponse(resp)
if err != nil {
return err
}
actionMsg := "created"
if method == http.MethodPut {
actionMsg = "updated"
}
log.Infof("%s %s", rMsg, actionMsg)
return nil
}
// kubeDelete deletes namespace/name resource in Kubernetes.
// Attempts to deduce GroupVersionResource from apiGroup (optional) and resource
// strings. Fails if multiple matches found.
func (m *kubePackage) kubeDelete(_ context.Context, r *apiResource, foreground bool) error {
var c dynamic.ResourceInterface = m.dynClient.Resource(r.GroupVersionResource())
if r.Namespace != "" {
c = c.(dynamic.NamespaceableResourceInterface).Namespace(r.Namespace)
}
delPolicy := metav1.DeletePropagationBackground
if foreground {
delPolicy = metav1.DeletePropagationForeground
}
log.V(1).Infof("DELETE to %s", m.Master+r.PathWithName())
if m.dryRun {
return nil
}
if err := c.Delete(context.TODO(), r.Name, metav1.DeleteOptions{
PropagationPolicy: &delPolicy,
}); err != nil {
return err
}
log.Infof("%v deleted", r)
return nil
}
// waitRetryInterval is a duration between consecutive get retries.
const waitRetryInterval = 500 * time.Millisecond
var ErrNotFound = errors.New("not found")
// kubeGet attempts to read namespace/name resource from an apiGroup from API
// Server.
// If object is not present will retry every waitRetryInterval up to wait (only
// tries once if wait is zero).
func (m *kubePackage) kubeGet(ctx context.Context, r *apiResource, wait time.Duration) (runtime.Object, error) { | random_line_split |
||
kube.go | resArg[1].(starlark.String)
if !ok {
err = errors.New("expected string for resource name")
return
}
name = string(nameArg)
return
}
// kubePutFn is entry point for `kube.put' callable.
// TODO(dmitry-ilyevskiy): Return Status object from the response as Starlark dict.
func (m *kubePackage) kubePutFn(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var name, namespace, apiGroup, subresource string
data := &starlark.List{}
unpacked := []interface{}{
"name", &name,
"data", &data,
"namespace?", &namespace,
// TODO(dmitry-ilyevskiy): Remove this when https://github.com/stripe/skycfg/issues/14
// is resolved upstream.
"api_group?", &apiGroup,
"subresource?", &subresource,
}
if err := starlark.UnpackArgs(b.Name(), args, kwargs, unpacked...); err != nil {
return nil, fmt.Errorf("<%v>: %v", b.Name(), err)
}
for i := 0; i < data.Len(); i++ {
maybeMsg := data.Index(i)
msg, ok := skycfg.AsProtoMessage(maybeMsg)
if !ok {
return nil, fmt.Errorf("<%v>: item %d is not a protobuf type. got: %s", b.Name(), i, maybeMsg.Type())
}
sCtx := t.Local(addon.SkyCtxKey).(*addon.SkyCtx)
if err := m.setMetadata(sCtx, name, namespace, msg.(runtime.Object)); err != nil {
return nil, fmt.Errorf("<%v>: failed to validate/apply metadata for object %d => %v: %v", b.Name(), i, maybeMsg.Type(), err)
}
r, err := newResourceForMsg(m.dClient, name, namespace, apiGroup, subresource, msg)
if err != nil {
return nil, fmt.Errorf("<%v>: failed to map resource: %v", b.Name(), err)
}
ctx := t.Local(addon.GoCtxKey).(context.Context)
if err := m.kubeUpdate(ctx, r, msg); err != nil {
return nil, fmt.Errorf("<%v>: %v", b.Name(), err)
}
}
return starlark.None, nil
}
// kubeDeleteFn is entry point for `kube.delete' callable.
// TODO(dmitry-ilyevskiy): Return Status object from the response as Starlark dict.
func (m *kubePackage) kubeDeleteFn(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if len(args) != 0 {
return nil, fmt.Errorf("<%v>: positional args not supported by `kube.delete': %v", b.Name(), args)
}
if len(kwargs) < 1 {
return nil, fmt.Errorf("<%v>: expected at least <resource>=<name>", b.Name())
}
resource, name, err := getResourceAndName(kwargs[0])
if err != nil {
return nil, fmt.Errorf("<%v>: %s", b.Name(), err.Error())
}
// If resource is not namespace itself (special case) attempt to parse
// namespace out of the arg value.
var namespace string
if resource != namespaceResrc {
ss := strings.Split(name, "/")
if len(ss) > 1 {
namespace = ss[0]
name = ss[1]
}
}
// Optional api_group argument.
var apiGroup starlark.String
var foreground starlark.Bool
for _, kv := range kwargs[1:] {
switch string(kv[0].(starlark.String)) {
case apiGroupKW:
var ok bool
if apiGroup, ok = kv[1].(starlark.String); !ok {
return nil, fmt.Errorf("<%v>: expected string value for `%s' arg, got: %s", b.Name(), apiGroupKW, kv[1].Type())
}
case "foreground":
var ok bool
if foreground, ok = kv[1].(starlark.Bool); !ok {
return nil, fmt.Errorf("<%v>: expected string value for `foreground' arg, got: %s", b.Name(), kv[1].Type())
}
default:
return nil, fmt.Errorf("<%v>: expected `api_group' or `foreground', got: %v=%v", b.Name(), kv[0], kv[1])
}
}
r, err := newResource(m.dClient, name, namespace, string(apiGroup), resource, "")
if err != nil {
return nil, fmt.Errorf("<%v>: failed to map resource: %v", b.Name(), err)
}
ctx := t.Local(addon.GoCtxKey).(context.Context)
if err := m.kubeDelete(ctx, r, bool(foreground)); err != nil {
return nil, fmt.Errorf("<%v>: %v", b.Name(), err)
}
return starlark.None, nil
}
// kubeGetFn is an entry point for `kube.get` built-in.
func (m *kubePackage) kubeGetFn(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if len(args) != 0 {
return nil, fmt.Errorf("<%v>: positional args not supported: %v", b.Name(), args)
}
if len(kwargs) < 1 {
return nil, fmt.Errorf("<%v>: expected <resource>=<name>", b.Name())
}
resource, name, err := getResourceAndName(kwargs[0])
if err != nil {
return nil, fmt.Errorf("<%v>: %s", b.Name(), err.Error())
}
// If resource is not namespace itself (special case), attempt to parse
// namespace out of the arg value.
var namespace string
if resource != namespaceResrc {
ss := strings.Split(name, "/")
if len(ss) > 1 {
namespace = ss[0]
name = ss[1]
}
}
// Optional api_group argument.
var apiGroup starlark.String
var wait = 30 * time.Second
var wantJSON bool
for _, kv := range kwargs[1:] {
switch string(kv[0].(starlark.String)) {
case apiGroupKW:
var ok bool
if apiGroup, ok = kv[1].(starlark.String); !ok {
return nil, fmt.Errorf("<%v>: expected string value for `%s' arg, got: %s", b.Name(), apiGroupKW, kv[1].Type())
}
case "wait":
durStr, ok := kv[1].(starlark.String)
if !ok {
return nil, fmt.Errorf("<%v>: expected string value for `wait' arg, got: %s", b.Name(), kv[1].Type())
}
var err error
if wait, err = time.ParseDuration(string(durStr)); err != nil {
return nil, fmt.Errorf("<%v>: failed to parse duration value: %v", b.Name(), err)
}
case "json":
bv, ok := kv[1].(starlark.Bool)
if !ok {
return nil, fmt.Errorf("<%v>: expected boolean value for `json' arg, got: %s", b.Name(), kv[1].Type())
}
wantJSON = bool(bv)
default:
return nil, fmt.Errorf("<%v>: expected one of [ api_group | wait | json ] args, got: %v=%v", b.Name(), kv[0], kv[1])
}
}
r, err := newResource(m.dClient, name, namespace, string(apiGroup), resource, "")
if err != nil {
return nil, fmt.Errorf("<%v>: failed to map resource: %v", b.Name(), err)
}
ctx := t.Local(addon.GoCtxKey).(context.Context)
obj, err := m.kubeGet(ctx, r, wait)
if err != nil {
return nil, fmt.Errorf("<%v>: failed to get %s%s `%s': %v", b.Name(), resource, maybeCore(string(apiGroup)), name, err)
}
if wantJSON {
un, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return nil, fmt.Errorf("<%v>: failed to convert %s%s `%s' to unstructured JSON: %v", b.Name(), resource, maybeCore(string(apiGroup)), name, err)
}
return util.ValueFromNestedMap(un)
}
p, ok := obj.(proto.Message)
if !ok {
return nil, fmt.Errorf("<%v>: could not convert object to proto: %v", b.Name(), obj)
}
return skycfg.NewProtoMessage(p), nil
}
// kubeExistsFn is an entry point for `kube.exists` built-in.
func (m *kubePackage) | kubeExistsFn | identifier_name |
|
kube.go | err != nil {
return nil, err
}
v, k := gvk.ToAPIVersionAndKind()
unknownBytes, err := proto.Marshal(&runtime.Unknown{
TypeMeta: runtime.TypeMeta{
APIVersion: v,
Kind: k,
},
Raw: msgBytes,
})
if err != nil {
return nil, err
}
return append(k8sProtoMagic, unknownBytes...), nil
}
var decodeFn = Codecs.UniversalDeserializer().Decode
func decode(raw []byte) (runtime.Object, *schema.GroupVersionKind, error) {
obj, gvk, err := decodeFn(raw, nil, nil)
if err == nil {
return obj, gvk, nil
}
if !runtime.IsNotRegisteredError(err) {
return nil, nil, err
}
// When the input is already a json, this just returns it as-is.
j, err := yaml.YAMLToJSON(raw)
if err != nil {
return nil, nil, err
}
return unstructured.UnstructuredJSONScheme.Decode(j, nil, nil)
}
// parseHTTPResponse parses response body to extract runtime.Object
// and HTTP return code.
// Returns details message on success and error on failure (includes HTTP
// response codes not in 2XX).
func parseHTTPResponse(r *http.Response) (obj runtime.Object, details string, err error) {
raw, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, "", fmt.Errorf("failed to read body (response code: %d): %v", r.StatusCode, err)
}
log.V(2).Infof("Response raw data: %s", raw)
obj, gvk, err := decode(raw)
if err != nil {
return nil, "", fmt.Errorf("failed to parse json object (response code: %d): %v", r.StatusCode, err)
}
if r.StatusCode < 200 || r.StatusCode >= 300 {
return nil, "", fmt.Errorf("%s (response code: %d)", apierrors.FromObject(obj).Error(), r.StatusCode)
}
if s, ok := obj.(*metav1.Status); ok {
d := s.Details
if d == nil {
return obj, s.Message, nil
}
return obj, fmt.Sprintf("%s%s `%s", d.Kind, d.Group, d.Name), nil
}
if in, ok := obj.(metav1.Object); ok {
return obj, fmt.Sprintf("%s%s `%s'", strings.ToLower(gvk.Kind), maybeCore(gvk.Group), maybeNamespaced(in.GetName(), in.GetNamespace())), nil
}
if _, ok := obj.(metav1.ListInterface); ok {
return obj, fmt.Sprintf("%s%s'", strings.ToLower(gvk.Kind), maybeCore(gvk.Group)), nil
}
return nil, "", fmt.Errorf("returned object does not implement `metav1.Object` or `metav1.ListInterface`: %v", obj)
}
// kubePeek checks if object by url exists in Kubernetes.
func (m *kubePackage) kubePeek(ctx context.Context, url string) (obj runtime.Object, found bool, err error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, false, err
}
log.V(1).Infof("GET to %s", url)
resp, err := m.httpClient.Do(req.WithContext(ctx))
if err != nil {
return nil, false, err
}
if resp.StatusCode == http.StatusNotFound {
return nil, false, nil
}
obj, _, err = parseHTTPResponse(resp)
if err != nil {
return nil, false, err
}
return obj, true, nil
}
var ErrUpdateImmutable = errors.New("cannot update immutable. Use -force to delete and recreate")
func ErrImmutableRessource(attribute string, obj runtime.Object) error {
return fmt.Errorf("failed to update %s of resource %s: %w", attribute, obj.GetObjectKind().GroupVersionKind().String(), ErrUpdateImmutable)
}
// mergeObjects merges the fields from the live object to the new
// object such as resource version and clusterIP.
// TODO(jon.yucel): Instead of selectively picking fields, holistically
// solving this problem requires three-way merge implementation.
func mergeObjects(live, obj runtime.Object) error {
// Service's clusterIP needs to be re-set to the value provided
// by controller or mutation will be denied.
if liveSvc, ok := live.(*corev1.Service); ok {
svc := obj.(*corev1.Service)
svc.Spec.ClusterIP = liveSvc.Spec.ClusterIP
gotPort := liveSvc.Spec.HealthCheckNodePort
wantPort := svc.Spec.HealthCheckNodePort
// If port is set (non-zero) and doesn't match the existing port (also non-zero), error out.
if wantPort != 0 && gotPort != 0 && wantPort != gotPort {
return ErrImmutableRessource(".spec.healthCheckNodePort", obj)
}
svc.Spec.HealthCheckNodePort = gotPort
}
if liveClusterRoleBinding, ok := live.(*rbacv1.ClusterRoleBinding); ok {
clusterRoleBinding := obj.(*rbacv1.ClusterRoleBinding)
if liveClusterRoleBinding.RoleRef.APIGroup != clusterRoleBinding.RoleRef.APIGroup ||
liveClusterRoleBinding.RoleRef.Kind != clusterRoleBinding.RoleRef.Kind ||
liveClusterRoleBinding.RoleRef.Name != clusterRoleBinding.RoleRef.Name {
return ErrImmutableRessource("roleRef", obj)
}
}
// Set metadata.resourceVersion for updates as required by
// Kubernetes API (http://go/k8s-concurrency).
if gotRV := live.(metav1.Object).GetResourceVersion(); gotRV != "" {
obj.(metav1.Object).SetResourceVersion(gotRV)
}
return nil
}
// maybeRecreate can be called to check if a resource can be updated or
// is immutable and needs recreation.
// It evaluates if resource should be forcefully recreated. In that case
// the resource will be deleted and recreated. If the -force flag is not
// enabled and an immutable resource should be updated, an error is thrown
// and no resources will get deleted.
func maybeRecreate(ctx context.Context, live, obj runtime.Object, m *kubePackage, r *apiResource) error {
err := mergeObjects(live, obj)
if errors.Is(errors.Unwrap(err), ErrUpdateImmutable) && m.force {
if m.dryRun {
fmt.Fprintf(os.Stdout, "\n\n**WARNING** %s %s is immutable and will be deleted and recreated.\n", strings.ToLower(r.GVK.Kind), maybeNamespaced(r.Name, r.Namespace))
}
// kubeDelete() already properly handles a dry run, so the resource won't be deleted if -force is set, but in dry run mode
if err := m.kubeDelete(ctx, r, true); err != nil {
return err
}
} else if err != nil {
return err
}
return nil
}
// kubeUpdate creates or overwrites object in Kubernetes.
// Path is computed based on msg type, name and (optional) namespace (these must
// not conflict with name and namespace set in object metadata).
func (m *kubePackage) kubeUpdate(ctx context.Context, r *apiResource, msg proto.Message) error {
uri := r.PathWithName()
live, found, err := m.kubePeek(ctx, m.Master+uri)
if err != nil {
return err
}
method := http.MethodPut
if found {
// Reset uri in case subresource update is requested.
uri = r.PathWithSubresource()
if err := maybeRecreate(ctx, live, msg.(runtime.Object), m, r); err != nil {
return err
}
} else { // Object doesn't exist so create it.
if r.Subresource != "" {
return errors.New("parent resource does not exist")
}
method = http.MethodPost
uri = r.Path()
}
bs, err := marshal(msg, r.GVK)
if err != nil {
return err
}
url := m.Master + uri
// Set body type as marshaled Protobuf.
// TODO(dmitry-ilyevskiy): Will not work for CRDs (only json encoding
// is supported) so the user will have to indicate this is a
// non-standard type (or we could deduce that ourselves).
contentType := "application/vnd.kubernetes.protobuf"
req, err := http.NewRequest(method, url, bytes.NewReader(bs))
if err != nil {
return err
}
req.Header.Set("Content-Type", contentType)
log.V(1).Infof("%s to %s", method, url)
if log.V(2) {
s, err := renderObj(msg.(runtime.Object), &r.GVK, bool(log.V(3)) /* If --v=3, only return JSON. */, m.diffFilters)
if err != nil {
return fmt.Errorf("failed to render :live object for %s: %v", r.String(), err)
}
log.Infof("%s:\n%s", r.String(), s)
}
if m.diff | {
if err := printUnifiedDiff(os.Stdout, live, msg.(runtime.Object), r.GVK, maybeNamespaced(r.Name, r.Namespace), m.diffFilters); err != nil {
return err
}
} | conditional_block |
|
kube.go | Ctx.Attrs["addon_version"])
if err != nil {
return err
}
if len(version) >= 2 && version[0] == '"' && version[len(version)-1] == '"' {
ls["addon_version"] = string(version[1 : len(version)-1])
}
}
if err := a.SetLabels(obj, ls); err != nil {
return err
}
as, err := a.Annotations(obj)
if err != nil {
return err
}
if as == nil {
as = map[string]string{}
}
bs, err := json.Marshal(tCtx.Attrs)
if err != nil {
return err
}
as[ctxAnnotationKey] = string(bs)
return a.SetAnnotations(obj, as)
}
func getResourceAndName(resArg starlark.Tuple) (resource, name string, err error) {
resourceArg, ok := resArg[0].(starlark.String)
if !ok {
err = errors.New("expected string for resource")
return
}
resource = string(resourceArg)
nameArg, ok := resArg[1].(starlark.String)
if !ok {
err = errors.New("expected string for resource name")
return
}
name = string(nameArg)
return
}
// kubePutFn is entry point for `kube.put' callable.
// TODO(dmitry-ilyevskiy): Return Status object from the response as Starlark dict.
func (m *kubePackage) kubePutFn(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var name, namespace, apiGroup, subresource string
data := &starlark.List{}
unpacked := []interface{}{
"name", &name,
"data", &data,
"namespace?", &namespace,
// TODO(dmitry-ilyevskiy): Remove this when https://github.com/stripe/skycfg/issues/14
// is resolved upstream.
"api_group?", &apiGroup,
"subresource?", &subresource,
}
if err := starlark.UnpackArgs(b.Name(), args, kwargs, unpacked...); err != nil {
return nil, fmt.Errorf("<%v>: %v", b.Name(), err)
}
for i := 0; i < data.Len(); i++ {
maybeMsg := data.Index(i)
msg, ok := skycfg.AsProtoMessage(maybeMsg)
if !ok {
return nil, fmt.Errorf("<%v>: item %d is not a protobuf type. got: %s", b.Name(), i, maybeMsg.Type())
}
sCtx := t.Local(addon.SkyCtxKey).(*addon.SkyCtx)
if err := m.setMetadata(sCtx, name, namespace, msg.(runtime.Object)); err != nil {
return nil, fmt.Errorf("<%v>: failed to validate/apply metadata for object %d => %v: %v", b.Name(), i, maybeMsg.Type(), err)
}
r, err := newResourceForMsg(m.dClient, name, namespace, apiGroup, subresource, msg)
if err != nil {
return nil, fmt.Errorf("<%v>: failed to map resource: %v", b.Name(), err)
}
ctx := t.Local(addon.GoCtxKey).(context.Context)
if err := m.kubeUpdate(ctx, r, msg); err != nil {
return nil, fmt.Errorf("<%v>: %v", b.Name(), err)
}
}
return starlark.None, nil
}
// kubeDeleteFn is entry point for `kube.delete' callable.
// TODO(dmitry-ilyevskiy): Return Status object from the response as Starlark dict.
func (m *kubePackage) kubeDeleteFn(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) | namespace = ss[0]
name = ss[1]
}
}
// Optional api_group argument.
var apiGroup starlark.String
var foreground starlark.Bool
for _, kv := range kwargs[1:] {
switch string(kv[0].(starlark.String)) {
case apiGroupKW:
var ok bool
if apiGroup, ok = kv[1].(starlark.String); !ok {
return nil, fmt.Errorf("<%v>: expected string value for `%s' arg, got: %s", b.Name(), apiGroupKW, kv[1].Type())
}
case "foreground":
var ok bool
if foreground, ok = kv[1].(starlark.Bool); !ok {
return nil, fmt.Errorf("<%v>: expected string value for `foreground' arg, got: %s", b.Name(), kv[1].Type())
}
default:
return nil, fmt.Errorf("<%v>: expected `api_group' or `foreground', got: %v=%v", b.Name(), kv[0], kv[1])
}
}
r, err := newResource(m.dClient, name, namespace, string(apiGroup), resource, "")
if err != nil {
return nil, fmt.Errorf("<%v>: failed to map resource: %v", b.Name(), err)
}
ctx := t.Local(addon.GoCtxKey).(context.Context)
if err := m.kubeDelete(ctx, r, bool(foreground)); err != nil {
return nil, fmt.Errorf("<%v>: %v", b.Name(), err)
}
return starlark.None, nil
}
// kubeGetFn is an entry point for `kube.get` built-in.
func (m *kubePackage) kubeGetFn(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if len(args) != 0 {
return nil, fmt.Errorf("<%v>: positional args not supported: %v", b.Name(), args)
}
if len(kwargs) < 1 {
return nil, fmt.Errorf("<%v>: expected <resource>=<name>", b.Name())
}
resource, name, err := getResourceAndName(kwargs[0])
if err != nil {
return nil, fmt.Errorf("<%v>: %s", b.Name(), err.Error())
}
// If resource is not namespace itself (special case), attempt to parse
// namespace out of the arg value.
var namespace string
if resource != namespaceResrc {
ss := strings.Split(name, "/")
if len(ss) > 1 {
namespace = ss[0]
name = ss[1]
}
}
// Optional api_group argument.
var apiGroup starlark.String
var wait = 30 * time.Second
var wantJSON bool
for _, kv := range kwargs[1:] {
switch string(kv[0].(starlark.String)) {
case apiGroupKW:
var ok bool
if apiGroup, ok = kv[1].(starlark.String); !ok {
return nil, fmt.Errorf("<%v>: expected string value for `%s' arg, got: %s", b.Name(), apiGroupKW, kv[1].Type())
}
case "wait":
durStr, ok := kv[1].(starlark.String)
if !ok {
return nil, fmt.Errorf("<%v>: expected string value for `wait' arg, got: %s", b.Name(), kv[1].Type())
}
var err error
if wait, err = time.ParseDuration(string(durStr)); err != nil {
return nil, fmt.Errorf("<%v>: failed to parse duration value: %v", b.Name(), err)
}
case "json":
bv, ok := kv[1].(starlark.Bool)
if !ok {
return nil, fmt.Errorf("<%v>: expected boolean value for `json' arg, got: %s", b.Name(), kv[1].Type())
}
wantJSON = bool(bv)
default:
return nil, fmt.Errorf("<%v>: expected one of [ api_group | wait | json ] args, got: %v=%v", b.Name(), kv[0], kv[1])
}
}
r, err := newResource(m.dClient, name, namespace, string(apiGroup), resource, "")
if err != nil {
return nil, fmt.Errorf("<%v>: failed to map resource: %v", b.Name(), err)
| {
if len(args) != 0 {
return nil, fmt.Errorf("<%v>: positional args not supported by `kube.delete': %v", b.Name(), args)
}
if len(kwargs) < 1 {
return nil, fmt.Errorf("<%v>: expected at least <resource>=<name>", b.Name())
}
resource, name, err := getResourceAndName(kwargs[0])
if err != nil {
return nil, fmt.Errorf("<%v>: %s", b.Name(), err.Error())
}
// If resource is not namespace itself (special case) attempt to parse
// namespace out of the arg value.
var namespace string
if resource != namespaceResrc {
ss := strings.Split(name, "/")
if len(ss) > 1 { | identifier_body |
run_model.py | plt.ylabel("backwater lengths")
cem = Cem()
raf = Rafem()
waves = Waves()
args = cem.setup("_run_cem", number_of_cols=120, number_of_rows=100, grid_spacing=100.0)
cem.initialize(*args)
args = raf.setup(
"_run_rafem",
n_cols=120,
n_rows=100,
dy=0.1,
dx=0.1,
time_step= 0.05, # timestep (days)
sea_level_rise_rate=0.0,
channel_discharge=10.0,
upstream_elevation=5.0,
random_seed=1111,
saveavulsions=True,
)
raf.initialize(*args)
args = waves.setup("_run_waves")
waves.initialize(*args)
set(raf.get_output_var_names()) & set(cem.get_input_var_names())
z = raf.get_value("land_surface__elevation")
raf.set_value("land_surface__elevation", z)
cem.set_value("land_surface__elevation", z)
waves.set_value(
"sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_asymmetry_parameter",
0.5,
)
waves.set_value(
"sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_highness_parameter",
0.3,
)
cem.set_value("sea_surface_water_wave__height", 0.1)
cem.set_value("sea_surface_water_wave__period", 9.0)
### set CEM wave angle if not updating waves ###
# cem.set_value("sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity", 0. * np.pi / 180.)
grid_id = cem.get_var_grid("land_surface__elevation")
spacing = cem.get_grid_spacing(grid_id)
shape = cem.get_grid_shape(grid_id)
z0 = raf.get_value("land_surface__elevation").reshape(shape)
riv_x = raf.get_value("channel_centerline__x_coordinate") / 1000
riv_y = raf.get_value("channel_centerline__y_coordinate") / 1000
qs = np.zeros_like(z0)
flux_array = np.zeros(2, dtype=np.float)
RIVER_WIDTH = dict(raf.parameters)["channel_width"] # Convert unit-width flux to flux
RHO_SED = 2650.0 # Used to convert volume flux to mass flux
TIME_STEP = raf.time_step
Tcf = 1000000 / 86400
dx = (dict(raf.parameters)["dy"]) * 1000.0
slope = dict(raf.parameters)["initial_slope"]
# slope = dict(raf.parameters)['delta_slope']
max_cell_h = dx * slope
channel_depth = dict(raf.parameters)["channel_depth"]
max_rand = 0.0001
if not os.path.exists("output_data"):
os.mkdir("output_data")
if Save_Daily_Timesteps or Save_Yearly_Timesteps:
# make directories to save run data
# if not os.path.exists("output_data/elev_grid"):
# os.mkdir("output_data/elev_grid")
if not os.path.exists("output_data/riv_course"):
os.mkdir("output_data/riv_course")
# if not os.path.exists("output_data/riv_profile"):
# os.mkdir("output_data/riv_profile")
if not os.path.exists("output_data/elev_figs"):
os.mkdir("output_data/elev_figs")
if not os.path.exists("output_data/prof_figs"):
os.mkdir("output_data/prof_figs")
if not os.path.exists("output_data/rel_elev"):
os.mkdir("output_data/rel_elev")
for time in np.arange(0, N_DAYS, TIME_STEP):
raf.update_until(time)
nyears = float(time / 365.0)
sea_level = raf.get_value("sea_water_surface__elevation")
# Get and set sediment flux at the river mouth
raf_qs = raf.get_value("channel_exit_water_sediment~bedload__volume_flow_rate")
y, x = (
raf.get_value("channel_exit__y_coordinate"),
raf.get_value("channel_exit__x_coordinate"),
)
qs[int(y[0] / spacing[0]), int(x[0] / spacing[1])] = (
raf_qs[0] * RIVER_WIDTH * RHO_SED
)
if Save_Fluxes:
with open("output_data/fluxes.out", "a") as file:
file.write("%.2f %.5f \n" % (time, raf_qs[0] * RIVER_WIDTH * RHO_SED))
cem.set_value("land_surface_water_sediment~bedload__mass_flow_rate", qs)
# Get and set elevations from Rafem to CEM
raf_z = (raf.get_value("land_surface__elevation") - sea_level).reshape(shape)
riv_x = raf.get_value("channel_centerline__x_coordinate") / dx
riv_y = raf.get_value("channel_centerline__y_coordinate") / dx
riv_i = riv_x.astype(int)
riv_j = riv_y.astype(int)
prof_elev = raf_z[riv_j, riv_i]
raf_z[riv_j, riv_i] += channel_depth
# divide subaerial cells by max_cell_h to convert to percent full
raf_z[raf_z > 0] /= max_cell_h
# fix river elevations before passing
mouth_cell_count = 0
for k in reversed(range(riv_x.size)):
if raf_z[riv_j[k], riv_i[k]] < 1:
if mouth_cell_count < 1:
mouth_cell_count += 1
else:
raf_z[riv_j[k], riv_i[k]] = 1
raf_z.reshape(shape[0] * shape[1])
cem.set_value("land_surface__elevation", raf_z)
# update wave climate
waves.update()
angle = waves.get_value(
"sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity"
)
cem.set_value(
"sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity", angle
)
cem.update_until(time)
# Get and set elevations from CEM to Rafem
cem_z = cem.get_value("land_surface__elevation").reshape(shape)
cem_z[cem_z > 0] *= max_cell_h
# reset river elevations back for Rafem
# cem_z[[riv_j, riv_i] >= 0] -= channel_depth
if cem_z[riv_j[-1], riv_i[-1]] > 0:
cem_z[riv_j[:-1], riv_i[:-1]] = prof_elev[:-1]
cem_z[riv_j[-1], riv_i[-1]] -= channel_depth
else:
cem_z[riv_j[:-2], riv_i[:-2]] = prof_elev[:-2]
cem_z[riv_j[-2], riv_i[-2]] -= channel_depth
cem_z.reshape(shape[0] * shape[1])
raf.set_value("land_surface__elevation", cem_z + sea_level)
qs.fill(0)
if time % save_int == 0:
print("time = %.3f days" % time)
if Save_Daily_Timesteps or Save_Yearly_Timesteps:
# save outputs
z = raf.get_value("land_surface__elevation").reshape(shape)
rel_z = z - sea_level
x = raf.get_value("channel_centerline__x_coordinate")
y = raf.get_value("channel_centerline__y_coordinate")
prof = raf.get_value("channel_centerline__elevation")
real_prof = rel_z[(y / dx).astype(int), (x / dx).astype(int)]
river_x = x / 1000
river_y = y / 1000
riv_left = z[y.astype(int) // 100, (x.astype(int) // 100) + | import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
land = plt.cm.terrain(np.linspace(0.4, 1, 128))
ocean = plt.cm.ocean(np.linspace(0.5, 0.8, 128))
colors = np.vstack((ocean, land))
m = LinearSegmentedColormap.from_list("land_ocean", colors)
(x, y) = np.meshgrid(
np.arange(z.shape[0]) * spacing[0],
np.arange(z.shape[1]) * spacing[1],
indexing="ij",
)
plt.pcolormesh(y * 1e-3, x * 1e-3, z, cmap=m, vmin=-50, vmax=50)
plt.gca().set_aspect(1.0)
plt.axis([0, 12, 0, 10])
# plt.colorbar(orientation='horizontal').ax.set_xlabel('Elevation (m)')
plt.xlabel("backwater lengths") | identifier_body |
|
run_model.py | (spacing, z):
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
land = plt.cm.terrain(np.linspace(0.4, 1, 128))
ocean = plt.cm.ocean(np.linspace(0.5, 0.8, 128))
colors = np.vstack((ocean, land))
m = LinearSegmentedColormap.from_list("land_ocean", colors)
(x, y) = np.meshgrid(
np.arange(z.shape[0]) * spacing[0],
np.arange(z.shape[1]) * spacing[1],
indexing="ij",
)
plt.pcolormesh(y * 1e-3, x * 1e-3, z, cmap=m, vmin=-50, vmax=50)
plt.gca().set_aspect(1.0)
plt.axis([0, 12, 0, 10])
# plt.colorbar(orientation='horizontal').ax.set_xlabel('Elevation (m)')
plt.xlabel("backwater lengths")
plt.ylabel("backwater lengths")
cem = Cem()
raf = Rafem()
waves = Waves()
args = cem.setup("_run_cem", number_of_cols=120, number_of_rows=100, grid_spacing=100.0)
cem.initialize(*args)
args = raf.setup(
"_run_rafem",
n_cols=120,
n_rows=100,
dy=0.1,
dx=0.1,
time_step= 0.05, # timestep (days)
sea_level_rise_rate=0.0,
channel_discharge=10.0,
upstream_elevation=5.0,
random_seed=1111,
saveavulsions=True,
)
raf.initialize(*args)
args = waves.setup("_run_waves")
waves.initialize(*args)
set(raf.get_output_var_names()) & set(cem.get_input_var_names())
z = raf.get_value("land_surface__elevation")
raf.set_value("land_surface__elevation", z)
cem.set_value("land_surface__elevation", z)
waves.set_value(
"sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_asymmetry_parameter",
0.5,
)
waves.set_value(
"sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_highness_parameter",
0.3,
)
cem.set_value("sea_surface_water_wave__height", 0.1)
cem.set_value("sea_surface_water_wave__period", 9.0)
### set CEM wave angle if not updating waves ###
# cem.set_value("sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity", 0. * np.pi / 180.)
grid_id = cem.get_var_grid("land_surface__elevation")
spacing = cem.get_grid_spacing(grid_id)
shape = cem.get_grid_shape(grid_id)
z0 = raf.get_value("land_surface__elevation").reshape(shape)
riv_x = raf.get_value("channel_centerline__x_coordinate") / 1000
riv_y = raf.get_value("channel_centerline__y_coordinate") / 1000
qs = np.zeros_like(z0)
flux_array = np.zeros(2, dtype=np.float)
RIVER_WIDTH = dict(raf.parameters)["channel_width"] # Convert unit-width flux to flux
RHO_SED = 2650.0 # Used to convert volume flux to mass flux
TIME_STEP = raf.time_step
Tcf = 1000000 / 86400
dx = (dict(raf.parameters)["dy"]) * 1000.0
slope = dict(raf.parameters)["initial_slope"]
# slope = dict(raf.parameters)['delta_slope']
max_cell_h = dx * slope
channel_depth = dict(raf.parameters)["channel_depth"]
max_rand = 0.0001
if not os.path.exists("output_data"):
os.mkdir("output_data")
if Save_Daily_Timesteps or Save_Yearly_Timesteps:
# make directories to save run data
# if not os.path.exists("output_data/elev_grid"):
# os.mkdir("output_data/elev_grid")
if not os.path.exists("output_data/riv_course"):
os.mkdir("output_data/riv_course")
# if not os.path.exists("output_data/riv_profile"):
# os.mkdir("output_data/riv_profile")
if not os.path.exists("output_data/elev_figs"):
os.mkdir("output_data/elev_figs")
if not os.path.exists("output_data/prof_figs"):
os.mkdir("output_data/prof_figs")
if not os.path.exists("output_data/rel_elev"):
os.mkdir("output_data/rel_elev")
for time in np.arange(0, N_DAYS, TIME_STEP):
raf.update_until(time)
nyears = float(time / 365.0)
sea_level = raf.get_value("sea_water_surface__elevation")
# Get and set sediment flux at the river mouth
raf_qs = raf.get_value("channel_exit_water_sediment~bedload__volume_flow_rate")
y, x = (
raf.get_value("channel_exit__y_coordinate"),
raf.get_value("channel_exit__x_coordinate"),
)
qs[int(y[0] / spacing[0]), int(x[0] / spacing[1])] = (
raf_qs[0] * RIVER_WIDTH * RHO_SED
)
if Save_Fluxes:
with open("output_data/fluxes.out", "a") as file:
file.write("%.2f %.5f \n" % (time, raf_qs[0] * RIVER_WIDTH * RHO_SED))
cem.set_value("land_surface_water_sediment~bedload__mass_flow_rate", qs)
# Get and set elevations from Rafem to CEM
raf_z = (raf.get_value("land_surface__elevation") - sea_level).reshape(shape)
riv_x = raf.get_value("channel_centerline__x_coordinate") / dx
riv_y = raf.get_value("channel_centerline__y_coordinate") / dx
riv_i = riv_x.astype(int)
riv_j = riv_y.astype(int)
prof_elev = raf_z[riv_j, riv_i]
raf_z[riv_j, riv_i] += channel_depth
# divide subaerial cells by max_cell_h to convert to percent full
raf_z[raf_z > 0] /= max_cell_h
# fix river elevations before passing
mouth_cell_count = 0
for k in reversed(range(riv_x.size)):
if raf_z[riv_j[k], riv_i[k]] < 1:
if mouth_cell_count < 1:
mouth_cell_count += 1
else:
raf_z[riv_j[k], riv_i[k]] = 1
raf_z.reshape(shape[0] * shape[1])
cem.set_value("land_surface__elevation", raf_z)
# update wave climate
waves.update()
angle = waves.get_value(
"sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity"
)
cem.set_value(
"sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity", angle
)
cem.update_until(time)
# Get and set elevations from CEM to Rafem
cem_z = cem.get_value("land_surface__elevation").reshape(shape)
cem_z[cem_z > 0] *= max_cell_h
# reset river elevations back for Rafem
# cem_z[[riv_j, riv_i] >= 0] -= channel_depth
if cem_z[riv_j[-1], riv_i[-1]] > 0:
cem_z[riv_j[:-1], riv_i[:-1]] = prof_elev[:-1]
cem_z[riv_j[-1], riv_i[-1]] -= channel_depth
else:
cem_z[riv_j[:-2], riv_i[:-2]] = prof_elev[:-2]
cem_z[riv_j[-2], riv_i[-2]] -= channel_depth
cem_z.reshape(shape[0] * shape[1])
raf.set_value("land_surface__elevation", cem_z + sea_level)
qs.fill(0)
if time % save_int == 0:
print("time = %.3f days" % time)
if Save_Daily_Timesteps or Save_Yearly_Timesteps:
# save outputs
z = raf.get_value("land_surface__elevation").reshape(shape)
rel_z = z - sea_level
x = raf.get_value("channel_centerline__x_coordinate")
y = raf.get_value("channel_centerline__y_coordinate")
prof = raf.get_value("channel_centerline__elevation")
real_prof = rel_z[(y / dx).astype(int), (x / dx).astype(int)]
river_x = x / 1000
river_y = y / 1000
riv_left = z[y.astype(int) // 100, (x.astype | plot_coast | identifier_name |
|
run_model.py | , 0, 10])
# plt.colorbar(orientation='horizontal').ax.set_xlabel('Elevation (m)')
plt.xlabel("backwater lengths")
plt.ylabel("backwater lengths")
cem = Cem()
raf = Rafem()
waves = Waves()
args = cem.setup("_run_cem", number_of_cols=120, number_of_rows=100, grid_spacing=100.0)
cem.initialize(*args)
args = raf.setup(
"_run_rafem",
n_cols=120,
n_rows=100,
dy=0.1,
dx=0.1,
time_step=0.05,  # timestep (days)
sea_level_rise_rate=0.0,
channel_discharge=10.0,
upstream_elevation=5.0,
random_seed=1111,
saveavulsions=True,
)
raf.initialize(*args)
args = waves.setup("_run_waves")
waves.initialize(*args)
set(raf.get_output_var_names()) & set(cem.get_input_var_names())
z = raf.get_value("land_surface__elevation")
raf.set_value("land_surface__elevation", z)
cem.set_value("land_surface__elevation", z)
waves.set_value(
"sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_asymmetry_parameter",
0.5,
)
waves.set_value(
"sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_highness_parameter",
0.3,
)
cem.set_value("sea_surface_water_wave__height", 0.1)
cem.set_value("sea_surface_water_wave__period", 9.0)
### set CEM wave angle if not updating waves ###
# cem.set_value("sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity", 0. * np.pi / 180.)
grid_id = cem.get_var_grid("land_surface__elevation")
spacing = cem.get_grid_spacing(grid_id)
shape = cem.get_grid_shape(grid_id)
z0 = raf.get_value("land_surface__elevation").reshape(shape)
riv_x = raf.get_value("channel_centerline__x_coordinate") / 1000
riv_y = raf.get_value("channel_centerline__y_coordinate") / 1000
qs = np.zeros_like(z0)
flux_array = np.zeros(2, dtype=float)
RIVER_WIDTH = dict(raf.parameters)["channel_width"] # Convert unit-width flux to flux
RHO_SED = 2650.0 # Used to convert volume flux to mass flux
TIME_STEP = raf.time_step
Tcf = 1000000 / 86400
dx = (dict(raf.parameters)["dy"]) * 1000.0
slope = dict(raf.parameters)["initial_slope"]
# slope = dict(raf.parameters)['delta_slope']
max_cell_h = dx * slope
channel_depth = dict(raf.parameters)["channel_depth"]
max_rand = 0.0001
if not os.path.exists("output_data"):
os.mkdir("output_data")
if Save_Daily_Timesteps or Save_Yearly_Timesteps:
# make directories to save run data
# if not os.path.exists("output_data/elev_grid"):
# os.mkdir("output_data/elev_grid")
if not os.path.exists("output_data/riv_course"):
os.mkdir("output_data/riv_course")
# if not os.path.exists("output_data/riv_profile"):
# os.mkdir("output_data/riv_profile")
if not os.path.exists("output_data/elev_figs"):
os.mkdir("output_data/elev_figs")
if not os.path.exists("output_data/prof_figs"):
os.mkdir("output_data/prof_figs")
if not os.path.exists("output_data/rel_elev"):
os.mkdir("output_data/rel_elev")
for time in np.arange(0, N_DAYS, TIME_STEP):
raf.update_until(time)
nyears = float(time / 365.0)
sea_level = raf.get_value("sea_water_surface__elevation")
# Get and set sediment flux at the river mouth
raf_qs = raf.get_value("channel_exit_water_sediment~bedload__volume_flow_rate")
y, x = (
raf.get_value("channel_exit__y_coordinate"),
raf.get_value("channel_exit__x_coordinate"),
)
qs[int(y[0] / spacing[0]), int(x[0] / spacing[1])] = (
raf_qs[0] * RIVER_WIDTH * RHO_SED
)
if Save_Fluxes:
with open("output_data/fluxes.out", "a") as file:
file.write("%.2f %.5f \n" % (time, raf_qs[0] * RIVER_WIDTH * RHO_SED))
cem.set_value("land_surface_water_sediment~bedload__mass_flow_rate", qs)
# Get and set elevations from Rafem to CEM
raf_z = (raf.get_value("land_surface__elevation") - sea_level).reshape(shape)
riv_x = raf.get_value("channel_centerline__x_coordinate") / dx
riv_y = raf.get_value("channel_centerline__y_coordinate") / dx
riv_i = riv_x.astype(int)
riv_j = riv_y.astype(int)
prof_elev = raf_z[riv_j, riv_i]
raf_z[riv_j, riv_i] += channel_depth
# divide subaerial cells by max_cell_h to convert to percent full
raf_z[raf_z > 0] /= max_cell_h
# fix river elevations before passing
mouth_cell_count = 0
for k in reversed(range(riv_x.size)):
|
raf_z.reshape(shape[0] * shape[1])
cem.set_value("land_surface__elevation", raf_z)
# update wave climate
waves.update()
angle = waves.get_value(
"sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity"
)
cem.set_value(
"sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity", angle
)
cem.update_until(time)
# Get and set elevations from CEM to Rafem
cem_z = cem.get_value("land_surface__elevation").reshape(shape)
cem_z[cem_z > 0] *= max_cell_h
# reset river elevations back for Rafem
# cem_z[[riv_j, riv_i] >= 0] -= channel_depth
if cem_z[riv_j[-1], riv_i[-1]] > 0:
cem_z[riv_j[:-1], riv_i[:-1]] = prof_elev[:-1]
cem_z[riv_j[-1], riv_i[-1]] -= channel_depth
else:
cem_z[riv_j[:-2], riv_i[:-2]] = prof_elev[:-2]
cem_z[riv_j[-2], riv_i[-2]] -= channel_depth
cem_z.reshape(shape[0] * shape[1])
raf.set_value("land_surface__elevation", cem_z + sea_level)
qs.fill(0)
if time % save_int == 0:
print("time = %.3f days" % time)
if Save_Daily_Timesteps or Save_Yearly_Timesteps:
# save outputs
z = raf.get_value("land_surface__elevation").reshape(shape)
rel_z = z - sea_level
x = raf.get_value("channel_centerline__x_coordinate")
y = raf.get_value("channel_centerline__y_coordinate")
prof = raf.get_value("channel_centerline__elevation")
real_prof = rel_z[(y / dx).astype(int), (x / dx).astype(int)]
river_x = x / 1000
river_y = y / 1000
riv_left = z[y.astype(int) // 100, (x.astype(int) // 100) + 1]
riv_right = z[y.astype(int) // 100, (x.astype(int) // 100) - 1]
riv_left[riv_left < sea_level] = sea_level
riv_right[riv_right < sea_level] = sea_level
Tcf_time = time / Tcf
### SAVE DAILY TIMESTEPS ###
##########################################################################################
if Save_Daily_Timesteps == 1:
# np.savetxt('output_data/elev_grid/elev_'+str("%.3f" % nyears)+'.out',z,fmt='%.5f')
np.savetxt(
"output_data/rel_elev/rel_elev_" + str("%i" % time) + ".out",
rel_z,
fmt="%.5f",
)
np.savetxt(
"output_data/riv_course/riv_" + str("%i" % time | if raf_z[riv_j[k], riv_i[k]] < 1:
if mouth_cell_count < 1:
mouth_cell_count += 1
else:
raf_z[riv_j[k], riv_i[k]] = 1 | conditional_block |
run_model.py | , 0, 10])
# plt.colorbar(orientation='horizontal').ax.set_xlabel('Elevation (m)')
plt.xlabel("backwater lengths")
plt.ylabel("backwater lengths")
cem = Cem()
raf = Rafem()
waves = Waves()
args = cem.setup("_run_cem", number_of_cols=120, number_of_rows=100, grid_spacing=100.0)
cem.initialize(*args)
args = raf.setup(
"_run_rafem",
n_cols=120,
n_rows=100,
dy=0.1,
dx=0.1,
time_step=0.05,  # timestep (days)
sea_level_rise_rate=0.0,
channel_discharge=10.0,
upstream_elevation=5.0,
random_seed=1111,
saveavulsions=True,
)
raf.initialize(*args)
args = waves.setup("_run_waves")
waves.initialize(*args)
set(raf.get_output_var_names()) & set(cem.get_input_var_names())
z = raf.get_value("land_surface__elevation")
raf.set_value("land_surface__elevation", z)
cem.set_value("land_surface__elevation", z)
waves.set_value(
"sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_asymmetry_parameter",
0.5,
)
waves.set_value(
"sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_highness_parameter",
0.3,
)
cem.set_value("sea_surface_water_wave__height", 0.1) |
### set CEM wave angle if not updating waves ###
# cem.set_value("sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity", 0. * np.pi / 180.)
grid_id = cem.get_var_grid("land_surface__elevation")
spacing = cem.get_grid_spacing(grid_id)
shape = cem.get_grid_shape(grid_id)
z0 = raf.get_value("land_surface__elevation").reshape(shape)
riv_x = raf.get_value("channel_centerline__x_coordinate") / 1000
riv_y = raf.get_value("channel_centerline__y_coordinate") / 1000
qs = np.zeros_like(z0)
flux_array = np.zeros(2, dtype=float)
RIVER_WIDTH = dict(raf.parameters)["channel_width"] # Convert unit-width flux to flux
RHO_SED = 2650.0 # Used to convert volume flux to mass flux
TIME_STEP = raf.time_step
Tcf = 1000000 / 86400
dx = (dict(raf.parameters)["dy"]) * 1000.0
slope = dict(raf.parameters)["initial_slope"]
# slope = dict(raf.parameters)['delta_slope']
max_cell_h = dx * slope
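# Worked example (the slope value here is illustrative, not read from the input file):
# with dy = 0.1 km, dx = 100.0 m; an initial_slope of 1e-4 would give max_cell_h = 0.01 m,
# i.e. the elevation at which a CEM cell counts as completely full.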
channel_depth = dict(raf.parameters)["channel_depth"]
max_rand = 0.0001
if not os.path.exists("output_data"):
os.mkdir("output_data")
if Save_Daily_Timesteps or Save_Yearly_Timesteps:
# make directories to save run data
# if not os.path.exists("output_data/elev_grid"):
# os.mkdir("output_data/elev_grid")
if not os.path.exists("output_data/riv_course"):
os.mkdir("output_data/riv_course")
# if not os.path.exists("output_data/riv_profile"):
# os.mkdir("output_data/riv_profile")
if not os.path.exists("output_data/elev_figs"):
os.mkdir("output_data/elev_figs")
if not os.path.exists("output_data/prof_figs"):
os.mkdir("output_data/prof_figs")
if not os.path.exists("output_data/rel_elev"):
os.mkdir("output_data/rel_elev")
for time in np.arange(0, N_DAYS, TIME_STEP):
raf.update_until(time)
nyears = float(time / 365.0)
sea_level = raf.get_value("sea_water_surface__elevation")
# Get and set sediment flux at the river mouth
raf_qs = raf.get_value("channel_exit_water_sediment~bedload__volume_flow_rate")
y, x = (
raf.get_value("channel_exit__y_coordinate"),
raf.get_value("channel_exit__x_coordinate"),
)
qs[int(y[0] / spacing[0]), int(x[0] / spacing[1])] = (
raf_qs[0] * RIVER_WIDTH * RHO_SED
)
if Save_Fluxes:
with open("output_data/fluxes.out", "a") as file:
file.write("%.2f %.5f \n" % (time, raf_qs[0] * RIVER_WIDTH * RHO_SED))
cem.set_value("land_surface_water_sediment~bedload__mass_flow_rate", qs)
# Get and set elevations from Rafem to CEM
raf_z = (raf.get_value("land_surface__elevation") - sea_level).reshape(shape)
riv_x = raf.get_value("channel_centerline__x_coordinate") / dx
riv_y = raf.get_value("channel_centerline__y_coordinate") / dx
riv_i = riv_x.astype(int)
riv_j = riv_y.astype(int)
prof_elev = raf_z[riv_j, riv_i]
raf_z[riv_j, riv_i] += channel_depth
# divide subaerial cells by max_cell_h to convert to percent full
raf_z[raf_z > 0] /= max_cell_h
# fix river elevations before passing
mouth_cell_count = 0
for k in reversed(range(riv_x.size)):
if raf_z[riv_j[k], riv_i[k]] < 1:
if mouth_cell_count < 1:
mouth_cell_count += 1
else:
raf_z[riv_j[k], riv_i[k]] = 1
raf_z.reshape(shape[0] * shape[1])
cem.set_value("land_surface__elevation", raf_z)
# update wave climate
waves.update()
angle = waves.get_value(
"sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity"
)
cem.set_value(
"sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity", angle
)
cem.update_until(time)
# Get and set elevations from CEM to Rafem
cem_z = cem.get_value("land_surface__elevation").reshape(shape)
cem_z[cem_z > 0] *= max_cell_h
# reset river elevations back for Rafem
# cem_z[[riv_j, riv_i] >= 0] -= channel_depth
if cem_z[riv_j[-1], riv_i[-1]] > 0:
cem_z[riv_j[:-1], riv_i[:-1]] = prof_elev[:-1]
cem_z[riv_j[-1], riv_i[-1]] -= channel_depth
else:
cem_z[riv_j[:-2], riv_i[:-2]] = prof_elev[:-2]
cem_z[riv_j[-2], riv_i[-2]] -= channel_depth
cem_z.reshape(shape[0] * shape[1])
raf.set_value("land_surface__elevation", cem_z + sea_level)
qs.fill(0)
if time % save_int == 0:
print("time = %.3f days" % time)
if Save_Daily_Timesteps or Save_Yearly_Timesteps:
# save outputs
z = raf.get_value("land_surface__elevation").reshape(shape)
rel_z = z - sea_level
x = raf.get_value("channel_centerline__x_coordinate")
y = raf.get_value("channel_centerline__y_coordinate")
prof = raf.get_value("channel_centerline__elevation")
real_prof = rel_z[(y / dx).astype(int), (x / dx).astype(int)]
river_x = x / 1000
river_y = y / 1000
riv_left = z[y.astype(int) // 100, (x.astype(int) // 100) + 1]
riv_right = z[y.astype(int) // 100, (x.astype(int) // 100) - 1]
riv_left[riv_left < sea_level] = sea_level
riv_right[riv_right < sea_level] = sea_level
Tcf_time = time / Tcf
### SAVE DAILY TIMESTEPS ###
##########################################################################################
if Save_Daily_Timesteps == 1:
# np.savetxt('output_data/elev_grid/elev_'+str("%.3f" % nyears)+'.out',z,fmt='%.5f')
np.savetxt(
"output_data/rel_elev/rel_elev_" + str("%i" % time) + ".out",
rel_z,
fmt="%.5f",
)
np.savetxt(
"output_data/riv_course/riv_" + str("%i" % | cem.set_value("sea_surface_water_wave__period", 9.0) | random_line_split |
schedule.rs | ::new(0);
thread_local! {
static CURRENT_CONTEXT: RefCell<InnerContext> = {
RefCell::new(InnerContext::new())
};
}
type RequestSender = std_mpsc::Sender<Request>;
type RequestReceiver = std_mpsc::Receiver<Request>;
/// The identifier of a scheduler.
pub type SchedulerId = usize;
/// Scheduler of spawned fibers.
///
/// Scheduler manages the state of spawned fibers.
/// If a fiber is in a runnable state (e.g., not waiting for I/O events),
/// the scheduler will push the fiber onto its run queue.
/// When the `run_once` method is called, the first fiber (i.e., future) in the queue
/// will be popped and executed (i.e., its `Future::poll` method is called).
/// If the future of a fiber moves to the readied state,
/// it will be removed from the scheduler.
/// For efficiency reasons, it is recommended to run a scheduler on a dedicated thread.
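/// # Examples
///
/// A minimal usage sketch (how a `poll::PollerHandle` is obtained depends on the
/// surrounding runtime and is assumed here; the spawned future is illustrative):
///
/// ```ignore
/// use futures::future;
///
/// let mut scheduler = Scheduler::new(poller_handle);
/// let handle = scheduler.handle();
///
/// // One non-blocking `run_once` call processes the spawn request and then
/// // polls the fiber; an immediately-ready future is removed right away.
/// handle.spawn_boxed(Box::new(future::ok::<(), ()>(())));
/// scheduler.run_once(false);
/// assert_eq!(scheduler.fiber_count(), 0);
/// ```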
#[derive(Debug)]
pub struct Scheduler {
scheduler_id: SchedulerId,
next_fiber_id: fiber::FiberId,
fibers: HashMap<fiber::FiberId, fiber::FiberState>,
run_queue: VecDeque<fiber::FiberId>,
request_tx: RequestSender,
request_rx: RequestReceiver,
poller: poll::PollerHandle,
}
impl Scheduler {
/// Creates a new scheduler instance.
pub fn new(poller: poll::PollerHandle) -> Self {
let (request_tx, request_rx) = std_mpsc::channel();
Scheduler {
scheduler_id: NEXT_SCHEDULER_ID.fetch_add(1, atomic::Ordering::SeqCst),
next_fiber_id: 0,
fibers: HashMap::new(),
run_queue: VecDeque::new(),
request_tx,
request_rx,
poller,
}
}
/// Returns the identifier of this scheduler.
pub fn scheduler_id(&self) -> SchedulerId {
self.scheduler_id
}
/// Returns the length of the run queue of this scheduler.
pub fn run_queue_len(&self) -> usize {
self.run_queue.len()
}
/// Returns the count of alive fibers (i.e., not readied futures) in this scheduler.
pub fn fiber_count(&self) -> usize {
self.fibers.len()
}
/// Returns a handle of this scheduler.
pub fn handle(&self) -> SchedulerHandle {
SchedulerHandle {
request_tx: self.request_tx.clone(),
}
}
/// Runs one unit of work.
pub fn run_once(&mut self, block_if_idle: bool) {
let mut did_something = false;
loop {
// Request
match self.request_rx.try_recv() {
Err(std_mpsc::TryRecvError::Empty) => {}
Err(std_mpsc::TryRecvError::Disconnected) => unreachable!(),
Ok(request) => {
did_something = true;
self.handle_request(request);
}
}
// Task
if let Some(fiber_id) = self.next_runnable() {
did_something = true;
self.run_fiber(fiber_id);
}
if !block_if_idle || did_something {
break;
}
let request = self.request_rx.recv().expect("must succeed");
did_something = true;
self.handle_request(request);
}
}
fn handle_request(&mut self, request: Request) {
match request {
Request::Spawn(task) => self.spawn_fiber(task),
Request::WakeUp(fiber_id) => {
if self.fibers.contains_key(&fiber_id) {
self.schedule(fiber_id);
}
}
}
}
fn spawn_fiber(&mut self, task: Task) {
let fiber_id = self.next_fiber_id();
self.fibers
.insert(fiber_id, fiber::FiberState::new(fiber_id, task));
self.schedule(fiber_id);
}
fn run_fiber(&mut self, fiber_id: fiber::FiberId) { | let is_runnable = {
CURRENT_CONTEXT.with(|context| {
let mut context = context.borrow_mut();
if context
.scheduler
.as_ref()
.map_or(true, |s| s.id != self.scheduler_id)
{
context.switch(self);
}
{
let scheduler = assert_some!(context.scheduler.as_mut());
if !scheduler.poller.is_alive() {
// TODO: Return `Err(io::Error)` to caller and
// handle the error in upper layers
panic!("Poller is down");
}
}
assert!(context.fiber.is_none(), "Nested schedulers");
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
context.fiber = Some(fiber as _);
});
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
finished = fiber.run_once();
CURRENT_CONTEXT.with(|context| {
context.borrow_mut().fiber = None;
});
fiber.is_runnable()
};
if finished {
self.fibers.remove(&fiber_id);
} else if is_runnable {
self.schedule(fiber_id);
}
}
fn next_fiber_id(&mut self) -> fiber::FiberId {
loop {
let id = self.next_fiber_id;
self.next_fiber_id = id.wrapping_add(1);
if !self.fibers.contains_key(&id) {
return id;
}
}
}
fn schedule(&mut self, fiber_id: fiber::FiberId) {
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
if !fiber.in_run_queue {
self.run_queue.push_back(fiber_id);
fiber.in_run_queue = true;
}
}
fn next_runnable(&mut self) -> Option<fiber::FiberId> {
while let Some(fiber_id) = self.run_queue.pop_front() {
if let Some(fiber) = self.fibers.get_mut(&fiber_id) {
fiber.in_run_queue = false;
return Some(fiber_id);
}
}
None
}
}
/// A handle of a scheduler.
#[derive(Debug, Clone)]
pub struct SchedulerHandle {
request_tx: RequestSender,
}
impl SchedulerHandle {
/// Wakes up a specified fiber in the scheduler.
///
/// This forces the fiber to be pushed to the run queue of the scheduler.
pub fn wakeup(&self, fiber_id: fiber::FiberId) {
let _ = self.request_tx.send(Request::WakeUp(fiber_id));
}
}
impl Spawn for SchedulerHandle {
fn spawn_boxed(&self, fiber: Box<dyn Future<Item = (), Error = ()> + Send>) {
let _ = self.request_tx.send(Request::Spawn(Task(fiber)));
}
}
#[derive(Debug)]
pub struct CurrentScheduler {
pub id: SchedulerId,
pub handle: SchedulerHandle,
pub poller: poll::PollerHandle,
}
/// Calls `f` with the current execution context.
///
/// If this function is called outside of a fiber, it ignores `f` and returns `None`.
pub fn with_current_context<F, T>(f: F) -> Option<T>
where
F: FnOnce(Context) -> T,
{
CURRENT_CONTEXT.with(|inner_context| inner_context.borrow_mut().as_context().map(f))
}
/// The execution context of the currently running fiber.
#[derive(Debug)]
pub struct Context<'a> {
scheduler: &'a mut CurrentScheduler,
fiber: &'a mut FiberState,
}
impl<'a> Context<'a> {
/// Returns the identifier of the current execution context.
pub fn context_id(&self) -> super::ContextId {
(self.scheduler.id, self.fiber.fiber_id)
}
/// Parks the current fiber.
pub fn park(&mut self) -> super::Unpark {
self.fiber
.park(self.scheduler.id, self.scheduler.handle.clone())
}
/// Returns the I/O event poller for this context.
pub fn poller(&mut self) -> &mut poll::PollerHandle {
&mut self.scheduler.poller
}
}
/// Cooperatively gives up a poll for the current future (fiber).
///
/// # Examples
///
/// ```
/// # extern crate fibers;
/// # extern crate futures;
/// use fibers::{fiber, Executor, InPlaceExecutor, Spawn};
/// use futures::{Future, Async, Poll};
///
/// struct HeavyCalculation {
/// polled_count: usize,
/// loops: usize
/// }
/// impl HeavyCalculation {
/// fn new(loop_count: usize) -> Self {
/// HeavyCalculation { polled_count: 0, loops: loop_count }
/// }
/// }
/// impl Future for HeavyCalculation {
/// type Item = usize;
/// type Error = ();
/// fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
/// self.polled_count += 1;
///
/// let mut per_poll_loop_limit = 10;
/// while self.loops > 0 {
/// self.loops -= 1;
/// per_poll_loop_limit -= 1;
/// if per_poll_loop_limit == 0 {
/// // Suspends calculation and gives execution to other fibers.
/// return fiber::yield_poll();
/// }
/// }
/// Ok(Async::Ready(self.polled_count))
/// }
/// }
///
/// let mut executor = InPlaceExecutor::new(). | let finished; | random_line_split |
schedule.rs | new(0);
thread_local! {
static CURRENT_CONTEXT: RefCell<InnerContext> = {
RefCell::new(InnerContext::new())
};
}
type RequestSender = std_mpsc::Sender<Request>;
type RequestReceiver = std_mpsc::Receiver<Request>;
/// The identifier of a scheduler.
pub type SchedulerId = usize;
/// Scheduler of spawned fibers.
///
/// Scheduler manages the state of spawned fibers.
/// If a fiber is in a runnable state (e.g., not waiting for I/O events),
/// the scheduler will push the fiber onto its run queue.
/// When the `run_once` method is called, the first fiber (i.e., future) in the queue
/// will be popped and executed (i.e., its `Future::poll` method is called).
/// If the future of a fiber moves to the readied state,
/// it will be removed from the scheduler.
/// For efficiency reasons, it is recommended to run a scheduler on a dedicated thread.
#[derive(Debug)]
pub struct Scheduler {
scheduler_id: SchedulerId,
next_fiber_id: fiber::FiberId,
fibers: HashMap<fiber::FiberId, fiber::FiberState>,
run_queue: VecDeque<fiber::FiberId>,
request_tx: RequestSender,
request_rx: RequestReceiver,
poller: poll::PollerHandle,
}
impl Scheduler {
/// Creates a new scheduler instance.
pub fn new(poller: poll::PollerHandle) -> Self {
let (request_tx, request_rx) = std_mpsc::channel();
Scheduler {
scheduler_id: NEXT_SCHEDULER_ID.fetch_add(1, atomic::Ordering::SeqCst),
next_fiber_id: 0,
fibers: HashMap::new(),
run_queue: VecDeque::new(),
request_tx,
request_rx,
poller,
}
}
/// Returns the identifier of this scheduler.
pub fn scheduler_id(&self) -> SchedulerId {
self.scheduler_id
}
/// Returns the length of the run queue of this scheduler.
pub fn run_queue_len(&self) -> usize {
self.run_queue.len()
}
/// Returns the count of alive fibers (i.e., not readied futures) in this scheduler.
pub fn fiber_count(&self) -> usize {
self.fibers.len()
}
/// Returns a handle of this scheduler.
pub fn handle(&self) -> SchedulerHandle {
SchedulerHandle {
request_tx: self.request_tx.clone(),
}
}
/// Runs one unit of work.
pub fn run_once(&mut self, block_if_idle: bool) {
let mut did_something = false;
loop {
// Request
match self.request_rx.try_recv() {
Err(std_mpsc::TryRecvError::Empty) => |
Err(std_mpsc::TryRecvError::Disconnected) => unreachable!(),
Ok(request) => {
did_something = true;
self.handle_request(request);
}
}
// Task
if let Some(fiber_id) = self.next_runnable() {
did_something = true;
self.run_fiber(fiber_id);
}
if !block_if_idle || did_something {
break;
}
let request = self.request_rx.recv().expect("must succeed");
did_something = true;
self.handle_request(request);
}
}
fn handle_request(&mut self, request: Request) {
match request {
Request::Spawn(task) => self.spawn_fiber(task),
Request::WakeUp(fiber_id) => {
if self.fibers.contains_key(&fiber_id) {
self.schedule(fiber_id);
}
}
}
}
fn spawn_fiber(&mut self, task: Task) {
let fiber_id = self.next_fiber_id();
self.fibers
.insert(fiber_id, fiber::FiberState::new(fiber_id, task));
self.schedule(fiber_id);
}
fn run_fiber(&mut self, fiber_id: fiber::FiberId) {
let finished;
let is_runnable = {
CURRENT_CONTEXT.with(|context| {
let mut context = context.borrow_mut();
if context
.scheduler
.as_ref()
.map_or(true, |s| s.id != self.scheduler_id)
{
context.switch(self);
}
{
let scheduler = assert_some!(context.scheduler.as_mut());
if !scheduler.poller.is_alive() {
// TODO: Return `Err(io::Error)` to caller and
// handle the error in upper layers
panic!("Poller is down");
}
}
assert!(context.fiber.is_none(), "Nested schedulers");
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
context.fiber = Some(fiber as _);
});
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
finished = fiber.run_once();
CURRENT_CONTEXT.with(|context| {
context.borrow_mut().fiber = None;
});
fiber.is_runnable()
};
if finished {
self.fibers.remove(&fiber_id);
} else if is_runnable {
self.schedule(fiber_id);
}
}
fn next_fiber_id(&mut self) -> fiber::FiberId {
loop {
let id = self.next_fiber_id;
self.next_fiber_id = id.wrapping_add(1);
if !self.fibers.contains_key(&id) {
return id;
}
}
}
fn schedule(&mut self, fiber_id: fiber::FiberId) {
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
if !fiber.in_run_queue {
self.run_queue.push_back(fiber_id);
fiber.in_run_queue = true;
}
}
fn next_runnable(&mut self) -> Option<fiber::FiberId> {
while let Some(fiber_id) = self.run_queue.pop_front() {
if let Some(fiber) = self.fibers.get_mut(&fiber_id) {
fiber.in_run_queue = false;
return Some(fiber_id);
}
}
None
}
}
/// A handle of a scheduler.
#[derive(Debug, Clone)]
pub struct SchedulerHandle {
request_tx: RequestSender,
}
impl SchedulerHandle {
/// Wakes up a specified fiber in the scheduler.
///
/// This forces the fiber to be pushed to the run queue of the scheduler.
pub fn wakeup(&self, fiber_id: fiber::FiberId) {
let _ = self.request_tx.send(Request::WakeUp(fiber_id));
}
}
impl Spawn for SchedulerHandle {
fn spawn_boxed(&self, fiber: Box<dyn Future<Item = (), Error = ()> + Send>) {
let _ = self.request_tx.send(Request::Spawn(Task(fiber)));
}
}
#[derive(Debug)]
pub struct CurrentScheduler {
pub id: SchedulerId,
pub handle: SchedulerHandle,
pub poller: poll::PollerHandle,
}
/// Calls `f` with the current execution context.
///
/// If this function is called outside of a fiber, it ignores `f` and returns `None`.
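///
/// # Examples
///
/// A sketch (the closure only runs when this is called from inside a running fiber):
///
/// ```ignore
/// if let Some((scheduler_id, fiber_id)) = with_current_context(|ctx| ctx.context_id()) {
///     // running inside a fiber managed by scheduler `scheduler_id`
/// }
/// ```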
pub fn with_current_context<F, T>(f: F) -> Option<T>
where
F: FnOnce(Context) -> T,
{
CURRENT_CONTEXT.with(|inner_context| inner_context.borrow_mut().as_context().map(f))
}
/// The execution context of the currently running fiber.
#[derive(Debug)]
pub struct Context<'a> {
scheduler: &'a mut CurrentScheduler,
fiber: &'a mut FiberState,
}
impl<'a> Context<'a> {
/// Returns the identifier of the current execution context.
pub fn context_id(&self) -> super::ContextId {
(self.scheduler.id, self.fiber.fiber_id)
}
/// Parks the current fiber.
pub fn park(&mut self) -> super::Unpark {
self.fiber
.park(self.scheduler.id, self.scheduler.handle.clone())
}
/// Returns the I/O event poller for this context.
pub fn poller(&mut self) -> &mut poll::PollerHandle {
&mut self.scheduler.poller
}
}
/// Cooperatively gives up a poll for the current future (fiber).
///
/// # Examples
///
/// ```
/// # extern crate fibers;
/// # extern crate futures;
/// use fibers::{fiber, Executor, InPlaceExecutor, Spawn};
/// use futures::{Future, Async, Poll};
///
/// struct HeavyCalculation {
/// polled_count: usize,
/// loops: usize
/// }
/// impl HeavyCalculation {
/// fn new(loop_count: usize) -> Self {
/// HeavyCalculation { polled_count: 0, loops: loop_count }
/// }
/// }
/// impl Future for HeavyCalculation {
/// type Item = usize;
/// type Error = ();
/// fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
/// self.polled_count += 1;
///
/// let mut per_poll_loop_limit = 10;
/// while self.loops > 0 {
/// self.loops -= 1;
/// per_poll_loop_limit -= 1;
/// if per_poll_loop_limit == 0 {
/// // Suspends calculation and gives execution to other fibers.
/// return fiber::yield_poll();
/// }
/// }
/// Ok(Async::Ready(self.polled_count))
/// }
/// }
///
/// let mut executor = InPlaceExecutor::new | {} | conditional_block |
schedule.rs | ::new(0);
thread_local! {
static CURRENT_CONTEXT: RefCell<InnerContext> = {
RefCell::new(InnerContext::new())
};
}
type RequestSender = std_mpsc::Sender<Request>;
type RequestReceiver = std_mpsc::Receiver<Request>;
/// The identifier of a scheduler.
pub type SchedulerId = usize;
/// Scheduler of spawned fibers.
///
/// Scheduler manages the state of spawned fibers.
/// If a fiber is in a runnable state (e.g., not waiting for I/O events),
/// the scheduler will push the fiber onto its run queue.
/// When the `run_once` method is called, the first fiber (i.e., future) in the queue
/// will be popped and executed (i.e., its `Future::poll` method is called).
/// If the future of a fiber moves to the readied state,
/// it will be removed from the scheduler.
/// For efficiency reasons, it is recommended to run a scheduler on a dedicated thread.
#[derive(Debug)]
pub struct Scheduler {
scheduler_id: SchedulerId,
next_fiber_id: fiber::FiberId,
fibers: HashMap<fiber::FiberId, fiber::FiberState>,
run_queue: VecDeque<fiber::FiberId>,
request_tx: RequestSender,
request_rx: RequestReceiver,
poller: poll::PollerHandle,
}
impl Scheduler {
/// Creates a new scheduler instance.
pub fn new(poller: poll::PollerHandle) -> Self {
let (request_tx, request_rx) = std_mpsc::channel();
Scheduler {
scheduler_id: NEXT_SCHEDULER_ID.fetch_add(1, atomic::Ordering::SeqCst),
next_fiber_id: 0,
fibers: HashMap::new(),
run_queue: VecDeque::new(),
request_tx,
request_rx,
poller,
}
}
/// Returns the identifier of this scheduler.
pub fn scheduler_id(&self) -> SchedulerId {
self.scheduler_id
}
/// Returns the length of the run queue of this scheduler.
pub fn run_queue_len(&self) -> usize {
self.run_queue.len()
}
/// Returns the count of alive fibers (i.e., not readied futures) in this scheduler.
pub fn fiber_count(&self) -> usize {
self.fibers.len()
}
/// Returns a handle of this scheduler.
pub fn handle(&self) -> SchedulerHandle {
SchedulerHandle {
request_tx: self.request_tx.clone(),
}
}
/// Runs one unit of work.
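///
/// # Examples
///
/// A typical driver loop (sketch; construction of `scheduler` is omitted):
///
/// ```ignore
/// loop {
///     // Block only while idle; spawn and wake-up requests arriving through a
///     // `SchedulerHandle` resume the loop.
///     scheduler.run_once(true);
/// }
/// ```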
pub fn run_once(&mut self, block_if_idle: bool) {
let mut did_something = false;
loop {
// Request
match self.request_rx.try_recv() {
Err(std_mpsc::TryRecvError::Empty) => {}
Err(std_mpsc::TryRecvError::Disconnected) => unreachable!(),
Ok(request) => {
did_something = true;
self.handle_request(request);
}
}
// Task
if let Some(fiber_id) = self.next_runnable() {
did_something = true;
self.run_fiber(fiber_id);
}
if !block_if_idle || did_something {
break;
}
let request = self.request_rx.recv().expect("must succeed");
did_something = true;
self.handle_request(request);
}
}
fn handle_request(&mut self, request: Request) {
match request {
Request::Spawn(task) => self.spawn_fiber(task),
Request::WakeUp(fiber_id) => {
if self.fibers.contains_key(&fiber_id) {
self.schedule(fiber_id);
}
}
}
}
fn spawn_fiber(&mut self, task: Task) {
let fiber_id = self.next_fiber_id();
self.fibers
.insert(fiber_id, fiber::FiberState::new(fiber_id, task));
self.schedule(fiber_id);
}
fn run_fiber(&mut self, fiber_id: fiber::FiberId) {
let finished;
let is_runnable = {
CURRENT_CONTEXT.with(|context| {
let mut context = context.borrow_mut();
if context
.scheduler
.as_ref()
.map_or(true, |s| s.id != self.scheduler_id)
{
context.switch(self);
}
{
let scheduler = assert_some!(context.scheduler.as_mut());
if !scheduler.poller.is_alive() {
// TODO: Return `Err(io::Error)` to caller and
// handle the error in upper layers
panic!("Poller is down");
}
}
assert!(context.fiber.is_none(), "Nested schedulers");
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
context.fiber = Some(fiber as _);
});
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
finished = fiber.run_once();
CURRENT_CONTEXT.with(|context| {
context.borrow_mut().fiber = None;
});
fiber.is_runnable()
};
if finished {
self.fibers.remove(&fiber_id);
} else if is_runnable {
self.schedule(fiber_id);
}
}
fn next_fiber_id(&mut self) -> fiber::FiberId {
loop {
let id = self.next_fiber_id;
self.next_fiber_id = id.wrapping_add(1);
if !self.fibers.contains_key(&id) {
return id;
}
}
}
fn schedule(&mut self, fiber_id: fiber::FiberId) {
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
if !fiber.in_run_queue {
self.run_queue.push_back(fiber_id);
fiber.in_run_queue = true;
}
}
fn next_runnable(&mut self) -> Option<fiber::FiberId> {
while let Some(fiber_id) = self.run_queue.pop_front() {
if let Some(fiber) = self.fibers.get_mut(&fiber_id) {
fiber.in_run_queue = false;
return Some(fiber_id);
}
}
None
}
}
/// A handle of a scheduler.
#[derive(Debug, Clone)]
pub struct SchedulerHandle {
request_tx: RequestSender,
}
impl SchedulerHandle {
/// Wakes up a specified fiber in the scheduler.
///
/// This forces the fiber to be pushed to the run queue of the scheduler.
pub fn wakeup(&self, fiber_id: fiber::FiberId) {
let _ = self.request_tx.send(Request::WakeUp(fiber_id));
}
}
impl Spawn for SchedulerHandle {
fn spawn_boxed(&self, fiber: Box<dyn Future<Item = (), Error = ()> + Send>) {
let _ = self.request_tx.send(Request::Spawn(Task(fiber)));
}
}
#[derive(Debug)]
pub struct CurrentScheduler {
pub id: SchedulerId,
pub handle: SchedulerHandle,
pub poller: poll::PollerHandle,
}
/// Calls `f` with the current execution context.
///
/// If this function is called outside of a fiber, it ignores `f` and returns `None`.
pub fn with_current_context<F, T>(f: F) -> Option<T>
where
F: FnOnce(Context) -> T,
{
CURRENT_CONTEXT.with(|inner_context| inner_context.borrow_mut().as_context().map(f))
}
/// The execution context of the currently running fiber.
#[derive(Debug)]
pub struct | <'a> {
scheduler: &'a mut CurrentScheduler,
fiber: &'a mut FiberState,
}
impl<'a> Context<'a> {
/// Returns the identifier of the current execution context.
pub fn context_id(&self) -> super::ContextId {
(self.scheduler.id, self.fiber.fiber_id)
}
/// Parks the current fiber.
pub fn park(&mut self) -> super::Unpark {
self.fiber
.park(self.scheduler.id, self.scheduler.handle.clone())
}
/// Returns the I/O event poller for this context.
pub fn poller(&mut self) -> &mut poll::PollerHandle {
&mut self.scheduler.poller
}
}
/// Cooperatively gives up a poll for the current future (fiber).
///
/// # Examples
///
/// ```
/// # extern crate fibers;
/// # extern crate futures;
/// use fibers::{fiber, Executor, InPlaceExecutor, Spawn};
/// use futures::{Future, Async, Poll};
///
/// struct HeavyCalculation {
/// polled_count: usize,
/// loops: usize
/// }
/// impl HeavyCalculation {
/// fn new(loop_count: usize) -> Self {
/// HeavyCalculation { polled_count: 0, loops: loop_count }
/// }
/// }
/// impl Future for HeavyCalculation {
/// type Item = usize;
/// type Error = ();
/// fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
/// self.polled_count += 1;
///
/// let mut per_poll_loop_limit = 10;
/// while self.loops > 0 {
/// self.loops -= 1;
/// per_poll_loop_limit -= 1;
/// if per_poll_loop_limit == 0 {
/// // Suspends calculation and gives execution to other fibers.
/// return fiber::yield_poll();
/// }
/// }
/// Ok(Async::Ready(self.polled_count))
/// }
/// }
///
/// let mut executor = InPlaceExecutor::new | Context | identifier_name |
schedule.rs | new(0);
thread_local! {
static CURRENT_CONTEXT: RefCell<InnerContext> = {
RefCell::new(InnerContext::new())
};
}
type RequestSender = std_mpsc::Sender<Request>;
type RequestReceiver = std_mpsc::Receiver<Request>;
/// The identifier of a scheduler.
pub type SchedulerId = usize;
/// Scheduler of spawned fibers.
///
/// Scheduler manages the state of spawned fibers.
/// If a fiber is in a runnable state (e.g., not waiting for I/O events),
/// the scheduler will push the fiber onto its run queue.
/// When the `run_once` method is called, the first fiber (i.e., future) in the queue
/// will be popped and executed (i.e., its `Future::poll` method is called).
/// If the future of a fiber moves to the readied state,
/// it will be removed from the scheduler.
/// For efficiency reasons, it is recommended to run a scheduler on a dedicated thread.
#[derive(Debug)]
pub struct Scheduler {
scheduler_id: SchedulerId,
next_fiber_id: fiber::FiberId,
fibers: HashMap<fiber::FiberId, fiber::FiberState>,
run_queue: VecDeque<fiber::FiberId>,
request_tx: RequestSender,
request_rx: RequestReceiver,
poller: poll::PollerHandle,
}
impl Scheduler {
/// Creates a new scheduler instance.
pub fn new(poller: poll::PollerHandle) -> Self {
let (request_tx, request_rx) = std_mpsc::channel();
Scheduler {
scheduler_id: NEXT_SCHEDULER_ID.fetch_add(1, atomic::Ordering::SeqCst),
next_fiber_id: 0,
fibers: HashMap::new(),
run_queue: VecDeque::new(),
request_tx,
request_rx,
poller,
}
}
/// Returns the identifier of this scheduler.
pub fn scheduler_id(&self) -> SchedulerId {
self.scheduler_id
}
/// Returns the length of the run queue of this scheduler.
pub fn run_queue_len(&self) -> usize {
self.run_queue.len()
}
/// Returns the count of alive fibers (i.e., not readied futures) in this scheduler.
pub fn fiber_count(&self) -> usize {
self.fibers.len()
}
/// Returns a handle of this scheduler.
pub fn handle(&self) -> SchedulerHandle {
SchedulerHandle {
request_tx: self.request_tx.clone(),
}
}
/// Runs one unit of work.
pub fn run_once(&mut self, block_if_idle: bool) {
let mut did_something = false;
loop {
// Request
match self.request_rx.try_recv() {
Err(std_mpsc::TryRecvError::Empty) => {}
Err(std_mpsc::TryRecvError::Disconnected) => unreachable!(),
Ok(request) => {
did_something = true;
self.handle_request(request);
}
}
// Task
if let Some(fiber_id) = self.next_runnable() {
did_something = true;
self.run_fiber(fiber_id);
}
if !block_if_idle || did_something {
break;
}
let request = self.request_rx.recv().expect("must succeed");
did_something = true;
self.handle_request(request);
}
}
fn handle_request(&mut self, request: Request) {
match request {
Request::Spawn(task) => self.spawn_fiber(task),
Request::WakeUp(fiber_id) => {
if self.fibers.contains_key(&fiber_id) {
self.schedule(fiber_id);
}
}
}
}
fn spawn_fiber(&mut self, task: Task) {
let fiber_id = self.next_fiber_id();
self.fibers
.insert(fiber_id, fiber::FiberState::new(fiber_id, task));
self.schedule(fiber_id);
}
fn run_fiber(&mut self, fiber_id: fiber::FiberId) {
let finished;
let is_runnable = {
CURRENT_CONTEXT.with(|context| {
let mut context = context.borrow_mut();
if context
.scheduler
.as_ref()
.map_or(true, |s| s.id != self.scheduler_id)
{
context.switch(self);
}
{
let scheduler = assert_some!(context.scheduler.as_mut());
if !scheduler.poller.is_alive() {
// TODO: Return `Err(io::Error)` to caller and
// handle the error in upper layers
panic!("Poller is down");
}
}
assert!(context.fiber.is_none(), "Nested schedulers");
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
context.fiber = Some(fiber as _);
});
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
finished = fiber.run_once();
CURRENT_CONTEXT.with(|context| {
context.borrow_mut().fiber = None;
});
fiber.is_runnable()
};
if finished {
self.fibers.remove(&fiber_id);
} else if is_runnable {
self.schedule(fiber_id);
}
}
fn next_fiber_id(&mut self) -> fiber::FiberId {
loop {
let id = self.next_fiber_id;
self.next_fiber_id = id.wrapping_add(1);
if !self.fibers.contains_key(&id) {
return id;
}
}
}
fn schedule(&mut self, fiber_id: fiber::FiberId) |
fn next_runnable(&mut self) -> Option<fiber::FiberId> {
while let Some(fiber_id) = self.run_queue.pop_front() {
if let Some(fiber) = self.fibers.get_mut(&fiber_id) {
fiber.in_run_queue = false;
return Some(fiber_id);
}
}
None
}
}
/// A handle of a scheduler.
#[derive(Debug, Clone)]
pub struct SchedulerHandle {
request_tx: RequestSender,
}
impl SchedulerHandle {
/// Wakes up a specified fiber in the scheduler.
///
/// This forces the fiber to be pushed to the run queue of the scheduler.
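///
/// # Examples
///
/// A sketch of waking a parked fiber from another thread (`fiber_id` is assumed to
/// have been captured inside the fiber beforehand, e.g. via `with_current_context`):
///
/// ```ignore
/// let handle = scheduler.handle();
/// std::thread::spawn(move || {
///     handle.wakeup(fiber_id);
/// });
/// ```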
pub fn wakeup(&self, fiber_id: fiber::FiberId) {
let _ = self.request_tx.send(Request::WakeUp(fiber_id));
}
}
impl Spawn for SchedulerHandle {
fn spawn_boxed(&self, fiber: Box<dyn Future<Item = (), Error = ()> + Send>) {
let _ = self.request_tx.send(Request::Spawn(Task(fiber)));
}
}
#[derive(Debug)]
pub struct CurrentScheduler {
pub id: SchedulerId,
pub handle: SchedulerHandle,
pub poller: poll::PollerHandle,
}
/// Calls `f` with the current execution context.
///
/// If this function is called outside of a fiber, it ignores `f` and returns `None`.
pub fn with_current_context<F, T>(f: F) -> Option<T>
where
F: FnOnce(Context) -> T,
{
CURRENT_CONTEXT.with(|inner_context| inner_context.borrow_mut().as_context().map(f))
}
/// The execution context of the currently running fiber.
#[derive(Debug)]
pub struct Context<'a> {
scheduler: &'a mut CurrentScheduler,
fiber: &'a mut FiberState,
}
impl<'a> Context<'a> {
/// Returns the identifier of the current execution context.
pub fn context_id(&self) -> super::ContextId {
(self.scheduler.id, self.fiber.fiber_id)
}
/// Parks the current fiber.
pub fn park(&mut self) -> super::Unpark {
self.fiber
.park(self.scheduler.id, self.scheduler.handle.clone())
}
/// Returns the I/O event poller for this context.
pub fn poller(&mut self) -> &mut poll::PollerHandle {
&mut self.scheduler.poller
}
}
/// Cooperatively gives up a poll for the current future (fiber).
///
/// # Examples
///
/// ```
/// # extern crate fibers;
/// # extern crate futures;
/// use fibers::{fiber, Executor, InPlaceExecutor, Spawn};
/// use futures::{Future, Async, Poll};
///
/// struct HeavyCalculation {
/// polled_count: usize,
/// loops: usize
/// }
/// impl HeavyCalculation {
/// fn new(loop_count: usize) -> Self {
/// HeavyCalculation { polled_count: 0, loops: loop_count }
/// }
/// }
/// impl Future for HeavyCalculation {
/// type Item = usize;
/// type Error = ();
/// fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
/// self.polled_count += 1;
///
/// let mut per_poll_loop_limit = 10;
/// while self.loops > 0 {
/// self.loops -= 1;
/// per_poll_loop_limit -= 1;
/// if per_poll_loop_limit == 0 {
/// // Suspends calculation and gives execution to other fibers.
/// return fiber::yield_poll();
/// }
/// }
/// Ok(Async::Ready(self.polled_count))
/// }
/// }
///
/// let mut executor = InPlaceExecutor::new | {
let fiber = assert_some!(self.fibers.get_mut(&fiber_id));
if !fiber.in_run_queue {
self.run_queue.push_back(fiber_id);
fiber.in_run_queue = true;
}
} | identifier_body |
HttpServiceSparqlEndpoint.ts | value\\" : \\"http://fragments.dbpedia.org/2015/en\\" }]}" [-p port] [-t timeout] [-l log-level] [-i] [--help]
Options:
-p The HTTP port to run on (default: 3000)
-t The query execution timeout in seconds (default: 60)
-l Sets the log level (e.g., debug, info, warn, ... defaults to warn)
-i A flag that enables cache invalidation before each query execution.
--help print this help message
`;
// tslint:enable:max-line-length
public readonly engine: Promise<ActorInitSparql>;
public readonly context: any;
public readonly timeout: number;
public readonly port: number;
public readonly invalidateCacheBeforeQuery: boolean;
constructor(args?: IHttpServiceSparqlEndpointArgs) {
args = args || {};
this.context = args.context || {};
this.timeout = args.timeout || 60000;
this.port = args.port || 3000;
this.invalidateCacheBeforeQuery = args.invalidateCacheBeforeQuery;
this.engine = newEngineDynamic(args);
}
/**
* Starts the server
* @param {string[]} argv The commandline arguments that the script was called with
* @param {module:stream.internal.Writable} stdout The output stream to log to.
* @param {module:stream.internal.Writable} stderr The error stream to log errors to.
* @param {string} moduleRootPath The path to the invoking module.
* @param {NodeJS.ProcessEnv} env The process env to get constants from.
* @param {string} defaultConfigPath The path to get the config from if none is defined in the environment.
* @param {(code: number) => void} exit The callback to invoke to stop the script.
* @return {Promise<void>} A promise that resolves when the server has been started.
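* @example
* // A minimal invocation sketch (the config path and exit callback are illustrative):
* HttpServiceSparqlEndpoint.runArgsInProcess(process.argv.slice(2), process.stdout,
*   process.stderr, __dirname, process.env, 'config/config-default.json',
*   (code) => process.exit(code));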
*/
public static runArgsInProcess(argv: string[], stdout: Writable, stderr: Writable,
moduleRootPath: string, env: NodeJS.ProcessEnv,
defaultConfigPath: string, exit: (code: number) => void): Promise<void> {
const args = minimist(argv);
if (args._.length !== 1 || args.h || args.help) {
stderr.write(HttpServiceSparqlEndpoint.HELP_MESSAGE);
exit(1);
}
const options = HttpServiceSparqlEndpoint
.generateConstructorArguments(args, moduleRootPath, env, defaultConfigPath);
return new Promise<void>((resolve) => {
new HttpServiceSparqlEndpoint(options).run(stdout, stderr)
.then(resolve)
.catch((reason) => {
stderr.write(reason);
exit(1);
resolve();
});
});
}
/**
* Takes parsed commandline arguments and turns them into an object used in the HttpServiceSparqlEndpoint constructor
* @param {args: minimist.ParsedArgs} args The commandline arguments that the script was called with
* @param {string} moduleRootPath The path to the invoking module.
* @param {NodeJS.ProcessEnv} env The process env to get constants from.
* @param {string} defaultConfigPath The path to get the config from if none is defined in the environment.
*/
public static generateConstructorArguments(args: minimist.ParsedArgs, moduleRootPath: string,
env: NodeJS.ProcessEnv, defaultConfigPath: string)
: IHttpServiceSparqlEndpointArgs {
// allow the context to be either a file path or an inline JSON string
const context = JSON.parse(fs.existsSync(args._[0]) ? fs.readFileSync(args._[0], 'utf8') : args._[0]);
const invalidateCacheBeforeQuery: boolean = args.i;
const port = parseInt(args.p, 10) || 3000;
const timeout = (parseInt(args.t, 10) || 60) * 1000;
// Set the logger
if (!context.log) {
context.log = new LoggerPretty({ level: args.l || 'warn' });
}
const configResourceUrl = env.COMUNICA_CONFIG ? env.COMUNICA_CONFIG : defaultConfigPath;
return {
configResourceUrl,
context,
invalidateCacheBeforeQuery,
mainModulePath: moduleRootPath,
port,
timeout,
};
}
/**
* Start the HTTP service.
* @param {module:stream.internal.Writable} stdout The output stream to log to.
* @param {module:stream.internal.Writable} stderr The error stream to log errors to.
*/
public async run(stdout: Writable, stderr: Writable) {
const engine: ActorInitSparql = await this.engine;
// Determine the allowed media types for requests
const mediaTypes: {[id: string]: number} = await engine.getResultMediaTypes(null);
const variants: { type: string, quality: number }[] = [];
for (const type of Object.keys(mediaTypes)) {
variants.push({ type, quality: mediaTypes[type] });
}
// Start the server
const server = http.createServer(this.handleRequest.bind(this, engine, variants, stdout, stderr));
server.listen(this.port);
server.setTimeout(2 * this.timeout); // unreliable mechanism, set too high on purpose
stderr.write('Server running on http://localhost:' + this.port + '/\n');
}
/**
* Handles an HTTP request.
* @param {ActorInitSparql} engine A SPARQL engine.
* @param {{type: string; quality: number}[]} variants Allowed variants.
* @param {module:stream.internal.Writable} stdout Output stream.
* @param {module:stream.internal.Writable} stderr Error output stream.
* @param {module:http.IncomingMessage} request Request object.
* @param {module:http.ServerResponse} response Response object.
*/
public async | (engine: ActorInitSparql, variants: { type: string, quality: number }[],
stdout: Writable, stderr: Writable,
request: http.IncomingMessage, response: http.ServerResponse) {
const mediaType: string = request.headers.accept && request.headers.accept !== '*/*'
? require('negotiate').choose(variants, request)[0].type : null;
// Verify the path
const requestUrl = url.parse(request.url, true);
if (requestUrl.pathname !== '/sparql') {
stdout.write('[404] Resource not found\n');
response.writeHead(404,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_JSON, 'Access-Control-Allow-Origin': '*' });
response.end(JSON.stringify({ message: 'Resource not found' }));
return;
}
if (this.invalidateCacheBeforeQuery) {
// Invalidate cache
await engine.invalidateHttpCache();
}
// Parse the query, depending on the HTTP method
let sparql;
switch (request.method) {
case 'POST':
sparql = await this.parseBody(request);
this.writeQueryResult(engine, stdout, stderr, request, response, sparql, mediaType, false);
break;
case 'HEAD':
case 'GET':
sparql = <string> (<querystring.ParsedUrlQuery> requestUrl.query).query || '';
this.writeQueryResult(engine, stdout, stderr, request, response, sparql, mediaType, request.method === 'HEAD');
break;
default:
stdout.write('[405] ' + request.method + ' to ' + requestUrl + '\n');
response.writeHead(405,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_JSON, 'Access-Control-Allow-Origin': '*' });
response.end(JSON.stringify({ message: 'Incorrect HTTP method' }));
}
}
/**
* Writes the result of the given SPARQL query.
* @param {ActorInitSparql} engine A SPARQL engine.
* @param {module:stream.internal.Writable} stdout Output stream.
* @param {module:stream.internal.Writable} stderr Error output stream.
* @param {module:http.IncomingMessage} request Request object.
* @param {module:http.ServerResponse} response Response object.
* @param {string} sparql The SPARQL query string.
* @param {string} mediaType The requested response media type.
* @param {boolean} headOnly If only the header should be written.
*/
public writeQueryResult(engine: ActorInitSparql, stdout: Writable, stderr: Writable,
request: http.IncomingMessage, response: http.ServerResponse,
sparql: string, mediaType: string, headOnly: boolean) {
let eventEmitter: EventEmitter;
engine.query(sparql, this.context)
.then(async (result) => {
stdout.write('[200] ' + request.method + ' to ' + request.url + '\n');
stdout.write(' Requested media type: ' + mediaType + '\n');
stdout.write(' Received query: ' + sparql + '\n');
response.writeHead(200, { 'content-type': mediaType, 'Access-Control-Allow-Origin': '*' });
if (headOnly) {
response.end();
return;
}
try {
const data: NodeJS.ReadableStream = (await engine | handleRequest | identifier_name |
HttpServiceSparqlEndpoint.ts | @param {(code: number) => void} exit The callback to invoke to stop the script.
* @return {Promise<void>} A promise that resolves when the server has been started.
*/
public static runArgsInProcess(argv: string[], stdout: Writable, stderr: Writable,
moduleRootPath: string, env: NodeJS.ProcessEnv,
defaultConfigPath: string, exit: (code: number) => void): Promise<void> {
const args = minimist(argv);
if (args._.length !== 1 || args.h || args.help) {
stderr.write(HttpServiceSparqlEndpoint.HELP_MESSAGE);
exit(1);
}
const options = HttpServiceSparqlEndpoint
.generateConstructorArguments(args, moduleRootPath, env, defaultConfigPath);
return new Promise<void>((resolve) => {
new HttpServiceSparqlEndpoint(options).run(stdout, stderr)
.then(resolve)
.catch((reason) => {
stderr.write(reason);
exit(1);
resolve();
});
});
}
/**
* Takes parsed commandline arguments and turns them into an object used in the HttpServiceSparqlEndpoint constructor
* @param {args: minimist.ParsedArgs} args The commandline arguments that the script was called with
* @param {string} moduleRootPath The path to the invoking module.
* @param {NodeJS.ProcessEnv} env The process env to get constants from.
* @param {string} defaultConfigPath The path to get the config from if none is defined in the environment.
*/
public static generateConstructorArguments(args: minimist.ParsedArgs, moduleRootPath: string,
env: NodeJS.ProcessEnv, defaultConfigPath: string)
: IHttpServiceSparqlEndpointArgs {
// allow the context to be either a file path or an inline JSON string
const context = JSON.parse(fs.existsSync(args._[0]) ? fs.readFileSync(args._[0], 'utf8') : args._[0]);
const invalidateCacheBeforeQuery: boolean = args.i;
const port = parseInt(args.p, 10) || 3000;
const timeout = (parseInt(args.t, 10) || 60) * 1000;
// Set the logger
if (!context.log) {
context.log = new LoggerPretty({ level: args.l || 'warn' });
}
const configResourceUrl = env.COMUNICA_CONFIG ? env.COMUNICA_CONFIG : defaultConfigPath;
return {
configResourceUrl,
context,
invalidateCacheBeforeQuery,
mainModulePath: moduleRootPath,
port,
timeout,
};
}
/**
* Start the HTTP service.
* @param {module:stream.internal.Writable} stdout The output stream to log to.
* @param {module:stream.internal.Writable} stderr The error stream to log errors to.
*/
public async run(stdout: Writable, stderr: Writable) {
const engine: ActorInitSparql = await this.engine;
// Determine the allowed media types for requests
const mediaTypes: {[id: string]: number} = await engine.getResultMediaTypes(null);
const variants: { type: string, quality: number }[] = [];
for (const type of Object.keys(mediaTypes)) {
variants.push({ type, quality: mediaTypes[type] });
}
// Start the server
const server = http.createServer(this.handleRequest.bind(this, engine, variants, stdout, stderr));
server.listen(this.port);
server.setTimeout(2 * this.timeout); // unreliable mechanism, set too high on purpose
stderr.write('Server running on http://localhost:' + this.port + '/\n');
}
/**
* Handles an HTTP request.
* @param {ActorInitSparql} engine A SPARQL engine.
* @param {{type: string; quality: number}[]} variants Allowed variants.
* @param {module:stream.internal.Writable} stdout Output stream.
* @param {module:stream.internal.Writable} stderr Error output stream.
* @param {module:http.IncomingMessage} request Request object.
* @param {module:http.ServerResponse} response Response object.
*/
public async handleRequest(engine: ActorInitSparql, variants: { type: string, quality: number }[],
stdout: Writable, stderr: Writable,
request: http.IncomingMessage, response: http.ServerResponse) {
const mediaType: string = request.headers.accept && request.headers.accept !== '*/*'
? require('negotiate').choose(variants, request)[0].type : null;
// Verify the path
const requestUrl = url.parse(request.url, true);
if (requestUrl.pathname !== '/sparql') {
stdout.write('[404] Resource not found\n');
response.writeHead(404,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_JSON, 'Access-Control-Allow-Origin': '*' });
response.end(JSON.stringify({ message: 'Resource not found' }));
return;
}
if (this.invalidateCacheBeforeQuery) {
// Invalidate cache
await engine.invalidateHttpCache();
}
// Parse the query, depending on the HTTP method
let sparql;
switch (request.method) {
case 'POST':
sparql = await this.parseBody(request);
this.writeQueryResult(engine, stdout, stderr, request, response, sparql, mediaType, false);
break;
case 'HEAD':
case 'GET':
sparql = <string> (<querystring.ParsedUrlQuery> requestUrl.query).query || '';
this.writeQueryResult(engine, stdout, stderr, request, response, sparql, mediaType, request.method === 'HEAD');
break;
default:
stdout.write('[405] ' + request.method + ' to ' + requestUrl + '\n');
response.writeHead(405,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_JSON, 'Access-Control-Allow-Origin': '*' });
response.end(JSON.stringify({ message: 'Incorrect HTTP method' }));
}
}
/**
* Writes the result of the given SPARQL query.
* @param {ActorInitSparql} engine A SPARQL engine.
* @param {module:stream.internal.Writable} stdout Output stream.
* @param {module:stream.internal.Writable} stderr Error output stream.
* @param {module:http.IncomingMessage} request Request object.
* @param {module:http.ServerResponse} response Response object.
* @param {string} sparql The SPARQL query string.
* @param {string} mediaType The requested response media type.
* @param {boolean} headOnly If only the header should be written.
*/
public writeQueryResult(engine: ActorInitSparql, stdout: Writable, stderr: Writable,
request: http.IncomingMessage, response: http.ServerResponse,
sparql: string, mediaType: string, headOnly: boolean) {
let eventEmitter: EventEmitter;
engine.query(sparql, this.context)
.then(async (result) => {
stdout.write('[200] ' + request.method + ' to ' + request.url + '\n');
stdout.write(' Requested media type: ' + mediaType + '\n');
stdout.write(' Received query: ' + sparql + '\n');
response.writeHead(200, { 'content-type': mediaType, 'Access-Control-Allow-Origin': '*' });
if (headOnly) {
response.end();
return;
}
try {
const data: NodeJS.ReadableStream = (await engine.resultToString(result, mediaType)).data;
data.on('error', (e: Error) => {
stdout.write('[500] Server error in results: ' + e + ' \n');
response.end('An internal server error occurred.\n');
});
data.pipe(response);
eventEmitter = data;
} catch (error) {
stdout.write('[400] Bad request, invalid media type\n');
response.writeHead(400,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_PLAIN, 'Access-Control-Allow-Origin': '*' });
response.end('The response for the given query could not be serialized for the requested media type\n');
}
}).catch((error) => {
stdout.write('[400] Bad request\n');
response.writeHead(400,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_PLAIN, 'Access-Control-Allow-Origin': '*' });
response.end(error.toString());
});
this.stopResponse(response, eventEmitter);
}
/**
* Stop after timeout or if the connection is terminated
* @param {module:http.ServerResponse} response Response object.
* @param {NodeJS.ReadableStream} eventEmitter Query result stream.
*/
public stopResponse(response: http.ServerResponse, eventEmitter: EventEmitter) {
// Note: socket or response timeouts seemed unreliable, hence the explicit timeout
const killTimeout = setTimeout(killClient, this.timeout);
response.on('close', killClient);
function killClient() {
if (eventEmitter) {
// remove all listeners so we are sure no more write calls are made
eventEmitter.removeAllListeners();
eventEmitter.emit('end'); | }
try { response.end(); } catch (e) { /* ignore error */ }
clearTimeout(killTimeout);
} | random_line_split |
|
HttpServiceSparqlEndpoint.ts | (args, moduleRootPath, env, defaultConfigPath);
return new Promise<void>((resolve) => {
new HttpServiceSparqlEndpoint(options).run(stdout, stderr)
.then(resolve)
.catch((reason) => {
stderr.write(reason);
exit(1);
resolve();
});
});
}
/**
* Takes parsed command-line arguments and turns them into the options object used by the HttpServiceSparqlEndpoint constructor.
* @param {minimist.ParsedArgs} args The command-line arguments that the script was called with.
* @param {string} moduleRootPath The path to the invoking module.
* @param {NodeJS.ProcessEnv} env The process env to get constants from.
* @param {string} defaultConfigPath The path to get the config from if none is defined in the environment.
*/
public static generateConstructorArguments(args: minimist.ParsedArgs, moduleRootPath: string,
env: NodeJS.ProcessEnv, defaultConfigPath: string)
: IHttpServiceSparqlEndpointArgs {
// allow the context to be passed either as a file path or as a direct JSON string
const context = JSON.parse(fs.existsSync(args._[0]) ? fs.readFileSync(args._[0], 'utf8') : args._[0]);
const invalidateCacheBeforeQuery: boolean = args.i;
const port = parseInt(args.p, 10) || 3000;
const timeout = (parseInt(args.t, 10) || 60) * 1000;
// Set the logger
if (!context.log) {
context.log = new LoggerPretty({ level: args.l || 'warn' });
}
const configResourceUrl = env.COMUNICA_CONFIG ? env.COMUNICA_CONFIG : defaultConfigPath;
return {
configResourceUrl,
context,
invalidateCacheBeforeQuery,
mainModulePath: moduleRootPath,
port,
timeout,
};
}
/**
* Start the HTTP service.
* @param {module:stream.internal.Writable} stdout The output stream to log to.
* @param {module:stream.internal.Writable} stderr The error stream to log errors to.
*/
public async run(stdout: Writable, stderr: Writable) {
const engine: ActorInitSparql = await this.engine;
// Determine the allowed media types for requests
const mediaTypes: {[id: string]: number} = await engine.getResultMediaTypes(null);
const variants: { type: string, quality: number }[] = [];
for (const type of Object.keys(mediaTypes)) {
variants.push({ type, quality: mediaTypes[type] });
}
// Start the server
const server = http.createServer(this.handleRequest.bind(this, engine, variants, stdout, stderr));
server.listen(this.port);
server.setTimeout(2 * this.timeout); // unreliable mechanism, set too high on purpose
stderr.write('Server running on http://localhost:' + this.port + '/\n');
}
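The run method above advertises one variant per media type reported by the engine, and handleRequest picks one with require('negotiate'). A rough Python sketch of that quality-based selection follows, assuming Accept entries carry at most a q parameter; it is a simplification of full HTTP content negotiation, not the library's actual algorithm.

# Simplified sketch of the content negotiation used above: pick the server
# variant with the highest combined client/server preference. Assumes Accept
# entries are "type;q=0.x" and reduces wildcard handling to "*/*".
def choose_variant(accept_header, variants):
    prefs = []
    for part in accept_header.split(","):
        fields = part.strip().split(";")
        mtype = fields[0].strip()
        q = 1.0
        for field in fields[1:]:
            name, _, value = field.strip().partition("=")
            if name.strip() == "q":
                try:
                    q = float(value)
                except ValueError:
                    q = 0.0
        prefs.append((mtype, q))

    def client_q(server_type):
        best = 0.0
        for mtype, q in prefs:
            if mtype == server_type or mtype == "*/*":
                best = max(best, q)
        return best

    scored = [(client_q(v["type"]) * v["quality"], v["type"]) for v in variants]
    score, chosen = max(scored)
    return chosen if score > 0 else None

variants = [{"type": "application/sparql-results+json", "quality": 1.0},
            {"type": "text/turtle", "quality": 0.8}]
print(choose_variant("text/turtle, application/json;q=0.5", variants))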
/**
* Handles an HTTP request.
* @param {ActorInitSparql} engine A SPARQL engine.
* @param {{type: string; quality: number}[]} variants Allowed variants.
* @param {module:stream.internal.Writable} stdout Output stream.
* @param {module:stream.internal.Writable} stderr Error output stream.
* @param {module:http.IncomingMessage} request Request object.
* @param {module:http.ServerResponse} response Response object.
*/
public async handleRequest(engine: ActorInitSparql, variants: { type: string, quality: number }[],
stdout: Writable, stderr: Writable,
request: http.IncomingMessage, response: http.ServerResponse) {
const mediaType: string = request.headers.accept && request.headers.accept !== '*/*'
? require('negotiate').choose(variants, request)[0].type : null;
// Verify the path
const requestUrl = url.parse(request.url, true);
if (requestUrl.pathname !== '/sparql') {
stdout.write('[404] Resource not found\n');
response.writeHead(404,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_JSON, 'Access-Control-Allow-Origin': '*' });
response.end(JSON.stringify({ message: 'Resource not found' }));
return;
}
if (this.invalidateCacheBeforeQuery) {
// Invalidate cache
await engine.invalidateHttpCache();
}
// Parse the query, depending on the HTTP method
let sparql;
switch (request.method) {
case 'POST':
sparql = await this.parseBody(request);
this.writeQueryResult(engine, stdout, stderr, request, response, sparql, mediaType, false);
break;
case 'HEAD':
case 'GET':
sparql = <string> (<querystring.ParsedUrlQuery> requestUrl.query).query || '';
this.writeQueryResult(engine, stdout, stderr, request, response, sparql, mediaType, request.method === 'HEAD');
break;
default:
stdout.write('[405] ' + request.method + ' to ' + requestUrl + '\n');
response.writeHead(405,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_JSON, 'Access-Control-Allow-Origin': '*' });
response.end(JSON.stringify({ message: 'Incorrect HTTP method' }));
}
}
/**
* Writes the result of the given SPARQL query.
* @param {ActorInitSparql} engine A SPARQL engine.
* @param {module:stream.internal.Writable} stdout Output stream.
* @param {module:stream.internal.Writable} stderr Error output stream.
* @param {module:http.IncomingMessage} request Request object.
* @param {module:http.ServerResponse} response Response object.
* @param {string} sparql The SPARQL query string.
* @param {string} mediaType The requested response media type.
* @param {boolean} headOnly If only the header should be written.
*/
public writeQueryResult(engine: ActorInitSparql, stdout: Writable, stderr: Writable,
request: http.IncomingMessage, response: http.ServerResponse,
sparql: string, mediaType: string, headOnly: boolean) {
let eventEmitter: EventEmitter;
engine.query(sparql, this.context)
.then(async (result) => {
stdout.write('[200] ' + request.method + ' to ' + request.url + '\n');
stdout.write(' Requested media type: ' + mediaType + '\n');
stdout.write(' Received query: ' + sparql + '\n');
response.writeHead(200, { 'content-type': mediaType, 'Access-Control-Allow-Origin': '*' });
if (headOnly) {
response.end();
return;
}
try {
const data: NodeJS.ReadableStream = (await engine.resultToString(result, mediaType)).data;
data.on('error', (e: Error) => {
stdout.write('[500] Server error in results: ' + e + ' \n');
response.end('An internal server error occurred.\n');
});
data.pipe(response);
eventEmitter = data;
} catch (error) {
stdout.write('[400] Bad request, invalid media type\n');
response.writeHead(400,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_PLAIN, 'Access-Control-Allow-Origin': '*' });
response.end('The response for the given query could not be serialized for the requested media type\n');
}
}).catch((error) => {
stdout.write('[400] Bad request\n');
response.writeHead(400,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_PLAIN, 'Access-Control-Allow-Origin': '*' });
response.end(error.toString());
});
this.stopResponse(response, eventEmitter);
}
/**
* Stop after timeout or if the connection is terminated
* @param {module:http.ServerResponse} response Response object.
* @param {NodeJS.ReadableStream} eventEmitter Query result stream.
*/
public stopResponse(response: http.ServerResponse, eventEmitter: EventEmitter) {
// Note: socket or response timeouts seemed unreliable, hence the explicit timeout
const killTimeout = setTimeout(killClient, this.timeout);
response.on('close', killClient);
function killClient() {
if (eventEmitter) {
// remove all listeners so we are sure no more write calls are made
eventEmitter.removeAllListeners();
eventEmitter.emit('end');
}
try { response.end(); } catch (e) { /* ignore error */ }
clearTimeout(killTimeout);
}
}
/**
* Parses the body of a SPARQL POST request
* @param {module:http.IncomingMessage} request Request object.
* @return {Promise<string>} A promise resolving to a query string.
*/
public parseBody(request: http.IncomingMessage): Promise<string> {
return new Promise((resolve, reject) => {
let body = '';
request.setEncoding('utf8');
request.on('error', reject);
request.on('data', (chunk) => { body += chunk; });
request.on('end', () => {
const contentType: string = request.headers['content-type'];
if (contentType.indexOf('application/sparql-query') >= 0) | {
return resolve(body);
} | conditional_block |
|
HttpServiceSparqlEndpoint.ts | => {
stderr.write(reason);
exit(1);
resolve();
});
});
}
/**
* Takes parsed command-line arguments and turns them into the options object used by the HttpServiceSparqlEndpoint constructor.
* @param {minimist.ParsedArgs} args The command-line arguments that the script was called with.
* @param {string} moduleRootPath The path to the invoking module.
* @param {NodeJS.ProcessEnv} env The process env to get constants from.
* @param {string} defaultConfigPath The path to get the config from if none is defined in the environment.
*/
public static generateConstructorArguments(args: minimist.ParsedArgs, moduleRootPath: string,
env: NodeJS.ProcessEnv, defaultConfigPath: string)
: IHttpServiceSparqlEndpointArgs {
// allow the context to be passed either as a file path or as a direct JSON string
const context = JSON.parse(fs.existsSync(args._[0]) ? fs.readFileSync(args._[0], 'utf8') : args._[0]);
const invalidateCacheBeforeQuery: boolean = args.i;
const port = parseInt(args.p, 10) || 3000;
const timeout = (parseInt(args.t, 10) || 60) * 1000;
// Set the logger
if (!context.log) {
context.log = new LoggerPretty({ level: args.l || 'warn' });
}
const configResourceUrl = env.COMUNICA_CONFIG ? env.COMUNICA_CONFIG : defaultConfigPath;
return {
configResourceUrl,
context,
invalidateCacheBeforeQuery,
mainModulePath: moduleRootPath,
port,
timeout,
};
}
/**
* Start the HTTP service.
* @param {module:stream.internal.Writable} stdout The output stream to log to.
* @param {module:stream.internal.Writable} stderr The error stream to log errors to.
*/
public async run(stdout: Writable, stderr: Writable) {
const engine: ActorInitSparql = await this.engine;
// Determine the allowed media types for requests
const mediaTypes: {[id: string]: number} = await engine.getResultMediaTypes(null);
const variants: { type: string, quality: number }[] = [];
for (const type of Object.keys(mediaTypes)) {
variants.push({ type, quality: mediaTypes[type] });
}
// Start the server
const server = http.createServer(this.handleRequest.bind(this, engine, variants, stdout, stderr));
server.listen(this.port);
server.setTimeout(2 * this.timeout); // unreliable mechanism, set too high on purpose
stderr.write('Server running on http://localhost:' + this.port + '/\n');
}
/**
* Handles an HTTP request.
* @param {ActorInitSparql} engine A SPARQL engine.
* @param {{type: string; quality: number}[]} variants Allowed variants.
* @param {module:stream.internal.Writable} stdout Output stream.
* @param {module:stream.internal.Writable} stderr Error output stream.
* @param {module:http.IncomingMessage} request Request object.
* @param {module:http.ServerResponse} response Response object.
*/
public async handleRequest(engine: ActorInitSparql, variants: { type: string, quality: number }[],
stdout: Writable, stderr: Writable,
request: http.IncomingMessage, response: http.ServerResponse) {
const mediaType: string = request.headers.accept && request.headers.accept !== '*/*'
? require('negotiate').choose(variants, request)[0].type : null;
// Verify the path
const requestUrl = url.parse(request.url, true);
if (requestUrl.pathname !== '/sparql') {
stdout.write('[404] Resource not found\n');
response.writeHead(404,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_JSON, 'Access-Control-Allow-Origin': '*' });
response.end(JSON.stringify({ message: 'Resource not found' }));
return;
}
if (this.invalidateCacheBeforeQuery) {
// Invalidate cache
await engine.invalidateHttpCache();
}
// Parse the query, depending on the HTTP method
let sparql;
switch (request.method) {
case 'POST':
sparql = await this.parseBody(request);
this.writeQueryResult(engine, stdout, stderr, request, response, sparql, mediaType, false);
break;
case 'HEAD':
case 'GET':
sparql = <string> (<querystring.ParsedUrlQuery> requestUrl.query).query || '';
this.writeQueryResult(engine, stdout, stderr, request, response, sparql, mediaType, request.method === 'HEAD');
break;
default:
stdout.write('[405] ' + request.method + ' to ' + requestUrl + '\n');
response.writeHead(405,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_JSON, 'Access-Control-Allow-Origin': '*' });
response.end(JSON.stringify({ message: 'Incorrect HTTP method' }));
}
}
/**
* Writes the result of the given SPARQL query.
* @param {ActorInitSparql} engine A SPARQL engine.
* @param {module:stream.internal.Writable} stdout Output stream.
* @param {module:stream.internal.Writable} stderr Error output stream.
* @param {module:http.IncomingMessage} request Request object.
* @param {module:http.ServerResponse} response Response object.
* @param {string} sparql The SPARQL query string.
* @param {string} mediaType The requested response media type.
* @param {boolean} headOnly If only the header should be written.
*/
public writeQueryResult(engine: ActorInitSparql, stdout: Writable, stderr: Writable,
request: http.IncomingMessage, response: http.ServerResponse,
sparql: string, mediaType: string, headOnly: boolean) {
let eventEmitter: EventEmitter;
engine.query(sparql, this.context)
.then(async (result) => {
stdout.write('[200] ' + request.method + ' to ' + request.url + '\n');
stdout.write(' Requested media type: ' + mediaType + '\n');
stdout.write(' Received query: ' + sparql + '\n');
response.writeHead(200, { 'content-type': mediaType, 'Access-Control-Allow-Origin': '*' });
if (headOnly) {
response.end();
return;
}
try {
const data: NodeJS.ReadableStream = (await engine.resultToString(result, mediaType)).data;
data.on('error', (e: Error) => {
stdout.write('[500] Server error in results: ' + e + ' \n');
response.end('An internal server error occurred.\n');
});
data.pipe(response);
eventEmitter = data;
} catch (error) {
stdout.write('[400] Bad request, invalid media type\n');
response.writeHead(400,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_PLAIN, 'Access-Control-Allow-Origin': '*' });
response.end('The response for the given query could not be serialized for the requested media type\n');
}
}).catch((error) => {
stdout.write('[400] Bad request\n');
response.writeHead(400,
{ 'content-type': HttpServiceSparqlEndpoint.MIME_PLAIN, 'Access-Control-Allow-Origin': '*' });
response.end(error.toString());
});
this.stopResponse(response, eventEmitter);
}
/**
* Stop after timeout or if the connection is terminated
* @param {module:http.ServerResponse} response Response object.
* @param {NodeJS.ReadableStream} eventEmitter Query result stream.
*/
public stopResponse(response: http.ServerResponse, eventEmitter: EventEmitter) {
// Note: socket or response timeouts seemed unreliable, hence the explicit timeout
const killTimeout = setTimeout(killClient, this.timeout);
response.on('close', killClient);
function killClient() {
if (eventEmitter) {
// remove all listeners so we are sure no more write calls are made
eventEmitter.removeAllListeners();
eventEmitter.emit('end');
}
try { response.end(); } catch (e) { /* ignore error */ }
clearTimeout(killTimeout);
}
}
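For comparison, a hedged Python sketch of the stopResponse pattern above, using threading.Timer for the explicit timeout; the stream and response objects are hypothetical stand-ins, and the on_close hook is an assumed API rather than part of the original code.

# Sketch of the stopResponse pattern: stop streaming either after a timeout or
# as soon as the client goes away, whichever happens first. `stream` and
# `response` are hypothetical stand-ins.
import threading

def stop_response(response, stream, timeout_seconds):
    def kill_client():
        if stream is not None:
            stream.close()          # mirrors removeAllListeners()/emit('end')
        try:
            response.end()
        except Exception:
            pass                    # ignore errors from an already-closed response
        kill_timer.cancel()         # mirrors clearTimeout(killTimeout)

    kill_timer = threading.Timer(timeout_seconds, kill_client)
    kill_timer.start()
    response.on_close(kill_client)  # hypothetical client-disconnect hook
    return kill_timer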
/**
* Parses the body of a SPARQL POST request
* @param {module:http.IncomingMessage} request Request object.
* @return {Promise<string>} A promise resolving to a query string.
*/
public parseBody(request: http.IncomingMessage): Promise<string> | {
return new Promise((resolve, reject) => {
let body = '';
request.setEncoding('utf8');
request.on('error', reject);
request.on('data', (chunk) => { body += chunk; });
request.on('end', () => {
const contentType: string = request.headers['content-type'];
if (contentType.indexOf('application/sparql-query') >= 0) {
return resolve(body);
} else if (contentType.indexOf('application/x-www-form-urlencoded') >= 0) {
return resolve(<string> querystring.parse(body).query || '');
} else {
return resolve(body);
}
});
});
} | identifier_body |
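parseBody above accepts either a raw application/sparql-query body or a form-encoded body with a query field. A small Python sketch of both client-side encodings follows; the endpoint URL and query are placeholders.

# The two POST encodings accepted by parseBody above, built client-side.
# The endpoint URL and query are placeholders for a hypothetical local instance.
import urllib.parse
import urllib.request

ENDPOINT = "http://localhost:3000/sparql"
QUERY = "ASK { ?s ?p ?o }"

# 1) Raw SPARQL body
raw = urllib.request.Request(
    ENDPOINT, data=QUERY.encode("utf-8"),
    headers={"Content-Type": "application/sparql-query"}, method="POST")

# 2) Form-encoded body with a `query` field
form = urllib.request.Request(
    ENDPOINT, data=urllib.parse.urlencode({"query": QUERY}).encode("utf-8"),
    headers={"Content-Type": "application/x-www-form-urlencoded"}, method="POST")

# urllib.request.urlopen(raw) or urllib.request.urlopen(form) would submit them.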
|
__init__.py | from collections import deque
from functools import partial
import voluptuous as vol
from camacq.exceptions import TemplateError
from camacq.helper import BASE_ACTION_SCHEMA, get_module, has_at_least_one_key
from camacq.helper.template import make_template, render_template
from camacq.const import CAMACQ_STOP_EVENT, CONF_DATA, CONF_ID
_LOGGER = logging.getLogger(__name__)
CONF_AUTOMATIONS = "automations"
CONF_ACTION = "action"
CONF_CONDITION = "condition"
CONF_CONDITIONS = "conditions"
CONF_NAME = "name"
CONF_TRIGGER = "trigger"
CONF_TYPE = "type"
ENABLED = "enabled"
NAME = "name"
ACTION_DELAY = "delay"
ACTION_TOGGLE = "toggle"
DATA_AUTOMATIONS = "automations"
TRIGGER_ACTION_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_TYPE): vol.Coerce(str),
vol.Required(CONF_ID): vol.Coerce(str),
vol.Optional(CONF_DATA, default={}): dict,
}
],
)
CONDITION_SCHEMA = vol.All(
has_at_least_one_key(CONF_TYPE, CONF_CONDITION),
{
# pylint: disable=no-value-for-parameter
vol.Inclusive(CONF_TYPE, "condition"): vol.All(
vol.Upper, vol.In(["AND", "OR"])
),
vol.Inclusive(CONF_CONDITIONS, "condition"): [
# pylint: disable=unnecessary-lambda
lambda value: CONDITION_SCHEMA(value)
],
vol.Exclusive(CONF_CONDITION, "condition"): vol.Coerce(str),
},
)
CONFIG_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_NAME): vol.Coerce(str),
vol.Required(CONF_TRIGGER): TRIGGER_ACTION_SCHEMA,
vol.Required(CONF_ACTION): TRIGGER_ACTION_SCHEMA,
vol.Optional(
CONF_CONDITION, default={CONF_CONDITION: "true"}
): CONDITION_SCHEMA,
}
]
)
async def setup_module(center, config):
"""Set up automations package.
Parameters
----------
center : Center instance
The Center instance.
config : dict
The config dict.
"""
_process_automations(center, config)
automations = center.data[DATA_AUTOMATIONS]
async def | (**kwargs):
"""Enable or disable an automation."""
name = kwargs[NAME]
automation = automations[name]
enabled = kwargs.get(ENABLED, not automation.enabled)
if enabled:
automation.enable()
else:
automation.disable()
toggle_action_schema = BASE_ACTION_SCHEMA.extend(
{
vol.Required(NAME): vol.All(vol.Coerce(str), vol.In(automations)),
ENABLED: vol.Boolean(), # pylint: disable=no-value-for-parameter
}
)
# register action to enable/disable automation
center.actions.register(
"automations", ACTION_TOGGLE, handle_action, toggle_action_schema
)
def _process_automations(center, config):
"""Process automations from config."""
automations = center.data.setdefault(DATA_AUTOMATIONS, {})
conf = config[CONF_AUTOMATIONS]
for block in conf:
name = block[CONF_NAME]
_LOGGER.debug("Setting up automation %s", name)
action_sequence = _get_actions(center, block[CONF_ACTION])
cond_func = _process_condition(center, block[CONF_CONDITION])
# use partial to get a function with args to call later
attach_triggers = partial(_process_trigger, center, block[CONF_TRIGGER])
automations[name] = Automation(
center, name, attach_triggers, cond_func, action_sequence
)
def _get_actions(center, config_block):
"""Return actions."""
actions = (TemplateAction(center, action_conf) for action_conf in config_block)
return ActionSequence(center, actions)
def _process_condition(center, config_block):
"""Return a function that parses the condition."""
if CONF_TYPE in config_block:
checks = []
condition_type = config_block[CONF_TYPE]
conditions = config_block[CONF_CONDITIONS]
for cond in conditions:
check = _process_condition(center, cond)
checks.append(check)
return make_checker(condition_type, checks)
data = config_block[CONF_CONDITION]
template = make_template(center, data)
return partial(render_template, template)
def make_checker(condition_type, checks):
"""Return a function to check condition."""
def check_condition(variables):
"""Return True if all or any condition(s) pass."""
if condition_type.lower() == "and":
return all(template_check(check(variables)) for check in checks)
if condition_type.lower() == "or":
return any(template_check(check(variables)) for check in checks)
return False
return check_condition
def template_check(value):
"""Check if a rendered template string equals true.
If value is not a string, return value as is.
"""
if isinstance(value, str):
return value.lower() == "true"
return value
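A hedged example of the nested AND/OR condition trees that CONDITION_SCHEMA accepts, together with a standalone re-implementation of the evaluation performed by make_checker and template_check; the template strings are illustrative, and the leaf evaluation below is a stand-in for camacq's real template rendering.

# Example condition tree plus a self-contained AND/OR evaluator mirroring
# check_condition above. Leaf rendering is faked via a lookup table.
condition_conf = {
    "type": "AND",
    "conditions": [
        {"condition": "{{ samples.current_well_done }}"},
        {
            "type": "OR",
            "conditions": [
                {"condition": "{{ counter > 3 }}"},
                {"condition": "true"},
            ],
        },
    ],
}

def evaluate(node, rendered_values):
    """Evaluate a condition tree the same way make_checker/check_condition do."""
    if "type" in node:
        results = (evaluate(child, rendered_values) for child in node["conditions"])
        return all(results) if node["type"].lower() == "and" else any(results)
    # Stand-in for render_template: unknown templates evaluate to themselves.
    rendered = rendered_values.get(node["condition"], node["condition"])
    return str(rendered).lower() == "true"

print(evaluate(condition_conf, {"{{ samples.current_well_done }}": "true"}))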
def _process_trigger(center, config_block, trigger):
"""Process triggers for an automation."""
remove_funcs = []
for conf in config_block:
trigger_id = conf[CONF_ID]
trigger_type = conf[CONF_TYPE]
trigger_mod = get_module(__name__, trigger_type)
if not trigger_mod:
continue
_LOGGER.debug("Setting up trigger %s", trigger_id)
remove = trigger_mod.handle_trigger(center, conf, trigger)
if not remove:
_LOGGER.error("Setting up trigger %s failed", trigger_id)
continue
remove_funcs.append(remove)
if not remove_funcs:
return None
def remove_triggers():
"""Remove attached triggers."""
for remove in remove_funcs:
remove()
return remove_triggers
class Automation:
"""Automation class."""
# pylint: disable=too-many-arguments
def __init__(
self, center, name, attach_triggers, cond_func, action_sequence, enabled=True
):
"""Set up instance."""
self._center = center
self.name = name
self.enabled = False
self._action_sequence = action_sequence
self._attach_triggers = attach_triggers
self._detach_triggers = None
self._cond_func = cond_func
if enabled:
self.enable()
def __repr__(self):
"""Return the representation."""
return (
f"Automation(center={self._center}, name={self.name}, "
f"attach_triggers={self._attach_triggers}, cond_func={self._cond_func}, "
f"action_sequence={self._action_sequence}, enabled={self.enabled})"
)
def enable(self):
"""Enable automation."""
if self.enabled:
return
self._detach_triggers = self._attach_triggers(self.trigger)
self.enabled = True
def disable(self):
"""Disable automation."""
if not self.enabled:
return
if self._detach_triggers is not None:
self._detach_triggers()
self._detach_triggers = None
self.enabled = False
async def trigger(self, variables):
"""Run actions of this automation."""
variables["samples"] = self._center.samples
_LOGGER.debug("Triggered automation %s", self.name)
try:
cond = self._cond_func(variables)
except TemplateError as exc:
_LOGGER.error("Failed to render condition for %s: %s", self.name, exc)
return
if cond:
_LOGGER.debug("Condition passed for %s", self.name)
await self._action_sequence(variables)
class ActionSequence:
"""Represent a sequence of actions."""
# pylint: disable=too-few-public-methods
def __init__(self, center, actions):
"""Set up instance."""
self._center = center
self.actions = list(actions) # copy to list to make sure it's a list
async def __call__(self, variables):
"""Start action sequence."""
waiting = deque(self.actions)
while waiting:
action = waiting.popleft()
if action.action_type == "automations" and action.action_id == ACTION_DELAY:
rendered_kwargs = action.render(variables)
seconds = rendered_kwargs.get("seconds")
self.delay(float(seconds), variables, waiting)
else:
_LOGGER.debug(
"Calling action %s.%s", action.action_type, action.action_id
)
await action(variables)
def delay(self, seconds, variables, waiting):
"""Delay action sequence.
Parameters
----------
seconds : float
A time interval to delay the pending action sequence.
variables : dict
A dict of template variables.
"""
sequence = ActionSequence(self._center, waiting)
callback = partial(self._center.create_task, sequence(variables))
waiting.clear()
_LOGGER.info("Action delay for %s seconds", seconds)
callback = self._center.loop.call_later(seconds, callback)
async def cancel_pending_actions(center, event):
"""Cancel pending actions."""
callback.cancel()
self._center.bus.register(CAMACQ_STOP_EVENT, cancel_pending_actions)
class TemplateAction:
"""Representation of an action with template data."""
# pylint: disable=too-few-public-methods
def __init__(self, center, action_conf):
"""Set up instance."""
self._center = center
self.action_id = action_conf[CONF_ID]
self.action_type = action_conf[CONF_TYPE]
action_data = action_conf[CONF_DATA]
self.template = make_template(center, action_data)
async def __call__(self, variables=None):
"""Execute action with optional template variables."""
| handle_action | identifier_name |
__init__.py | from collections import deque
from functools import partial
import voluptuous as vol
from camacq.exceptions import TemplateError
from camacq.helper import BASE_ACTION_SCHEMA, get_module, has_at_least_one_key
from camacq.helper.template import make_template, render_template
from camacq.const import CAMACQ_STOP_EVENT, CONF_DATA, CONF_ID
_LOGGER = logging.getLogger(__name__)
CONF_AUTOMATIONS = "automations"
CONF_ACTION = "action"
CONF_CONDITION = "condition"
CONF_CONDITIONS = "conditions"
CONF_NAME = "name"
CONF_TRIGGER = "trigger"
CONF_TYPE = "type"
ENABLED = "enabled"
NAME = "name"
ACTION_DELAY = "delay"
ACTION_TOGGLE = "toggle"
DATA_AUTOMATIONS = "automations"
TRIGGER_ACTION_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_TYPE): vol.Coerce(str),
vol.Required(CONF_ID): vol.Coerce(str),
vol.Optional(CONF_DATA, default={}): dict,
}
],
)
CONDITION_SCHEMA = vol.All(
has_at_least_one_key(CONF_TYPE, CONF_CONDITION),
{
# pylint: disable=no-value-for-parameter
vol.Inclusive(CONF_TYPE, "condition"): vol.All(
vol.Upper, vol.In(["AND", "OR"])
),
vol.Inclusive(CONF_CONDITIONS, "condition"): [
# pylint: disable=unnecessary-lambda
lambda value: CONDITION_SCHEMA(value)
],
vol.Exclusive(CONF_CONDITION, "condition"): vol.Coerce(str),
},
)
CONFIG_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_NAME): vol.Coerce(str),
vol.Required(CONF_TRIGGER): TRIGGER_ACTION_SCHEMA,
vol.Required(CONF_ACTION): TRIGGER_ACTION_SCHEMA,
vol.Optional(
CONF_CONDITION, default={CONF_CONDITION: "true"}
): CONDITION_SCHEMA,
}
]
)
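For reference, one automation block in the shape CONFIG_SCHEMA validates, written as a plain Python literal; the trigger and action types and ids are hypothetical placeholders that depend on which trigger and action modules are installed, and the condition defaults to "true" when omitted.

# One automation block in the shape CONFIG_SCHEMA accepts. Types and ids are
# hypothetical placeholders.
example_automations_config = [
    {
        "name": "start_acquisition_when_ready",
        "trigger": [
            {"type": "event", "id": "camacq_start_event", "data": {}},
        ],
        "action": [
            {"type": "command", "id": "start_imaging", "data": {"plate": "00"}},
        ],
        "condition": {"condition": "{{ samples.ready }}"},
    }
]
# CONFIG_SCHEMA(example_automations_config) would validate this list.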
async def setup_module(center, config):
"""Set up automations package.
Parameters
----------
center : Center instance
The Center instance.
config : dict
The config dict.
"""
_process_automations(center, config)
automations = center.data[DATA_AUTOMATIONS]
async def handle_action(**kwargs):
"""Enable or disable an automation."""
name = kwargs[NAME]
automation = automations[name]
enabled = kwargs.get(ENABLED, not automation.enabled)
if enabled:
|
else:
automation.disable()
toggle_action_schema = BASE_ACTION_SCHEMA.extend(
{
vol.Required(NAME): vol.All(vol.Coerce(str), vol.In(automations)),
ENABLED: vol.Boolean(), # pylint: disable=no-value-for-parameter
}
)
# register action to enable/disable automation
center.actions.register(
"automations", ACTION_TOGGLE, handle_action, toggle_action_schema
)
def _process_automations(center, config):
"""Process automations from config."""
automations = center.data.setdefault(DATA_AUTOMATIONS, {})
conf = config[CONF_AUTOMATIONS]
for block in conf:
name = block[CONF_NAME]
_LOGGER.debug("Setting up automation %s", name)
action_sequence = _get_actions(center, block[CONF_ACTION])
cond_func = _process_condition(center, block[CONF_CONDITION])
# use partial to get a function with args to call later
attach_triggers = partial(_process_trigger, center, block[CONF_TRIGGER])
automations[name] = Automation(
center, name, attach_triggers, cond_func, action_sequence
)
def _get_actions(center, config_block):
"""Return actions."""
actions = (TemplateAction(center, action_conf) for action_conf in config_block)
return ActionSequence(center, actions)
def _process_condition(center, config_block):
"""Return a function that parses the condition."""
if CONF_TYPE in config_block:
checks = []
condition_type = config_block[CONF_TYPE]
conditions = config_block[CONF_CONDITIONS]
for cond in conditions:
check = _process_condition(center, cond)
checks.append(check)
return make_checker(condition_type, checks)
data = config_block[CONF_CONDITION]
template = make_template(center, data)
return partial(render_template, template)
def make_checker(condition_type, checks):
"""Return a function to check condition."""
def check_condition(variables):
"""Return True if all or any condition(s) pass."""
if condition_type.lower() == "and":
return all(template_check(check(variables)) for check in checks)
if condition_type.lower() == "or":
return any(template_check(check(variables)) for check in checks)
return False
return check_condition
def template_check(value):
"""Check if a rendered template string equals true.
If value is not a string, return value as is.
"""
if isinstance(value, str):
return value.lower() == "true"
return value
def _process_trigger(center, config_block, trigger):
"""Process triggers for an automation."""
remove_funcs = []
for conf in config_block:
trigger_id = conf[CONF_ID]
trigger_type = conf[CONF_TYPE]
trigger_mod = get_module(__name__, trigger_type)
if not trigger_mod:
continue
_LOGGER.debug("Setting up trigger %s", trigger_id)
remove = trigger_mod.handle_trigger(center, conf, trigger)
if not remove:
_LOGGER.error("Setting up trigger %s failed", trigger_id)
continue
remove_funcs.append(remove)
if not remove_funcs:
return None
def remove_triggers():
"""Remove attached triggers."""
for remove in remove_funcs:
remove()
return remove_triggers
class Automation:
"""Automation class."""
# pylint: disable=too-many-arguments
def __init__(
self, center, name, attach_triggers, cond_func, action_sequence, enabled=True
):
"""Set up instance."""
self._center = center
self.name = name
self.enabled = False
self._action_sequence = action_sequence
self._attach_triggers = attach_triggers
self._detach_triggers = None
self._cond_func = cond_func
if enabled:
self.enable()
def __repr__(self):
"""Return the representation."""
return (
f"Automation(center={self._center}, name={self.name}, "
f"attach_triggers={self._attach_triggers}, cond_func={self._cond_func}, "
f"action_sequence={self._action_sequence}, enabled={self.enabled})"
)
def enable(self):
"""Enable automation."""
if self.enabled:
return
self._detach_triggers = self._attach_triggers(self.trigger)
self.enabled = True
def disable(self):
"""Disable automation."""
if not self.enabled:
return
if self._detach_triggers is not None:
self._detach_triggers()
self._detach_triggers = None
self.enabled = False
async def trigger(self, variables):
"""Run actions of this automation."""
variables["samples"] = self._center.samples
_LOGGER.debug("Triggered automation %s", self.name)
try:
cond = self._cond_func(variables)
except TemplateError as exc:
_LOGGER.error("Failed to render condition for %s: %s", self.name, exc)
return
if cond:
_LOGGER.debug("Condition passed for %s", self.name)
await self._action_sequence(variables)
class ActionSequence:
"""Represent a sequence of actions."""
# pylint: disable=too-few-public-methods
def __init__(self, center, actions):
"""Set up instance."""
self._center = center
self.actions = list(actions) # copy to list to make sure it's a list
async def __call__(self, variables):
"""Start action sequence."""
waiting = deque(self.actions)
while waiting:
action = waiting.popleft()
if action.action_type == "automations" and action.action_id == ACTION_DELAY:
rendered_kwargs = action.render(variables)
seconds = rendered_kwargs.get("seconds")
self.delay(float(seconds), variables, waiting)
else:
_LOGGER.debug(
"Calling action %s.%s", action.action_type, action.action_id
)
await action(variables)
def delay(self, seconds, variables, waiting):
"""Delay action sequence.
Parameters
----------
seconds : float
A time interval to delay the pending action sequence.
variables : dict
A dict of template variables.
"""
sequence = ActionSequence(self._center, waiting)
callback = partial(self._center.create_task, sequence(variables))
waiting.clear()
_LOGGER.info("Action delay for %s seconds", seconds)
callback = self._center.loop.call_later(seconds, callback)
async def cancel_pending_actions(center, event):
"""Cancel pending actions."""
callback.cancel()
self._center.bus.register(CAMACQ_STOP_EVENT, cancel_pending_actions)
class TemplateAction:
"""Representation of an action with template data."""
# pylint: disable=too-few-public-methods
def __init__(self, center, action_conf):
"""Set up instance."""
self._center = center
self.action_id = action_conf[CONF_ID]
self.action_type = action_conf[CONF_TYPE]
action_data = action_conf[CONF_DATA]
self.template = make_template(center, action_data)
async def __call__(self, variables=None):
"""Execute action with optional template variables."""
| automation.enable() | conditional_block |
__init__.py |
from collections import deque
from functools import partial
import voluptuous as vol
from camacq.exceptions import TemplateError
from camacq.helper import BASE_ACTION_SCHEMA, get_module, has_at_least_one_key
from camacq.helper.template import make_template, render_template
from camacq.const import CAMACQ_STOP_EVENT, CONF_DATA, CONF_ID
_LOGGER = logging.getLogger(__name__)
CONF_AUTOMATIONS = "automations"
CONF_ACTION = "action"
CONF_CONDITION = "condition"
CONF_CONDITIONS = "conditions"
CONF_NAME = "name"
CONF_TRIGGER = "trigger"
CONF_TYPE = "type"
ENABLED = "enabled"
NAME = "name"
ACTION_DELAY = "delay"
ACTION_TOGGLE = "toggle"
DATA_AUTOMATIONS = "automations"
TRIGGER_ACTION_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_TYPE): vol.Coerce(str),
vol.Required(CONF_ID): vol.Coerce(str),
vol.Optional(CONF_DATA, default={}): dict,
}
],
)
CONDITION_SCHEMA = vol.All(
has_at_least_one_key(CONF_TYPE, CONF_CONDITION),
{
# pylint: disable=no-value-for-parameter
vol.Inclusive(CONF_TYPE, "condition"): vol.All(
vol.Upper, vol.In(["AND", "OR"])
),
vol.Inclusive(CONF_CONDITIONS, "condition"): [
# pylint: disable=unnecessary-lambda
lambda value: CONDITION_SCHEMA(value)
],
vol.Exclusive(CONF_CONDITION, "condition"): vol.Coerce(str),
},
)
CONFIG_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_NAME): vol.Coerce(str),
vol.Required(CONF_TRIGGER): TRIGGER_ACTION_SCHEMA,
vol.Required(CONF_ACTION): TRIGGER_ACTION_SCHEMA,
vol.Optional(
CONF_CONDITION, default={CONF_CONDITION: "true"}
): CONDITION_SCHEMA,
}
]
)
async def setup_module(center, config):
"""Set up automations package.
Parameters
----------
center : Center instance
The Center instance.
config : dict
The config dict.
"""
_process_automations(center, config)
automations = center.data[DATA_AUTOMATIONS]
async def handle_action(**kwargs):
"""Enable or disable an automation."""
name = kwargs[NAME]
automation = automations[name]
enabled = kwargs.get(ENABLED, not automation.enabled)
if enabled:
automation.enable()
else:
automation.disable()
toggle_action_schema = BASE_ACTION_SCHEMA.extend(
{
vol.Required(NAME): vol.All(vol.Coerce(str), vol.In(automations)),
ENABLED: vol.Boolean(), # pylint: disable=no-value-for-parameter
}
)
# register action to enable/disable automation
center.actions.register(
"automations", ACTION_TOGGLE, handle_action, toggle_action_schema
)
def _process_automations(center, config):
"""Process automations from config."""
automations = center.data.setdefault(DATA_AUTOMATIONS, {})
conf = config[CONF_AUTOMATIONS]
for block in conf:
name = block[CONF_NAME]
_LOGGER.debug("Setting up automation %s", name)
action_sequence = _get_actions(center, block[CONF_ACTION])
cond_func = _process_condition(center, block[CONF_CONDITION])
# use partial to get a function with args to call later
attach_triggers = partial(_process_trigger, center, block[CONF_TRIGGER])
automations[name] = Automation(
center, name, attach_triggers, cond_func, action_sequence
)
def _get_actions(center, config_block):
"""Return actions."""
actions = (TemplateAction(center, action_conf) for action_conf in config_block)
return ActionSequence(center, actions)
def _process_condition(center, config_block):
"""Return a function that parses the condition."""
if CONF_TYPE in config_block:
checks = []
condition_type = config_block[CONF_TYPE]
conditions = config_block[CONF_CONDITIONS]
for cond in conditions:
check = _process_condition(center, cond)
checks.append(check)
return make_checker(condition_type, checks)
data = config_block[CONF_CONDITION]
template = make_template(center, data)
return partial(render_template, template)
def make_checker(condition_type, checks):
"""Return a function to check condition."""
def check_condition(variables):
"""Return True if all or any condition(s) pass."""
if condition_type.lower() == "and":
return all(template_check(check(variables)) for check in checks)
if condition_type.lower() == "or":
return any(template_check(check(variables)) for check in checks)
return False
return check_condition
def template_check(value):
"""Check if a rendered template string equals true.
If value is not a string, return value as is.
"""
if isinstance(value, str):
return value.lower() == "true"
return value
def _process_trigger(center, config_block, trigger):
"""Process triggers for an automation."""
remove_funcs = []
for conf in config_block:
trigger_id = conf[CONF_ID]
trigger_type = conf[CONF_TYPE]
trigger_mod = get_module(__name__, trigger_type)
if not trigger_mod:
continue
_LOGGER.debug("Setting up trigger %s", trigger_id)
remove = trigger_mod.handle_trigger(center, conf, trigger)
if not remove:
_LOGGER.error("Setting up trigger %s failed", trigger_id)
continue
remove_funcs.append(remove)
if not remove_funcs:
return None
def remove_triggers():
"""Remove attached triggers."""
for remove in remove_funcs:
remove()
return remove_triggers
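_process_trigger above resolves a module by trigger type and expects it to expose handle_trigger(center, conf, trigger) returning a detach callable. The sketch below shows that contract with a hypothetical event-based trigger; the bus API details, including its return value, are assumptions based on how center.bus.register is used elsewhere in this module.

# Minimal sketch of the trigger-module contract used by _process_trigger above:
# handle_trigger must attach the trigger and return a callable that detaches it
# (or a falsy value on failure). Event name and bus behavior are assumptions.
def handle_trigger(center, conf, trigger):
    event_id = conf["id"]  # e.g. the event name to listen for

    async def handle_event(center, event):
        # Forward the event to the automation's trigger coroutine.
        await trigger({"trigger": {"event": event}})

    remove = center.bus.register(event_id, handle_event)
    # If center.bus.register does not itself return an unregister callable,
    # wrap whatever detach mechanism the bus actually provides instead.
    return remove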
class Automation:
"""Automation class."""
# pylint: disable=too-many-arguments
def __init__(
self, center, name, attach_triggers, cond_func, action_sequence, enabled=True
):
"""Set up instance."""
self._center = center
self.name = name
self.enabled = False
self._action_sequence = action_sequence
self._attach_triggers = attach_triggers
self._detach_triggers = None
self._cond_func = cond_func
if enabled:
self.enable()
def __repr__(self):
"""Return the representation."""
return (
f"Automation(center={self._center}, name={self.name}, "
f"attach_triggers={self._attach_triggers}, cond_func={self._cond_func}, "
f"action_sequence={self._action_sequence}, enabled={self.enabled})"
)
def enable(self):
"""Enable automation."""
if self.enabled:
return
self._detach_triggers = self._attach_triggers(self.trigger)
self.enabled = True
def disable(self):
"""Disable automation."""
if not self.enabled:
return
if self._detach_triggers is not None:
self._detach_triggers()
self._detach_triggers = None
self.enabled = False
async def trigger(self, variables):
"""Run actions of this automation."""
variables["samples"] = self._center.samples
_LOGGER.debug("Triggered automation %s", self.name)
try:
cond = self._cond_func(variables)
except TemplateError as exc:
_LOGGER.error("Failed to render condition for %s: %s", self.name, exc)
return
if cond:
_LOGGER.debug("Condition passed for %s", self.name)
await self._action_sequence(variables)
class ActionSequence:
"""Represent a sequence of actions."""
# pylint: disable=too-few-public-methods
def __init__(self, center, actions):
"""Set up instance."""
self._center = center
self.actions = list(actions) # copy to list to make sure it's a list
async def __call__(self, variables):
"""Start action sequence."""
waiting = deque(self.actions) | rendered_kwargs = action.render(variables)
seconds = rendered_kwargs.get("seconds")
self.delay(float(seconds), variables, waiting)
else:
_LOGGER.debug(
"Calling action %s.%s", action.action_type, action.action_id
)
await action(variables)
def delay(self, seconds, variables, waiting):
"""Delay action sequence.
Parameters
----------
seconds : float
A time interval to delay the pending action sequence.
variables : dict
A dict of template variables.
"""
sequence = ActionSequence(self._center, waiting)
callback = partial(self._center.create_task, sequence(variables))
waiting.clear()
_LOGGER.info("Action delay for %s seconds", seconds)
callback = self._center.loop.call_later(seconds, callback)
async def cancel_pending_actions(center, event):
"""Cancel pending actions."""
callback.cancel()
self._center.bus.register(CAMACQ_STOP_EVENT, cancel_pending_actions)
class TemplateAction:
"""Representation of an action with template data."""
# pylint: disable=too-few-public-methods
def __init__(self, center, action_conf):
"""Set up instance."""
self._center = center
self.action_id = action_conf[CONF_ID]
self.action_type = action_conf[CONF_TYPE]
action_data = action_conf[CONF_DATA]
self.template = make_template(center, action_data)
async def __call__(self, variables=None):
"""Execute action with optional template variables."""
| while waiting:
action = waiting.popleft()
if action.action_type == "automations" and action.action_id == ACTION_DELAY: | random_line_split |
__init__.py | from collections import deque
from functools import partial
import voluptuous as vol
from camacq.exceptions import TemplateError
from camacq.helper import BASE_ACTION_SCHEMA, get_module, has_at_least_one_key
from camacq.helper.template import make_template, render_template
from camacq.const import CAMACQ_STOP_EVENT, CONF_DATA, CONF_ID
_LOGGER = logging.getLogger(__name__)
CONF_AUTOMATIONS = "automations"
CONF_ACTION = "action"
CONF_CONDITION = "condition"
CONF_CONDITIONS = "conditions"
CONF_NAME = "name"
CONF_TRIGGER = "trigger"
CONF_TYPE = "type"
ENABLED = "enabled"
NAME = "name"
ACTION_DELAY = "delay"
ACTION_TOGGLE = "toggle"
DATA_AUTOMATIONS = "automations"
TRIGGER_ACTION_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_TYPE): vol.Coerce(str),
vol.Required(CONF_ID): vol.Coerce(str),
vol.Optional(CONF_DATA, default={}): dict,
}
],
)
CONDITION_SCHEMA = vol.All(
has_at_least_one_key(CONF_TYPE, CONF_CONDITION),
{
# pylint: disable=no-value-for-parameter
vol.Inclusive(CONF_TYPE, "condition"): vol.All(
vol.Upper, vol.In(["AND", "OR"])
),
vol.Inclusive(CONF_CONDITIONS, "condition"): [
# pylint: disable=unnecessary-lambda
lambda value: CONDITION_SCHEMA(value)
],
vol.Exclusive(CONF_CONDITION, "condition"): vol.Coerce(str),
},
)
CONFIG_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_NAME): vol.Coerce(str),
vol.Required(CONF_TRIGGER): TRIGGER_ACTION_SCHEMA,
vol.Required(CONF_ACTION): TRIGGER_ACTION_SCHEMA,
vol.Optional(
CONF_CONDITION, default={CONF_CONDITION: "true"}
): CONDITION_SCHEMA,
}
]
)
async def setup_module(center, config):
"""Set up automations package.
Parameters
----------
center : Center instance
The Center instance.
config : dict
The config dict.
"""
_process_automations(center, config)
automations = center.data[DATA_AUTOMATIONS]
async def handle_action(**kwargs):
"""Enable or disable an automation."""
name = kwargs[NAME]
automation = automations[name]
enabled = kwargs.get(ENABLED, not automation.enabled)
if enabled:
automation.enable()
else:
automation.disable()
toggle_action_schema = BASE_ACTION_SCHEMA.extend(
{
vol.Required(NAME): vol.All(vol.Coerce(str), vol.In(automations)),
ENABLED: vol.Boolean(), # pylint: disable=no-value-for-parameter
}
)
# register action to enable/disable automation
center.actions.register(
"automations", ACTION_TOGGLE, handle_action, toggle_action_schema
)
def _process_automations(center, config):
"""Process automations from config."""
automations = center.data.setdefault(DATA_AUTOMATIONS, {})
conf = config[CONF_AUTOMATIONS]
for block in conf:
name = block[CONF_NAME]
_LOGGER.debug("Setting up automation %s", name)
action_sequence = _get_actions(center, block[CONF_ACTION])
cond_func = _process_condition(center, block[CONF_CONDITION])
# use partial to get a function with args to call later
attach_triggers = partial(_process_trigger, center, block[CONF_TRIGGER])
automations[name] = Automation(
center, name, attach_triggers, cond_func, action_sequence
)
def _get_actions(center, config_block):
"""Return actions."""
actions = (TemplateAction(center, action_conf) for action_conf in config_block)
return ActionSequence(center, actions)
def _process_condition(center, config_block):
"""Return a function that parses the condition."""
if CONF_TYPE in config_block:
checks = []
condition_type = config_block[CONF_TYPE]
conditions = config_block[CONF_CONDITIONS]
for cond in conditions:
check = _process_condition(center, cond)
checks.append(check)
return make_checker(condition_type, checks)
data = config_block[CONF_CONDITION]
template = make_template(center, data)
return partial(render_template, template)
def make_checker(condition_type, checks):
"""Return a function to check condition."""
def check_condition(variables):
"""Return True if all or any condition(s) pass."""
if condition_type.lower() == "and":
return all(template_check(check(variables)) for check in checks)
if condition_type.lower() == "or":
return any(template_check(check(variables)) for check in checks)
return False
return check_condition
def template_check(value):
"""Check if a rendered template string equals true.
If value is not a string, return value as is.
"""
if isinstance(value, str):
return value.lower() == "true"
return value
def _process_trigger(center, config_block, trigger):
"""Process triggers for an automation."""
remove_funcs = []
for conf in config_block:
trigger_id = conf[CONF_ID]
trigger_type = conf[CONF_TYPE]
trigger_mod = get_module(__name__, trigger_type)
if not trigger_mod:
continue
_LOGGER.debug("Setting up trigger %s", trigger_id)
remove = trigger_mod.handle_trigger(center, conf, trigger)
if not remove:
_LOGGER.error("Setting up trigger %s failed", trigger_id)
continue
remove_funcs.append(remove)
if not remove_funcs:
return None
def remove_triggers():
|
return remove_triggers
class Automation:
"""Automation class."""
# pylint: disable=too-many-arguments
def __init__(
self, center, name, attach_triggers, cond_func, action_sequence, enabled=True
):
"""Set up instance."""
self._center = center
self.name = name
self.enabled = False
self._action_sequence = action_sequence
self._attach_triggers = attach_triggers
self._detach_triggers = None
self._cond_func = cond_func
if enabled:
self.enable()
def __repr__(self):
"""Return the representation."""
return (
f"Automation(center={self._center}, name={self.name}, "
f"attach_triggers={self._attach_triggers}, cond_func={self._cond_func}, "
f"action_sequence={self._action_sequence}, enabled={self.enabled})"
)
def enable(self):
"""Enable automation."""
if self.enabled:
return
self._detach_triggers = self._attach_triggers(self.trigger)
self.enabled = True
def disable(self):
"""Disable automation."""
if not self.enabled:
return
if self._detach_triggers is not None:
self._detach_triggers()
self._detach_triggers = None
self.enabled = False
async def trigger(self, variables):
"""Run actions of this automation."""
variables["samples"] = self._center.samples
_LOGGER.debug("Triggered automation %s", self.name)
try:
cond = self._cond_func(variables)
except TemplateError as exc:
_LOGGER.error("Failed to render condition for %s: %s", self.name, exc)
return
if cond:
_LOGGER.debug("Condition passed for %s", self.name)
await self._action_sequence(variables)
class ActionSequence:
"""Represent a sequence of actions."""
# pylint: disable=too-few-public-methods
def __init__(self, center, actions):
"""Set up instance."""
self._center = center
self.actions = list(actions) # copy to list to make sure it's a list
async def __call__(self, variables):
"""Start action sequence."""
waiting = deque(self.actions)
while waiting:
action = waiting.popleft()
if action.action_type == "automations" and action.action_id == ACTION_DELAY:
rendered_kwargs = action.render(variables)
seconds = rendered_kwargs.get("seconds")
self.delay(float(seconds), variables, waiting)
else:
_LOGGER.debug(
"Calling action %s.%s", action.action_type, action.action_id
)
await action(variables)
def delay(self, seconds, variables, waiting):
"""Delay action sequence.
Parameters
----------
seconds : float
A time interval to delay the pending action sequence.
variables : dict
A dict of template variables.
"""
sequence = ActionSequence(self._center, waiting)
callback = partial(self._center.create_task, sequence(variables))
waiting.clear()
_LOGGER.info("Action delay for %s seconds", seconds)
callback = self._center.loop.call_later(seconds, callback)
async def cancel_pending_actions(center, event):
"""Cancel pending actions."""
callback.cancel()
self._center.bus.register(CAMACQ_STOP_EVENT, cancel_pending_actions)
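An example action list using the special automations/delay action handled in ActionSequence.__call__ above; the surrounding action types and ids are hypothetical placeholders, and the actions listed after the delay entry run roughly five seconds later.

# Action list with the automations/delay action recognized above: everything
# after the delay entry is deferred by about five seconds.
example_action_sequence = [
    {"type": "command", "id": "start_imaging", "data": {}},
    {"type": "automations", "id": "delay", "data": {"seconds": 5}},
    {"type": "command", "id": "stop_imaging", "data": {}},
]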
class TemplateAction:
"""Representation of an action with template data."""
# pylint: disable=too-few-public-methods
def __init__(self, center, action_conf):
"""Set up instance."""
self._center = center
self.action_id = action_conf[CONF_ID]
self.action_type = action_conf[CONF_TYPE]
action_data = action_conf[CONF_DATA]
self.template = make_template(center, action_data)
async def __call__(self, variables=None):
"""Execute action with optional template variables."""
| """Remove attached triggers."""
for remove in remove_funcs:
remove() | identifier_body |
cosmos.go | : registryContractAddress,
PrivateKey: key,
DB: db,
SugaredLogger: sugaredLogger,
}
}
// Start a Cosmos chain subscription
func (sub CosmosSub) Start(completionEvent *sync.WaitGroup, symbolTranslator *symbol_translator.SymbolTranslator) {
defer completionEvent.Done()
time.Sleep(time.Second)
client, err := tmClient.New(sub.TmProvider, "/websocket")
if err != nil {
sub.SugaredLogger.Errorw("failed to initialize a sifchain client.",
errorMessageKey, err.Error())
completionEvent.Add(1)
go sub.Start(completionEvent, symbolTranslator)
return
}
if err := client.Start(); err != nil {
sub.SugaredLogger.Errorw("failed to start a sifchain client.",
errorMessageKey, err.Error())
completionEvent.Add(1)
go sub.Start(completionEvent, symbolTranslator)
return
}
defer client.Stop() //nolint:errcheck
// Subscribe to all new blocks
query := "tm.event = 'NewBlock'"
results, err := client.Subscribe(context.Background(), "test", query, 1000)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to subscribe to query.",
errorMessageKey, err.Error(),
"query", query)
completionEvent.Add(1)
go sub.Start(completionEvent, symbolTranslator)
return
}
defer func() {
if err := client.Unsubscribe(context.Background(), "test", query); err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to unsubscribe query.",
errorMessageKey, err.Error())
}
}() | signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
defer close(quit)
var lastProcessedBlock int64
data, err := sub.DB.Get([]byte(cosmosLevelDBKey), nil)
if err != nil {
log.Println("Error getting the last cosmos block from level db", err)
lastProcessedBlock = 0
} else {
lastProcessedBlock = new(big.Int).SetBytes(data).Int64()
}
for {
select {
case <-quit:
log.Println("we receive the quit signal and exit")
return
case e := <-results:
data, ok := e.Data.(tmTypes.EventDataNewBlock)
if !ok {
sub.SugaredLogger.Errorw("sifchain client failed to extract event data from new block.",
"EventDataNewBlock", fmt.Sprintf("%v", e.Data))
}
blockHeight := data.Block.Height
// Just start from the current block number if no block has been processed before
if lastProcessedBlock == 0 {
lastProcessedBlock = blockHeight
}
sub.SugaredLogger.Infow("new sifchain block witnessed")
startBlockHeight := lastProcessedBlock + 1
sub.SugaredLogger.Infow("cosmos process events for blocks.",
"startingBlockHeight", startBlockHeight, "currentBlockHeight", blockHeight)
for blockNumber := startBlockHeight; blockNumber <= blockHeight; {
tmpBlockNumber := blockNumber
ctx := context.Background()
block, err := client.BlockResults(ctx, &tmpBlockNumber)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to get a block.",
errorMessageKey, err.Error())
continue
}
for _, txLog := range block.TxsResults {
sub.SugaredLogger.Infow("block.TxsResults: ", "block.TxsResults: ", block.TxsResults)
for _, event := range txLog.Events {
claimType := getOracleClaimType(event.GetType())
sub.SugaredLogger.Infow("claimtype cosmos.go: ", "claimType: ", claimType)
switch claimType {
case types.MsgBurn, types.MsgLock:
cosmosMsg, err := txs.BurnLockEventToCosmosMsg(claimType, event.GetAttributes(), symbolTranslator, sub.SugaredLogger)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed in get message from event.",
errorMessageKey, err.Error())
continue
}
sub.SugaredLogger.Infow(
"Received message from sifchain: ",
"msg", cosmosMsg,
)
sub.handleBurnLockMsg(cosmosMsg, claimType)
}
}
}
lastProcessedBlock = blockNumber
err = sub.DB.Put([]byte(cosmosLevelDBKey), big.NewInt(lastProcessedBlock).Bytes(), nil)
if err != nil {
// if you can't write to leveldb, then error out as something is seriously amiss
log.Fatalf("Error saving lastProcessedBlock to leveldb: %v", err)
}
blockNumber++
}
}
}
}
// GetAllProphecyClaim gets all prophecy claims
func GetAllProphecyClaim(client *ethclient.Client, ethereumAddress common.Address, ethFromBlock int64, ethToBlock int64) []types.ProphecyClaimUnique {
log.Printf("getAllProphecyClaim from %d block to %d block\n", ethFromBlock, ethToBlock)
var prophecyClaimArray []types.ProphecyClaimUnique
// Used to recover the sender address from a transaction; the clientChainID doesn't work in ganache, so it is hardcoded to 1
eIP155Signer := ethTypes.NewEIP155Signer(big.NewInt(1))
CosmosBridgeContractABI := contract.LoadABI(txs.CosmosBridge)
methodID := CosmosBridgeContractABI.Methods[types.NewProphecyClaim.String()].ID()
for blockNumber := ethFromBlock; blockNumber < ethToBlock; {
log.Printf("getAllProphecyClaim current blockNumber is %d\n", blockNumber)
block, err := client.BlockByNumber(context.Background(), big.NewInt(blockNumber))
if err != nil {
log.Printf("failed to get block from ethereum, block number is %d\n", blockNumber)
blockNumber++
continue
}
for _, tx := range block.Transactions() {
// recover sender from tx
sender, err := eIP155Signer.Sender(tx)
if err != nil {
log.Println("failed to recover sender from tx")
continue
}
// compare tx sender with my ethereum account
if sender != ethereumAddress {
// the prophecy claim not sent by me
continue
}
if len(tx.Data()) < 4 {
log.Println("the tx is not a smart contract call")
continue
}
// compare method id to check if it is NewProphecyClaim method
if !bytes.Equal(tx.Data()[0:4], methodID) {
continue
}
// decode data via a hardcode method since the abi unpack failed
prophecyClaim, err := MyDecode(tx.Data()[4:])
if err != nil {
log.Printf("decode prophecy claim failed with %s \n", err.Error())
continue
}
// put matched prophecyClaim into result
prophecyClaimArray = append(prophecyClaimArray, prophecyClaim)
}
blockNumber++
}
return prophecyClaimArray
}
// MyDecode decodes the data in a ProphecyClaim transaction
func MyDecode(data []byte) (types.ProphecyClaimUnique, error) {
if len(data) < 32*7+42 {
return types.ProphecyClaimUnique{}, errors.New("tx data length not enough")
}
src := data[64:96]
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
sequence, err := strconv.ParseUint(string(dst), 16, 32)
if err != nil {
return types.ProphecyClaimUnique{}, err
}
// the length of a sifnode acc address is 42
return types.ProphecyClaimUnique{
CosmosSenderSequence: big.NewInt(int64(sequence)),
CosmosSender: data[32*7 : 32*7+42],
}, nil
}
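MyDecode above reads the claim sequence and the cosmos sender straight out of the ABI-encoded calldata at fixed offsets. A Python sketch of the same slicing is given below with a synthetic payload; the offsets are taken from the Go code, everything else is illustrative.

# Python sketch of the fixed-offset decoding done by MyDecode above (data is
# the calldata with the 4-byte method selector already stripped).
def my_decode(data: bytes):
    if len(data) < 32 * 7 + 42:
        raise ValueError("tx data length not enough")
    sequence = int.from_bytes(data[64:96], "big")      # second 32-byte word
    cosmos_sender = data[32 * 7:32 * 7 + 42]           # 42-byte sif address
    return sequence, cosmos_sender

payload = bytearray(32 * 7 + 42)
payload[64:96] = (7).to_bytes(32, "big")               # sequence = 7
payload[32 * 7:] = b"sif1" + b"x" * 38                 # fake 42-byte address
print(my_decode(bytes(payload)))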
// MessageProcessed checks whether a cosmos message has already been processed
func MessageProcessed(message types.CosmosMsg, prophecyClaims []types.ProphecyClaimUnique) bool {
for _, prophecyClaim := range prophecyClaims {
if bytes.Equal(message.CosmosSender, prophecyClaim.CosmosSender) &&
message.CosmosSenderSequence.Cmp(prophecyClaim.CosmosSenderSequence) == 0 {
return true
}
}
return false
}
// Replay the missed events
func (sub CosmosSub) Replay(symbolTranslator *symbol_translator.SymbolTranslator, fromBlock int64, toBlock int64, ethFromBlock int64, ethToBlock int64) {
// Start Ethereum client
ethClient, err := ethclient.Dial(sub.EthProvider)
if err != nil {
log.Printf("%s \n", err.Error())
return
}
clientChainID, err := ethClient.NetworkID(context.Background())
if err != nil {
log.Printf("%s \n", err.Error())
return
}
log.Printf("clientChainID is %d \n", clientChainID)
// Load the validator's ethereum address
mySender, err := txs.LoadSender()
if err != |
quit := make(chan os.Signal, 1) | random_line_split |
cosmos.go | Address: registryContractAddress,
PrivateKey: key,
DB: db,
SugaredLogger: sugaredLogger,
}
}
// Start a Cosmos chain subscription
func (sub CosmosSub) Start(completionEvent *sync.WaitGroup, symbolTranslator *symbol_translator.SymbolTranslator) {
defer completionEvent.Done()
time.Sleep(time.Second)
client, err := tmClient.New(sub.TmProvider, "/websocket")
if err != nil {
sub.SugaredLogger.Errorw("failed to initialize a sifchain client.",
errorMessageKey, err.Error())
completionEvent.Add(1)
go sub.Start(completionEvent, symbolTranslator)
return
}
if err := client.Start(); err != nil {
sub.SugaredLogger.Errorw("failed to start a sifchain client.",
errorMessageKey, err.Error())
completionEvent.Add(1)
go sub.Start(completionEvent, symbolTranslator)
return
}
defer client.Stop() //nolint:errcheck
// Subscribe to all new blocks
query := "tm.event = 'NewBlock'"
results, err := client.Subscribe(context.Background(), "test", query, 1000)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to subscribe to query.",
errorMessageKey, err.Error(),
"query", query)
completionEvent.Add(1)
go sub.Start(completionEvent, symbolTranslator)
return
}
defer func() {
if err := client.Unsubscribe(context.Background(), "test", query); err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to unsubscribe query.",
errorMessageKey, err.Error())
}
}()
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
defer close(quit)
var lastProcessedBlock int64
data, err := sub.DB.Get([]byte(cosmosLevelDBKey), nil)
if err != nil {
log.Println("Error getting the last cosmos block from level db", err)
lastProcessedBlock = 0
} else {
lastProcessedBlock = new(big.Int).SetBytes(data).Int64()
}
for {
select {
case <-quit:
log.Println("we receive the quit signal and exit")
return
case e := <-results:
data, ok := e.Data.(tmTypes.EventDataNewBlock)
if !ok {
sub.SugaredLogger.Errorw("sifchain client failed to extract event data from new block.",
"EventDataNewBlock", fmt.Sprintf("%v", e.Data))
}
blockHeight := data.Block.Height
// Just start from the current block number if no block has been processed before
if lastProcessedBlock == 0 {
lastProcessedBlock = blockHeight
}
sub.SugaredLogger.Infow("new sifchain block witnessed")
startBlockHeight := lastProcessedBlock + 1
sub.SugaredLogger.Infow("cosmos process events for blocks.",
"startingBlockHeight", startBlockHeight, "currentBlockHeight", blockHeight)
for blockNumber := startBlockHeight; blockNumber <= blockHeight; {
tmpBlockNumber := blockNumber
ctx := context.Background()
block, err := client.BlockResults(ctx, &tmpBlockNumber)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to get a block.",
errorMessageKey, err.Error())
continue
}
for _, txLog := range block.TxsResults {
sub.SugaredLogger.Infow("block.TxsResults: ", "block.TxsResults: ", block.TxsResults)
for _, event := range txLog.Events {
claimType := getOracleClaimType(event.GetType())
sub.SugaredLogger.Infow("claimtype cosmos.go: ", "claimType: ", claimType)
switch claimType {
case types.MsgBurn, types.MsgLock:
cosmosMsg, err := txs.BurnLockEventToCosmosMsg(claimType, event.GetAttributes(), symbolTranslator, sub.SugaredLogger)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed in get message from event.",
errorMessageKey, err.Error())
continue
}
sub.SugaredLogger.Infow(
"Received message from sifchain: ",
"msg", cosmosMsg,
)
sub.handleBurnLockMsg(cosmosMsg, claimType)
}
}
}
lastProcessedBlock = blockNumber
err = sub.DB.Put([]byte(cosmosLevelDBKey), big.NewInt(lastProcessedBlock).Bytes(), nil)
if err != nil {
// if you can't write to leveldb, then error out as something is seriously amiss
log.Fatalf("Error saving lastProcessedBlock to leveldb: %v", err)
}
blockNumber++
}
}
}
}
// GetAllProphecyClaim get all prophecy claims
func GetAllProphecyClaim(client *ethclient.Client, ethereumAddress common.Address, ethFromBlock int64, ethToBlock int64) []types.ProphecyClaimUnique {
log.Printf("getAllProphecyClaim from %d block to %d block\n", ethFromBlock, ethToBlock)
var prophecyClaimArray []types.ProphecyClaimUnique
// Used to recover the sender address from a transaction; the clientChainID doesn't work in ganache, so it is hardcoded to 1
eIP155Signer := ethTypes.NewEIP155Signer(big.NewInt(1))
CosmosBridgeContractABI := contract.LoadABI(txs.CosmosBridge)
methodID := CosmosBridgeContractABI.Methods[types.NewProphecyClaim.String()].ID()
for blockNumber := ethFromBlock; blockNumber < ethToBlock; | // the prophecy claim not sent by me
continue
}
if len(tx.Data()) < 4 {
log.Println("the tx is not a smart contract call")
continue
}
// compare method id to check if it is NewProphecyClaim method
if bytes.Compare(tx.Data()[0:4], methodID) != 0 {
continue
}
// decode the data via a hardcoded method since the ABI unpack failed
prophecyClaim, err := MyDecode(tx.Data()[4:])
if err != nil {
log.Printf("decode prophecy claim failed with %s \n", err.Error())
continue
}
// put matched prophecyClaim into result
prophecyClaimArray = append(prophecyClaimArray, prophecyClaim)
}
blockNumber++
}
return prophecyClaimArray
}
// MyDecode decodes the data in a ProphecyClaim transaction
func MyDecode(data []byte) (types.ProphecyClaimUnique, error) {
if len(data) < 32*7+42 {
return types.ProphecyClaimUnique{}, errors.New("tx data length not enough")
}
src := data[64:96]
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
sequence, err := strconv.ParseUint(string(dst), 16, 32)
if err != nil {
return types.ProphecyClaimUnique{}, err
}
// the length of a sifnode account address is 42
return types.ProphecyClaimUnique{
CosmosSenderSequence: big.NewInt(int64(sequence)),
CosmosSender: data[32*7 : 32*7+42],
}, nil
}
// MessageProcessed checks whether a cosmos message has already been processed
func MessageProcessed(message types.CosmosMsg, prophecyClaims []types.ProphecyClaimUnique) bool {
for _, prophecyClaim := range prophecyClaims {
if bytes.Compare(message.CosmosSender, prophecyClaim.CosmosSender) == 0 &&
message.CosmosSenderSequence.Cmp(prophecyClaim.CosmosSenderSequence) == 0 {
return true
}
}
return false
}
// Replay the missed events
func (sub CosmosSub) Replay(symbolTranslator *symbol_translator.SymbolTranslator, fromBlock int64, toBlock int64, ethFromBlock int64, ethToBlock int64) {
// Start Ethereum client
ethClient, err := ethclient.Dial(sub.EthProvider)
if err != nil {
log.Printf("%s \n", err.Error())
return
}
clientChainID, err := ethClient.NetworkID(context.Background())
if err != nil {
log.Printf("%s \n", err.Error())
return
}
log.Printf("clientChainID is %d \n", clientChainID)
// Load the validator's ethereum address
mySender, err := txs.LoadSender()
if err != | {
log.Printf("getAllProphecyClaim current blockNumber is %d\n", blockNumber)
block, err := client.BlockByNumber(context.Background(), big.NewInt(blockNumber))
if err != nil {
log.Printf("failed to get block from ethereum, block number is %d\n", blockNumber)
blockNumber++
continue
}
for _, tx := range block.Transactions() {
// recover sender from tx
sender, err := eIP155Signer.Sender(tx)
if err != nil {
log.Println("failed to recover sender from tx")
continue
}
// compare tx sender with my ethereum account
if sender != ethereumAddress { | conditional_block |
cosmos.go | : registryContractAddress,
PrivateKey: key,
DB: db,
SugaredLogger: sugaredLogger,
}
}
// Start a Cosmos chain subscription
func (sub CosmosSub) Start(completionEvent *sync.WaitGroup, symbolTranslator *symbol_translator.SymbolTranslator) {
defer completionEvent.Done()
time.Sleep(time.Second)
client, err := tmClient.New(sub.TmProvider, "/websocket")
if err != nil {
sub.SugaredLogger.Errorw("failed to initialize a sifchain client.",
errorMessageKey, err.Error())
completionEvent.Add(1)
go sub.Start(completionEvent, symbolTranslator)
return
}
if err := client.Start(); err != nil {
sub.SugaredLogger.Errorw("failed to start a sifchain client.",
errorMessageKey, err.Error())
completionEvent.Add(1)
go sub.Start(completionEvent, symbolTranslator)
return
}
defer client.Stop() //nolint:errcheck
// Subscribe to all new blocks
query := "tm.event = 'NewBlock'"
results, err := client.Subscribe(context.Background(), "test", query, 1000)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to subscribe to query.",
errorMessageKey, err.Error(),
"query", query)
completionEvent.Add(1)
go sub.Start(completionEvent, symbolTranslator)
return
}
defer func() {
if err := client.Unsubscribe(context.Background(), "test", query); err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to unsubscribe query.",
errorMessageKey, err.Error())
}
}()
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
defer close(quit)
var lastProcessedBlock int64
data, err := sub.DB.Get([]byte(cosmosLevelDBKey), nil)
if err != nil {
log.Println("Error getting the last cosmos block from level db", err)
lastProcessedBlock = 0
} else {
lastProcessedBlock = new(big.Int).SetBytes(data).Int64()
}
for {
select {
case <-quit:
log.Println("we receive the quit signal and exit")
return
case e := <-results:
data, ok := e.Data.(tmTypes.EventDataNewBlock)
if !ok {
sub.SugaredLogger.Errorw("sifchain client failed to extract event data from new block.",
"EventDataNewBlock", fmt.Sprintf("%v", e.Data))
}
blockHeight := data.Block.Height
// Just start from the current block number if no block has been processed before
if lastProcessedBlock == 0 {
lastProcessedBlock = blockHeight
}
sub.SugaredLogger.Infow("new sifchain block witnessed")
startBlockHeight := lastProcessedBlock + 1
sub.SugaredLogger.Infow("cosmos process events for blocks.",
"startingBlockHeight", startBlockHeight, "currentBlockHeight", blockHeight)
for blockNumber := startBlockHeight; blockNumber <= blockHeight; {
tmpBlockNumber := blockNumber
ctx := context.Background()
block, err := client.BlockResults(ctx, &tmpBlockNumber)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to get a block.",
errorMessageKey, err.Error())
continue
}
for _, txLog := range block.TxsResults {
sub.SugaredLogger.Infow("block.TxsResults: ", "block.TxsResults: ", block.TxsResults)
for _, event := range txLog.Events {
claimType := getOracleClaimType(event.GetType())
sub.SugaredLogger.Infow("claimtype cosmos.go: ", "claimType: ", claimType)
switch claimType {
case types.MsgBurn, types.MsgLock:
cosmosMsg, err := txs.BurnLockEventToCosmosMsg(claimType, event.GetAttributes(), symbolTranslator, sub.SugaredLogger)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed in get message from event.",
errorMessageKey, err.Error())
continue
}
sub.SugaredLogger.Infow(
"Received message from sifchain: ",
"msg", cosmosMsg,
)
sub.handleBurnLockMsg(cosmosMsg, claimType)
}
}
}
lastProcessedBlock = blockNumber
err = sub.DB.Put([]byte(cosmosLevelDBKey), big.NewInt(lastProcessedBlock).Bytes(), nil)
if err != nil {
// if you can't write to leveldb, then error out as something is seriously amiss
log.Fatalf("Error saving lastProcessedBlock to leveldb: %v", err)
}
blockNumber++
}
}
}
}
// GetAllProphecyClaim get all prophecy claims
func GetAllProphecyClaim(client *ethclient.Client, ethereumAddress common.Address, ethFromBlock int64, ethToBlock int64) []types.ProphecyClaimUnique {
log.Printf("getAllProphecyClaim from %d block to %d block\n", ethFromBlock, ethToBlock)
var prophecyClaimArray []types.ProphecyClaimUnique
// Used to recover the sender address from a transaction; the clientChainID doesn't work in ganache, so it is hardcoded to 1
eIP155Signer := ethTypes.NewEIP155Signer(big.NewInt(1))
CosmosBridgeContractABI := contract.LoadABI(txs.CosmosBridge)
methodID := CosmosBridgeContractABI.Methods[types.NewProphecyClaim.String()].ID()
for blockNumber := ethFromBlock; blockNumber < ethToBlock; {
log.Printf("getAllProphecyClaim current blockNumber is %d\n", blockNumber)
block, err := client.BlockByNumber(context.Background(), big.NewInt(blockNumber))
if err != nil {
log.Printf("failed to get block from ethereum, block number is %d\n", blockNumber)
blockNumber++
continue
}
for _, tx := range block.Transactions() {
// recover sender from tx
sender, err := eIP155Signer.Sender(tx)
if err != nil {
log.Println("failed to recover sender from tx")
continue
}
// compare tx sender with my ethereum account
if sender != ethereumAddress {
// the prophecy claim not sent by me
continue
}
if len(tx.Data()) < 4 {
log.Println("the tx is not a smart contract call")
continue
}
// compare method id to check if it is NewProphecyClaim method
if bytes.Compare(tx.Data()[0:4], methodID) != 0 {
continue
}
// decode the data via a hardcoded method since the ABI unpack failed
prophecyClaim, err := MyDecode(tx.Data()[4:])
if err != nil {
log.Printf("decode prophecy claim failed with %s \n", err.Error())
continue
}
// put matched prophecyClaim into result
prophecyClaimArray = append(prophecyClaimArray, prophecyClaim)
}
blockNumber++
}
return prophecyClaimArray
}
// MyDecode decodes the data in a ProphecyClaim transaction
func MyDecode(data []byte) (types.ProphecyClaimUnique, error) {
if len(data) < 32*7+42 {
return types.ProphecyClaimUnique{}, errors.New("tx data length not enough")
}
src := data[64:96]
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
sequence, err := strconv.ParseUint(string(dst), 16, 32)
if err != nil {
return types.ProphecyClaimUnique{}, err
}
// the length of a sifnode account address is 42
return types.ProphecyClaimUnique{
CosmosSenderSequence: big.NewInt(int64(sequence)),
CosmosSender: data[32*7 : 32*7+42],
}, nil
}
// MessageProcessed checks whether a cosmos message has already been processed
func MessageProcessed(message types.CosmosMsg, prophecyClaims []types.ProphecyClaimUnique) bool {
for _, prophecyClaim := range prophecyClaims {
if bytes.Compare(message.CosmosSender, prophecyClaim.CosmosSender) == 0 &&
message.CosmosSenderSequence.Cmp(prophecyClaim.CosmosSenderSequence) == 0 {
return true
}
}
return false
}
// Replay the missed events
func (sub CosmosSub) | (symbolTranslator *symbol_translator.SymbolTranslator, fromBlock int64, toBlock int64, ethFromBlock int64, ethToBlock int64) {
// Start Ethereum client
ethClient, err := ethclient.Dial(sub.EthProvider)
if err != nil {
log.Printf("%s \n", err.Error())
return
}
clientChainID, err := ethClient.NetworkID(context.Background())
if err != nil {
log.Printf("%s \n", err.Error())
return
}
log.Printf("clientChainID is %d \n", clientChainID)
// Load the validator's ethereum address
mySender, err := txs.LoadSender()
if err != | Replay | identifier_name |
cosmos.go | {
sub.SugaredLogger.Errorw("sifchain client failed to extract event data from new block.",
"EventDataNewBlock", fmt.Sprintf("%v", e.Data))
}
blockHeight := data.Block.Height
// Just start from the current block number if no block has been processed before
if lastProcessedBlock == 0 {
lastProcessedBlock = blockHeight
}
sub.SugaredLogger.Infow("new sifchain block witnessed")
startBlockHeight := lastProcessedBlock + 1
sub.SugaredLogger.Infow("cosmos process events for blocks.",
"startingBlockHeight", startBlockHeight, "currentBlockHeight", blockHeight)
for blockNumber := startBlockHeight; blockNumber <= blockHeight; {
tmpBlockNumber := blockNumber
ctx := context.Background()
block, err := client.BlockResults(ctx, &tmpBlockNumber)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed to get a block.",
errorMessageKey, err.Error())
continue
}
for _, txLog := range block.TxsResults {
sub.SugaredLogger.Infow("block.TxsResults: ", "block.TxsResults: ", block.TxsResults)
for _, event := range txLog.Events {
claimType := getOracleClaimType(event.GetType())
sub.SugaredLogger.Infow("claimtype cosmos.go: ", "claimType: ", claimType)
switch claimType {
case types.MsgBurn, types.MsgLock:
cosmosMsg, err := txs.BurnLockEventToCosmosMsg(claimType, event.GetAttributes(), symbolTranslator, sub.SugaredLogger)
if err != nil {
sub.SugaredLogger.Errorw("sifchain client failed in get message from event.",
errorMessageKey, err.Error())
continue
}
sub.SugaredLogger.Infow(
"Received message from sifchain: ",
"msg", cosmosMsg,
)
sub.handleBurnLockMsg(cosmosMsg, claimType)
}
}
}
lastProcessedBlock = blockNumber
err = sub.DB.Put([]byte(cosmosLevelDBKey), big.NewInt(lastProcessedBlock).Bytes(), nil)
if err != nil {
// if you can't write to leveldb, then error out as something is seriously amiss
log.Fatalf("Error saving lastProcessedBlock to leveldb: %v", err)
}
blockNumber++
}
}
}
}
// GetAllProphecyClaim get all prophecy claims
func GetAllProphecyClaim(client *ethclient.Client, ethereumAddress common.Address, ethFromBlock int64, ethToBlock int64) []types.ProphecyClaimUnique {
log.Printf("getAllProphecyClaim from %d block to %d block\n", ethFromBlock, ethToBlock)
var prophecyClaimArray []types.ProphecyClaimUnique
// Used to recover the sender address from a transaction; the clientChainID doesn't work in ganache, so it is hardcoded to 1
eIP155Signer := ethTypes.NewEIP155Signer(big.NewInt(1))
CosmosBridgeContractABI := contract.LoadABI(txs.CosmosBridge)
methodID := CosmosBridgeContractABI.Methods[types.NewProphecyClaim.String()].ID()
for blockNumber := ethFromBlock; blockNumber < ethToBlock; {
log.Printf("getAllProphecyClaim current blockNumber is %d\n", blockNumber)
block, err := client.BlockByNumber(context.Background(), big.NewInt(blockNumber))
if err != nil {
log.Printf("failed to get block from ethereum, block number is %d\n", blockNumber)
blockNumber++
continue
}
for _, tx := range block.Transactions() {
// recover sender from tx
sender, err := eIP155Signer.Sender(tx)
if err != nil {
log.Println("failed to recover sender from tx")
continue
}
// compare tx sender with my ethereum account
if sender != ethereumAddress {
// the prophecy claim not sent by me
continue
}
if len(tx.Data()) < 4 {
log.Println("the tx is not a smart contract call")
continue
}
// compare method id to check if it is NewProphecyClaim method
if bytes.Compare(tx.Data()[0:4], methodID) != 0 {
continue
}
// decode the data via a hardcoded method since the ABI unpack failed
prophecyClaim, err := MyDecode(tx.Data()[4:])
if err != nil {
log.Printf("decode prophecy claim failed with %s \n", err.Error())
continue
}
// put matched prophecyClaim into result
prophecyClaimArray = append(prophecyClaimArray, prophecyClaim)
}
blockNumber++
}
return prophecyClaimArray
}
// MyDecode decodes the data in a ProphecyClaim transaction
func MyDecode(data []byte) (types.ProphecyClaimUnique, error) {
if len(data) < 32*7+42 {
return types.ProphecyClaimUnique{}, errors.New("tx data length not enough")
}
src := data[64:96]
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
sequence, err := strconv.ParseUint(string(dst), 16, 32)
if err != nil {
return types.ProphecyClaimUnique{}, err
}
// the length of a sifnode account address is 42
return types.ProphecyClaimUnique{
CosmosSenderSequence: big.NewInt(int64(sequence)),
CosmosSender: data[32*7 : 32*7+42],
}, nil
}
// MessageProcessed checks whether a cosmos message has already been processed
func MessageProcessed(message types.CosmosMsg, prophecyClaims []types.ProphecyClaimUnique) bool {
for _, prophecyClaim := range prophecyClaims {
if bytes.Compare(message.CosmosSender, prophecyClaim.CosmosSender) == 0 &&
message.CosmosSenderSequence.Cmp(prophecyClaim.CosmosSenderSequence) == 0 {
return true
}
}
return false
}
// Replay the missed events
func (sub CosmosSub) Replay(symbolTranslator *symbol_translator.SymbolTranslator, fromBlock int64, toBlock int64, ethFromBlock int64, ethToBlock int64) {
// Start Ethereum client
ethClient, err := ethclient.Dial(sub.EthProvider)
if err != nil {
log.Printf("%s \n", err.Error())
return
}
clientChainID, err := ethClient.NetworkID(context.Background())
if err != nil {
log.Printf("%s \n", err.Error())
return
}
log.Printf("clientChainID is %d \n", clientChainID)
// Load the validator's ethereum address
mySender, err := txs.LoadSender()
if err != nil {
log.Println(err)
return
}
ProphecyClaims := GetAllProphecyClaim(ethClient, mySender, ethFromBlock, ethToBlock)
log.Printf("found out %d prophecy claims I sent from %d to %d block\n", len(ProphecyClaims), ethFromBlock, ethToBlock)
client, err := tmClient.New(sub.TmProvider, "/websocket")
if err != nil {
log.Printf("failed to initialize a client, error as %s\n", err)
return
}
if err := client.Start(); err != nil {
log.Printf("failed to start a client, error as %s\n", err)
return
}
defer client.Stop() //nolint:errcheck
for blockNumber := fromBlock; blockNumber < toBlock; {
tmpBlockNumber := blockNumber
ctx := context.Background()
block, err := client.BlockResults(ctx, &tmpBlockNumber)
blockNumber++
log.Printf("Replay start to process block %d\n", blockNumber)
if err != nil {
log.Printf("failed to start a client %s\n", err.Error())
continue
}
for _, ethLog := range block.TxsResults {
for _, event := range ethLog.Events {
claimType := getOracleClaimType(event.GetType())
switch claimType {
case types.MsgBurn, types.MsgLock:
log.Println("found out a lock burn message")
cosmosMsg, err := txs.BurnLockEventToCosmosMsg(claimType, event.GetAttributes(), symbolTranslator, sub.SugaredLogger)
if err != nil {
log.Println(err)
continue
}
log.Printf("found out a lock burn message%s\n", cosmosMsg.String())
if !MessageProcessed(cosmosMsg, ProphecyClaims) {
sub.handleBurnLockMsg(cosmosMsg, claimType)
} else {
log.Println("lock burn message already processed by me")
}
}
}
}
}
}
// getOracleClaimType sets the OracleClaim's claim type based upon the witnessed event type
func getOracleClaimType(eventType string) types.Event | {
var claimType types.Event
switch eventType {
case types.MsgBurn.String():
claimType = types.MsgBurn
case types.MsgLock.String():
claimType = types.MsgLock
default:
claimType = types.Unsupported
}
return claimType
} | identifier_body |
|
mod.rs | I2C_SL_DELAY_COUNT: Mmio<u32>,
pub I2C_SL_INT_MASK: Mmio<u32>,
pub I2C_SL_INT_SOURCE: Mmio<u32>,
pub I2C_SL_INT_SET: Mmio<u32>,
_0x4C: Mmio<u32>,
pub I2C_TX_PACKET_FIFO: Mmio<u32>,
pub I2C_RX_FIFO: Mmio<u32>,
pub PACKET_TRANSFER_STATUS: Mmio<u32>,
pub FIFO_CONTROL: Mmio<u32>,
pub FIFO_STATUS: Mmio<u32>,
pub INTERRUPT_MASK_REGISTER: Mmio<u32>,
pub INTERRUPT_STATUS_REGISTER: Mmio<u32>,
pub I2C_CLK_DIVISOR_REGISTER: Mmio<u32>,
pub I2C_INTERRUPT_SOURCE_REGISTER: Mmio<u32>,
pub I2C_INTERRUPT_SET_REGISTER: Mmio<u32>,
pub I2C_SLV_TX_PACKET_FIFO: Mmio<u32>,
pub I2C_SLV_RX_FIFO: Mmio<u32>,
pub I2C_SLV_PACKET_STATUS: Mmio<u32>,
pub I2C_BUS_CLEAR_CONFIG: Mmio<u32>,
pub I2C_BUS_CLEAR_STATUS: Mmio<u32>,
pub I2C_CONFIG_LOAD: Mmio<u32>,
_0x90: Mmio<u32>,
pub I2C_INTERFACE_TIMING_0: Mmio<u32>,
pub I2C_INTERFACE_TIMING_1: Mmio<u32>,
pub I2C_HS_INTERFACE_TIMING_0: Mmio<u32>,
pub I2C_HS_INTERFACE_TIMING_1: Mmio<u32>,
}
/// Representation of an I²C controller.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct I2c {
/// The device clock for the controller.
clock: &'static Clock,
/// A pointer to the registers used for communication.
registers: *const Registers,
}
// Definitions for known I²C devices.
impl I2c {
/// Representation of the I²C controller 1.
pub const C1: Self = I2c {
clock: &Clock::I2C_1,
registers: (I2C_1234_BASE + 0) as *const Registers,
};
/// Representation of the I²C controller 2.
pub const C2: Self = I2c {
clock: &Clock::I2C_2,
registers: (I2C_1234_BASE + 0x400) as *const Registers,
};
/// Representation of the I²C controller 3.
pub const C3: Self = I2c {
clock: &Clock::I2C_3,
registers: (I2C_1234_BASE + 0x500) as *const Registers,
};
/// Representation of the I²C controller 4.
pub const C4: Self = I2c {
clock: &Clock::I2C_4,
registers: (I2C_1234_BASE + 0x700) as *const Registers,
};
/// Representation of the I²C controller 5.
pub const C5: Self = I2c {
clock: &Clock::I2C_5,
registers: (I2C_56_BASE + 0x000) as *const Registers,
};
/// Representation of the I²C controller 6.
pub const C6: Self = I2c {
clock: &Clock::I2C_6,
registers: (I2C_56_BASE + 0x100) as *const Registers,
};
}
impl I2c {
/// Loads the hardware configuration for the I²C.
fn load_config(&self) {
let register_base = unsafe { &*self.registers };
// Set MSTR_CONFIG_LOAD, TIMEOUT_CONFIG_LOAD, undocumented bit.
register_base.I2C_CONFIG_LOAD.write(0x25);
// Wait up to 20 microseconds for master config to be loaded.
for i in 0..20 {
usleep(i);
if register_base.I2C_CONFIG_LOAD.read() & 1 == 0 {
break;
}
}
}
/// Transmits the data to the device over I²C.
fn send(&self, device: u32, data: &[u8]) -> Result<(), Error> {
let register_base = unsafe { &*self.registers };
// Set device for 7-bit write mode.
register_base.I2C_CMD_ADDR0.write(device << 1);
// Load in data to write.
let data_source = u32::from_le_bytes(data.try_into().unwrap());
register_base.I2C_CMD_DATA1.write(data_source);
// Set config with LENGTH = data_length, NEW_MASTER_FSM, DEBOUNCE_CNT = 4T.
register_base.I2C_CNFG.write((((data.len() << 1) - 2) | 0x2800) as u32);
// Load hardware configuration.
self.load_config();
// CONFIG |= SEND.
register_base.I2C_CNFG.write((register_base.I2C_CNFG.read() & 0xFFFF_FDFF) | 0x200);
while register_base.I2C_STATUS.read() & 0x100 != 0 {
// Wait until not busy.
}
// Determine result from the result of CMD1_STAT == SL1_XFER_SUCCESSFUL.
if register_base.I2C_STATUS.read() & 0xF == 0 {
return Ok(());
} else {
return Err(Error::TransmissionFailed);
}
}
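// Note on the addressing above: `device << 1` forms the I²C address byte for a write
// (7-bit address with the R/W bit cleared), while `receive` below ORs in 1 to request a
// read. Because the payload goes through a single 32-bit data register, callers such as
// `write` cap transfers at 4 bytes (one register byte plus up to 3 data bytes).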
/// Receives bytes from the device over I²C and writes them to the buffer.
fn receive(&self, device: u32, buffer: &mut [u8]) -> Result<(), Error> {
let register_base = unsafe { &*self.registers };
// Set device for 7-bit read mode.
register_base.I2C_CMD_ADDR0.write((device << 1) | 1);
// Set config with LENGTH = buffer.len(), NEW_MASTER_FSM, DEBOUNCE_CNT = 4T.
register_base.I2C_CNFG.write((((buffer.len() << 1) - 2) | 0x2840) as u32);
// Load hardware configuration.
self.load_config();
// CONFIG |= SEND.
register_base.I2C_CNFG.write((register_base.I2C_CNFG.read() & 0xFFFF_FDFF) | 0x200);
while register_base.I2C_STATUS.read() & 0x100 != 0 {
// Wait until not busy.
}
// Ensure success.
if register_base.I2C_STATUS.read() & 0xF != 0 {
return Err(Error::QueryFailed);
}
// Read result and copy it back.
let result = register_base.I2C_CMD_DATA1.read().to_le_bytes();
buffer.copy_from_slice(&result[..buffer.len()]);
Ok(())
}
/// Initializes the I²C controller.
pub fn init(&self) {
let register_base = unsafe { &*self.registers };
// Enable device clock.
self.clock.enable();
// Setup divisor and clear the bus.
register_base.I2C_CLK_DIVISOR_REGISTER.write(0x50001);
register_base.I2C_BUS_CLEAR_CONFIG.write(0x90003);
// Load hardware configuration.
self.load_config();
// Wait a while until BUS_CLEAR_DONE is set.
for _ in 0..10 {
usleep(20000);
if register_base.INTERRUPT_STATUS_REGISTER.read() & 0x800 != 0 {
break;
}
}
// Dummy read.
register_base.I2C_BUS_CLEAR_STATUS.read();
// Read and set the Interrupt Status.
register_base.INTERRUPT_STATUS_REGISTER
.write(register_base.INTERRUPT_STATUS_REGISTER.read());
}
/// Writes a buffer of data to a given device over I²C.
pub fn write(&self, device: u32, register: u8, data: &[u8]) -> Result<(), Error> {
// Limit input size to 32-bits. One byte is reserved for the device register.
if data.len() > 3 {
return Err(Error::BufferBoundariesBlown);
}
// Prepare a buffer holding the device register and the data contents.
let mut buffer = [0; 4];
buffer[0] = register;
// Copy only data.len() bytes; copying into the whole tail would panic for payloads shorter than 3 bytes.
buffer[1..1 + data.len()].copy_from_slice(data);
// Send the buffer to the device.
self.send(device, &buffer[..])
}
/// Writes a byte to a given device over I²C.
#[inline]
pub fn write_byte(&self, device: u32, register: u8, byte: u8) -> Result<(), Error> {
// Write single byte to | device.
self.write(device, register, &byte.to_le_bytes())
}
/// Reads a register of a d | identifier_body |
|
mod.rs | The [`Sync`] trait is implemented for [`I2c`], it is considered
//! safe to share references between threads.
//!
//! - [`send_pmic_cpu_shutdown_cmd`], [`read_ti_charger_bit_7`],
//! [`clear_ti_charger_bit_7`] and [`set_ti_charger_bit_7`] are helper
//! functions which wrap common I2C operations.
//!
//! [`Registers`]: struct.Registers.html
//! [`I2c`]: struct.I2c.html
//! [`Clock`]: ../clock/struct.Clock.html
//! [`I2c::init`]: struct.I2c.html#method.init
//! [`I2c::read`]: struct.I2c.html#method.read
//! [`I2c::write`]: struct.I2c.html#method.write
//! [`Sync`]: https://doc.rust-lang.org/nightly/core/marker/trait.Sync.html
//! [`send_pmic_cpu_shutdown_cmd`]: fn.send_pmic_cpu_shutdown_cmd.html
//! [`read_ti_charger_bit_7`]: fn.read_ti_charger_bit_7.html
//! [`clear_ti_charger_bit_7`]: fn.clear_ti_charger_bit_7.html
//! [`set_ti_charger_bit_7`]: fn.set_ti_charger_bit_7.html
use core::{convert::TryInto, marker::{Send, Sync}};
use mirage_mmio::Mmio;
use crate::{clock::Clock, timer::usleep};
/// Base address for the I²C registers 1 through 4.
pub(crate) const I2C_1234_BASE: u32 = 0x7000_C000;
/// Base address for the I²C registers 5 through 6.
pub(crate) const I2C_56_BASE: u32 = 0x7000_D000;
/// The I²C device address for the Maxim 77621 CPU.
pub const MAX77621_CPU_I2C_ADDR: u32 = 0x1B;
/// The I²C device address for the Maxim 77621 GPU.
pub const MAX77621_GPU_I2C_ADDR: u32 = 0x1C;
/// The I²C device address for the Maxim 17050.
pub const MAX17050_I2C_ADDR: u32 = 0x36;
/// The I²C device address for the Maxim 77620 PWR.
pub const MAX77620_PWR_I2C_ADDR: u32 = 0x3C;
/// The I²C device address for the Maxim 77620 RTC.
pub const MAX77620_RTC_I2C_ADDR: u32 = 0x68;
/// The I²C device address for the TI BQ24193.
pub const BQ24193_I2C_ADDR: u32 = 0x6B;
/// Enumeration of possible I²C errors that may occur.
#[derive(Debug)]
pub enum Error {
/// Returned in case the boundaries of a buffer used for
/// read and write operations exceed the permitted size.
BufferBoundariesBlown,
/// Returned when the transmission over I²C fails.
TransmissionFailed,
/// Returned when a querying error for a device occurs.
QueryFailed,
}
/// Representation of the I²C registers.
#[allow(non_snake_case)]
#[repr(C)] | pub I2C_CMD_DATA2: Mmio<u32>,
_0x14: Mmio<u32>,
_0x18: Mmio<u32>,
pub I2C_STATUS: Mmio<u32>,
pub I2C_SL_CNFG: Mmio<u32>,
pub I2C_SL_RCVD: Mmio<u32>,
pub I2C_SL_STATUS: Mmio<u32>,
pub I2C_SL_ADDR1: Mmio<u32>,
pub I2C_SL_ADDR2: Mmio<u32>,
pub I2C_TLOW_SEXT: Mmio<u32>,
_0x38: Mmio<u32>,
pub I2C_SL_DELAY_COUNT: Mmio<u32>,
pub I2C_SL_INT_MASK: Mmio<u32>,
pub I2C_SL_INT_SOURCE: Mmio<u32>,
pub I2C_SL_INT_SET: Mmio<u32>,
_0x4C: Mmio<u32>,
pub I2C_TX_PACKET_FIFO: Mmio<u32>,
pub I2C_RX_FIFO: Mmio<u32>,
pub PACKET_TRANSFER_STATUS: Mmio<u32>,
pub FIFO_CONTROL: Mmio<u32>,
pub FIFO_STATUS: Mmio<u32>,
pub INTERRUPT_MASK_REGISTER: Mmio<u32>,
pub INTERRUPT_STATUS_REGISTER: Mmio<u32>,
pub I2C_CLK_DIVISOR_REGISTER: Mmio<u32>,
pub I2C_INTERRUPT_SOURCE_REGISTER: Mmio<u32>,
pub I2C_INTERRUPT_SET_REGISTER: Mmio<u32>,
pub I2C_SLV_TX_PACKET_FIFO: Mmio<u32>,
pub I2C_SLV_RX_FIFO: Mmio<u32>,
pub I2C_SLV_PACKET_STATUS: Mmio<u32>,
pub I2C_BUS_CLEAR_CONFIG: Mmio<u32>,
pub I2C_BUS_CLEAR_STATUS: Mmio<u32>,
pub I2C_CONFIG_LOAD: Mmio<u32>,
_0x90: Mmio<u32>,
pub I2C_INTERFACE_TIMING_0: Mmio<u32>,
pub I2C_INTERFACE_TIMING_1: Mmio<u32>,
pub I2C_HS_INTERFACE_TIMING_0: Mmio<u32>,
pub I2C_HS_INTERFACE_TIMING_1: Mmio<u32>,
}
/// Representation of an I²C controller.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct I2c {
/// The device clock for the controller.
clock: &'static Clock,
/// A pointer to the registers used for communication.
registers: *const Registers,
}
// Definitions for known I²C devices.
impl I2c {
/// Representation of the I²C controller 1.
pub const C1: Self = I2c {
clock: &Clock::I2C_1,
registers: (I2C_1234_BASE + 0) as *const Registers,
};
/// Representation of the I²C controller 2.
pub const C2: Self = I2c {
clock: &Clock::I2C_2,
registers: (I2C_1234_BASE + 0x400) as *const Registers,
};
/// Representation of the I²C controller 3.
pub const C3: Self = I2c {
clock: &Clock::I2C_3,
registers: (I2C_1234_BASE + 0x500) as *const Registers,
};
/// Representation of the I²C controller 4.
pub const C4: Self = I2c {
clock: &Clock::I2C_4,
registers: (I2C_1234_BASE + 0x700) as *const Registers,
};
/// Representation of the I²C controller 5.
pub const C5: Self = I2c {
clock: &Clock::I2C_5,
registers: (I2C_56_BASE + 0x000) as *const Registers,
};
/// Representation of the I²C controller 6.
pub const C6: Self = I2c {
clock: &Clock::I2C_6,
registers: (I2C_56_BASE + 0x100) as *const Registers,
};
}
impl I2c {
/// Loads the hardware configuration for the I²C.
fn load_config(&self) {
let register_base = unsafe { &*self.registers };
// Set MSTR_CONFIG_LOAD, TIMEOUT_CONFIG_LOAD, undocumented bit.
register_base.I2C_CONFIG_LOAD.write(0x25);
// Wait up to 20 microseconds for master config to be loaded.
for i in 0..20 {
usleep(i);
if register_base.I2C_CONFIG_LOAD.read() & 1 == 0 {
break;
}
}
}
/// Transmits the data to the device over I²C.
fn send(&self, device: u32, data: &[u8]) -> Result<(), Error> {
let register_base = unsafe { &*self.registers };
// Set device for 7-bit write mode.
register_base.I2 | pub struct Registers {
pub I2C_CNFG: Mmio<u32>,
pub I2C_CMD_ADDR0: Mmio<u32>,
pub I2C_CMD_ADDR1: Mmio<u32>,
pub I2C_CMD_DATA1: Mmio<u32>, | random_line_split |
mod.rs | pub I2C_SL_INT_SOURCE: Mmio<u32>,
pub I2C_SL_INT_SET: Mmio<u32>,
_0x4C: Mmio<u32>,
pub I2C_TX_PACKET_FIFO: Mmio<u32>,
pub I2C_RX_FIFO: Mmio<u32>,
pub PACKET_TRANSFER_STATUS: Mmio<u32>,
pub FIFO_CONTROL: Mmio<u32>,
pub FIFO_STATUS: Mmio<u32>,
pub INTERRUPT_MASK_REGISTER: Mmio<u32>,
pub INTERRUPT_STATUS_REGISTER: Mmio<u32>,
pub I2C_CLK_DIVISOR_REGISTER: Mmio<u32>,
pub I2C_INTERRUPT_SOURCE_REGISTER: Mmio<u32>,
pub I2C_INTERRUPT_SET_REGISTER: Mmio<u32>,
pub I2C_SLV_TX_PACKET_FIFO: Mmio<u32>,
pub I2C_SLV_RX_FIFO: Mmio<u32>,
pub I2C_SLV_PACKET_STATUS: Mmio<u32>,
pub I2C_BUS_CLEAR_CONFIG: Mmio<u32>,
pub I2C_BUS_CLEAR_STATUS: Mmio<u32>,
pub I2C_CONFIG_LOAD: Mmio<u32>,
_0x90: Mmio<u32>,
pub I2C_INTERFACE_TIMING_0: Mmio<u32>,
pub I2C_INTERFACE_TIMING_1: Mmio<u32>,
pub I2C_HS_INTERFACE_TIMING_0: Mmio<u32>,
pub I2C_HS_INTERFACE_TIMING_1: Mmio<u32>,
}
/// Representation of an I²C controller.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct I2c {
/// The device clock for the controller.
clock: &'static Clock,
/// A pointer to the registers used for communication.
registers: *const Registers,
}
// Definitions for known I²C devices.
impl I2c {
/// Representation of the I²C controller 1.
pub const C1: Self = I2c {
clock: &Clock::I2C_1,
registers: (I2C_1234_BASE + 0) as *const Registers,
};
/// Representation of the I²C controller 2.
pub const C2: Self = I2c {
clock: &Clock::I2C_2,
registers: (I2C_1234_BASE + 0x400) as *const Registers,
};
/// Representation of the I²C controller 3.
pub const C3: Self = I2c {
clock: &Clock::I2C_3,
registers: (I2C_1234_BASE + 0x500) as *const Registers,
};
/// Representation of the I²C controller 4.
pub const C4: Self = I2c {
clock: &Clock::I2C_4,
registers: (I2C_1234_BASE + 0x700) as *const Registers,
};
/// Representation of the I²C controller 5.
pub const C5: Self = I2c {
clock: &Clock::I2C_5,
registers: (I2C_56_BASE + 0x000) as *const Registers,
};
/// Representation of the I²C controller 6.
pub const C6: Self = I2c {
clock: &Clock::I2C_6,
registers: (I2C_56_BASE + 0x100) as *const Registers,
};
}
impl I2c {
/// Loads the hardware configuration for the I²C.
fn load_config(&self) {
let register_base = unsafe { &*self.registers };
// Set MSTR_CONFIG_LOAD, TIMEOUT_CONFIG_LOAD, undocumented bit.
register_base.I2C_CONFIG_LOAD.write(0x25);
// Wait up to 20 microseconds for master config to be loaded.
for i in 0..20 {
usleep(i);
if register_base.I2C_CONFIG_LOAD.read() & 1 == 0 {
break;
}
}
}
/// Transmits the data to the device over I²C.
fn send(&self, device: u32, data: &[u8]) -> Result<(), Error> {
let register_base = unsafe { &*self.registers };
// Set device for 7-bit write mode.
register_base.I2C_CMD_ADDR0.write(device << 1);
// Load in data to write.
let data_source = u32::from_le_bytes(data.try_into().unwrap());
register_base.I2C_CMD_DATA1.write(data_source);
// Set config with LENGTH = data_length, NEW_MASTER_FSM, DEBOUNCE_CNT = 4T.
register_base.I2C_CNFG.write((((data.len() << 1) - 2) | 0x2800) as u32);
// Load hardware configuration.
self.load_config();
// CONFIG |= SEND.
register_base.I2C_CNFG.write((register_base.I2C_CNFG.read() & 0xFFFF_FDFF) | 0x200);
while register_base.I2C_STATUS.read() & 0x100 != 0 {
// Wait until not busy.
}
// Determine result from the result of CMD1_STAT == SL1_XFER_SUCCESSFUL.
if register_base.I2C_STATUS.read() & 0xF == 0 {
return Ok(());
} else {
return Err(Error::TransmissionFailed);
}
}
/// Receives bytes from the device over I²C and writes them to the buffer.
fn receive(&self, device: u32, buffer: &mut [u8]) -> Result<(), Error> {
let register_base = unsafe { &*self.registers };
// Set device for 7-bit read mode.
register_base.I2C_CMD_ADDR0.write((device << 1) | 1);
// Set config with LENGTH = buffer.len(), NEW_MASTER_FSM, DEBOUNCE_CNT = 4T.
register_base.I2C_CNFG.write((((buffer.len() << 1) - 2) | 0x2840) as u32);
// Load hardware configuration.
self.load_config();
// CONFIG |= SEND.
register_base.I2C_CNFG.write((register_base.I2C_CNFG.read() & 0xFFFF_FDFF) | 0x200);
while register_base.I2C_STATUS.read() & 0x100 != 0 {
// Wait until not busy.
}
// Ensure success.
if register_base.I2C_STATUS.read() & 0xF != 0 {
return Err(Error::QueryFailed);
}
// Read result and copy it back.
let result = register_base.I2C_CMD_DATA1.read().to_le_bytes();
buffer.copy_from_slice(&result[..buffer.len()]);
Ok(())
}
/// Initializes the I²C controller.
pub fn init(&self) {
let register_base = unsafe { &*self.registers };
// Enable device clock.
self.clock.enable();
// Setup divisor and clear the bus.
register_base.I2C_CLK_DIVISOR_REGISTER.write(0x50001);
register_base.I2C_BUS_CLEAR_CONFIG.write(0x90003);
// Load hardware configuration.
self.load_config();
// Wait a while until BUS_CLEAR_DONE is set.
for _ in 0..10 {
usleep(20000);
if register_base.INTERRUPT_STATUS_REGISTER.read() & 0x800 != 0 {
break;
}
}
// Dummy read.
register_base.I2C_BUS_CLEAR_STATUS.read();
// Read and set the Interrupt Status.
register_base.INTERRUPT_STATUS_REGISTER
.write(register_base.INTERRUPT_STATUS_REGISTER.read());
}
/// Writes a buffer of data to a given device over I²C.
pub fn write(&self, device: u32, register: u8, data: &[u8]) -> Result<(), Error> {
// Limit input size to 32-bits. One byte is reserved for the device register.
if data.len() > 3 {
return Err(Error::BufferBoundariesBlown);
}
// Prepare a buffer holding the device register and the data contents.
let mut buffer = [0; 4];
buffer[0] = register;
// Copy only data.len() bytes; copying into the whole tail would panic for payloads shorter than 3 bytes.
buffer[1..1 + data.len()].copy_from_slice(data);
// Send the buffer to the device.
self.send(device, &buffer[..])
}
/// Writes a byte to a given device over I²C.
#[inline]
pub fn write_byte(&self, device: u32, register: u8, byte: u8) -> Result<(), Error> {
// Write single byte to device.
self.write(device, register, &byte.to_le_bytes())
}
/// Reads a register of a device over I²C and writes the result to the buffer.
pub fn read(&self, device: u32, register: | u8, | identifier_name |
|
mod.rs | 2 = 0x36;
/// The I²C device address for the Maxim 77620 PWR.
pub const MAX77620_PWR_I2C_ADDR: u32 = 0x3C;
/// The I²C device address for the Maxim 77620 RTC.
pub const MAX77620_RTC_I2C_ADDR: u32 = 0x68;
/// The I²C device address for the TI BQ24193.
pub const BQ24193_I2C_ADDR: u32 = 0x6B;
/// Enumeration of possible I²C errors that may occur.
#[derive(Debug)]
pub enum Error {
/// Returned in case the boundaries of a buffer used for
/// read and write operations exceed the permitted size.
BufferBoundariesBlown,
/// Returned when the transmission over I²C fails.
TransmissionFailed,
/// Returned when a querying error for a device occurs.
QueryFailed,
}
/// Representation of the I²C registers.
#[allow(non_snake_case)]
#[repr(C)]
pub struct Registers {
pub I2C_CNFG: Mmio<u32>,
pub I2C_CMD_ADDR0: Mmio<u32>,
pub I2C_CMD_ADDR1: Mmio<u32>,
pub I2C_CMD_DATA1: Mmio<u32>,
pub I2C_CMD_DATA2: Mmio<u32>,
_0x14: Mmio<u32>,
_0x18: Mmio<u32>,
pub I2C_STATUS: Mmio<u32>,
pub I2C_SL_CNFG: Mmio<u32>,
pub I2C_SL_RCVD: Mmio<u32>,
pub I2C_SL_STATUS: Mmio<u32>,
pub I2C_SL_ADDR1: Mmio<u32>,
pub I2C_SL_ADDR2: Mmio<u32>,
pub I2C_TLOW_SEXT: Mmio<u32>,
_0x38: Mmio<u32>,
pub I2C_SL_DELAY_COUNT: Mmio<u32>,
pub I2C_SL_INT_MASK: Mmio<u32>,
pub I2C_SL_INT_SOURCE: Mmio<u32>,
pub I2C_SL_INT_SET: Mmio<u32>,
_0x4C: Mmio<u32>,
pub I2C_TX_PACKET_FIFO: Mmio<u32>,
pub I2C_RX_FIFO: Mmio<u32>,
pub PACKET_TRANSFER_STATUS: Mmio<u32>,
pub FIFO_CONTROL: Mmio<u32>,
pub FIFO_STATUS: Mmio<u32>,
pub INTERRUPT_MASK_REGISTER: Mmio<u32>,
pub INTERRUPT_STATUS_REGISTER: Mmio<u32>,
pub I2C_CLK_DIVISOR_REGISTER: Mmio<u32>,
pub I2C_INTERRUPT_SOURCE_REGISTER: Mmio<u32>,
pub I2C_INTERRUPT_SET_REGISTER: Mmio<u32>,
pub I2C_SLV_TX_PACKET_FIFO: Mmio<u32>,
pub I2C_SLV_RX_FIFO: Mmio<u32>,
pub I2C_SLV_PACKET_STATUS: Mmio<u32>,
pub I2C_BUS_CLEAR_CONFIG: Mmio<u32>,
pub I2C_BUS_CLEAR_STATUS: Mmio<u32>,
pub I2C_CONFIG_LOAD: Mmio<u32>,
_0x90: Mmio<u32>,
pub I2C_INTERFACE_TIMING_0: Mmio<u32>,
pub I2C_INTERFACE_TIMING_1: Mmio<u32>,
pub I2C_HS_INTERFACE_TIMING_0: Mmio<u32>,
pub I2C_HS_INTERFACE_TIMING_1: Mmio<u32>,
}
/// Representation of an I²C controller.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct I2c {
/// The device clock for the controller.
clock: &'static Clock,
/// A pointer to the registers used for communication.
registers: *const Registers,
}
// Definitions for known I²C devices.
impl I2c {
/// Representation of the I²C controller 1.
pub const C1: Self = I2c {
clock: &Clock::I2C_1,
registers: (I2C_1234_BASE + 0) as *const Registers,
};
/// Representation of the I²C controller 2.
pub const C2: Self = I2c {
clock: &Clock::I2C_2,
registers: (I2C_1234_BASE + 0x400) as *const Registers,
};
/// Representation of the I²C controller 3.
pub const C3: Self = I2c {
clock: &Clock::I2C_3,
registers: (I2C_1234_BASE + 0x500) as *const Registers,
};
/// Representation of the I²C controller 4.
pub const C4: Self = I2c {
clock: &Clock::I2C_4,
registers: (I2C_1234_BASE + 0x700) as *const Registers,
};
/// Representation of the I²C controller 5.
pub const C5: Self = I2c {
clock: &Clock::I2C_5,
registers: (I2C_56_BASE + 0x000) as *const Registers,
};
/// Representation of the I²C controller 6.
pub const C6: Self = I2c {
clock: &Clock::I2C_6,
registers: (I2C_56_BASE + 0x100) as *const Registers,
};
}
impl I2c {
/// Loads the hardware configuration for the I²C.
fn load_config(&self) {
let register_base = unsafe { &*self.registers };
// Set MSTR_CONFIG_LOAD, TIMEOUT_CONFIG_LOAD, undocumented bit.
register_base.I2C_CONFIG_LOAD.write(0x25);
// Wait up to 20 microseconds for master config to be loaded.
for i in 0..20 {
usleep(i);
if register_base.I2C_CONFIG_LOAD.read() & 1 == 0 {
break;
}
}
}
/// Transmits the data to the device over I²C.
fn send(&self, device: u32, data: &[u8]) -> Result<(), Error> {
let register_base = unsafe { &*self.registers };
// Set device for 7-bit write mode.
register_base.I2C_CMD_ADDR0.write(device << 1);
// Load in data to write.
let data_source = u32::from_le_bytes(data.try_into().unwrap());
register_base.I2C_CMD_DATA1.write(data_source);
// Set config with LENGTH = data_length, NEW_MASTER_FSM, DEBOUNCE_CNT = 4T.
register_base.I2C_CNFG.write((((data.len() << 1) - 2) | 0x2800) as u32);
// Load hardware configuration.
self.load_config();
// CONFIG |= SEND.
register_base.I2C_CNFG.write((register_base.I2C_CNFG.read() & 0xFFFF_FDFF) | 0x200);
while register_base.I2C_STATUS.read() & 0x100 != 0 {
// Wait until not busy.
}
// Determine result from the result of CMD1_STAT == SL1_XFER_SUCCESSFUL.
if register_base.I2C_STATUS.read() & 0xF == 0 {
return Ok(());
} else {
return Err(Error::TransmissionFailed);
}
}
/// Receives bytes from the device over I²C and writes them to the buffer.
fn receive(&self, device: u32, buffer: &mut [u8]) -> Result<(), Error> {
let register_base = unsafe { &*self.registers };
// Set device for 7-bit read mode.
register_base.I2C_CMD_ADDR0.write((device << 1) | 1);
// Set config with LENGTH = buffer.len(), NEW_MASTER_FSM, DEBOUNCE_CNT = 4T.
register_base.I2C_CNFG.write((((buffer.len() << 1) - 2) | 0x2840) as u32);
// Load hardware configuration.
self.load_config();
// CONFIG |= SEND.
register_base.I2C_CNFG.write((register_base.I2C_CNFG.read() & 0xFFFF_FDFF) | 0x200);
while register_base.I2C_STATUS.read() & 0x100 != 0 {
// Wait until not busy.
}
// Ensure success.
if register_base.I2C_STATUS.read() & 0xF != 0 {
return Err(Error | ::QueryFailed);
}
// Read result and c | conditional_block |
|
FP.py | for item in x:
tree.delete(item)
def LbClick1(event):#listbox点击
try:
showPic3(lb.get(lb.curselection()))
except:
pass
def LbClick2(event):#listbox双击
try:
print (lb.get(lb.curselection()))
#读取图像
im=Image.open(lb.get(lb.curselection()))
#显示图像
im.show()
except:
pass
def lbExecute(listT):#listbox处理
lb.delete(0, END) #先清除
for item in listT:#再输出
lb.insert(END, item)
#执行搜图
import Execute
def start():
if var1.get()=='' or var2.get()=='' :
tkinter.messagebox.askokcancel('error','图片以及文件夹的路径请完整填写')
tabControl.select(0)#跳回第一个标签
pass
else :#路径合法,可以执行
Init_dog()#初始化图像显示
delButton(tree)#清空表列
showPic1()#预览所找图
#time_start=time.time()#time.time()为1970.1.1到当前时间的毫秒数
#搜图开始
if numberChosen.current() == 0 :
Execute.startSearch01(var1,var2,var3,tree,x,root)#单层文件夹
elif numberChosen.current() == 1 :
Execute.startSearch0(var1,var2,var3,tree,x,root)#多层文件夹
else :
tkinter.messagebox.askokcancel('error','算法出错')
#label3.config(text="耗时"+str(round(time_end-time_start,3))+'秒')#显示耗时
#自动筛选
#label=Label(root,textvariable = result, font=("黑体", 30, "bold"))
#label.grid(row=0,column=1,padx=20, pady=10,sticky=N)
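# The similarity search itself lives in the external Execute module, which is not shown in
# this file. Purely as an illustrative sketch (not the project's real implementation), a
# perceptual-hash comparison with the third-party imagehash and Pillow packages could look
# like the hypothetical helper below.
def _demo_hash_similarity(query_path, candidate_path, algo='dhash'):
    """Return a 0-100 similarity score between two images using a perceptual hash."""
    import imagehash
    from PIL import Image as _Image
    hash_funcs = {
        'ahash': imagehash.average_hash,
        'dhash': imagehash.dhash,
        'phash': imagehash.phash,
        'whash': imagehash.whash,
    }
    h1 = hash_funcs[algo](_Image.open(query_path))
    h2 = hash_funcs[algo](_Image.open(candidate_path))
    distance = h1 - h2         # Hamming distance between the two 64-bit hashes
    total_bits = h1.hash.size  # 64 for the default hash_size=8
    return round((1 - distance / total_bits) * 100, 2)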
#窗口开始===========================================================================
root = Tk() # 初始框的声明
root.title('基于哈希的图像文件整理应用的设计与实现')#设置窗口标题
root.geometry('1300x650+500+200')#设置窗口的大小宽x高+偏移量
root.resizable(width=True, height=True) #宽不可变, 高可变,默认为True
root.iconbitmap('img4.ico')
#ft1 = tkFont.Font(family='Fixdsys', size=12)
#ft2 = tkFont.Font(family='Fixdsys', size=10)
#标题
label9 = Label(root, text='欢迎使用本软件')
label9.pack(side=TOP)
frm = Frame(root)
frm.pack(side=TOP,fill=BOTH)
#底部公用
frm_BB = Frame(root)
frm_BB.pack(side=BOTTOM,fill=BOTH)
#按钮(右)
frm_BBB1 = Frame(frm_BB)
frm_BBB1.pack(side=RIGHT,fill=BOTH)
#label3=Label(frm_BBB1, text='耗时')
#label3.pack(fill=BOTH)
x=StringVar()
label4=Label(frm_BBB1,textvariable = x)
label4.pack(fill=BOTH)
x.set("无任务")
#button_img_gif = PhotoImage(file='start.png')
button_img = Button(frm_BBB1,text = '开始搜图',width=20,height=20,command=start)
button_img.pack(side=BOTTOM)
#框架(左)
frm_BB1 = Frame(frm_BB)
frm_BB1.pack(side=LEFT,fill=BOTH)
#left左边显示信息
frm_L = Frame(frm)
frm_L.pack(side=LEFT)
columns=("a","b","c")
tree=ttk.Treeview(frm_L,height=18,show="headings",columns=columns )#表格
for col in columns:
if (col=='a')or(col=='e'):#数字排序
tree.heading(col, text=col, command=lambda _col=col: treeview_sort_column2(tree, _col, False))#重建标题,添加控件排序方法
else :#默认排序
tree.heading(col, text=col, command=lambda _col=col: treeview_sort_column1(tree, _col, False))
tree.column('a', width=100, anchor='center')
tree.column('b', width=400, anchor='center')
tree.column('c', width=200, anchor='center')
tree.heading('a', text='相似度')
tree.heading('b', text='路径')
tree.heading('c', text='文件名')
tree.pack(side=LEFT,fill=BOTH)
tree.bind('<ButtonRelease-1>', treeviewClick) #单击离开
tree.bind('<Double-1>', treeviewClick1) #双击
tree.bind('<3>', treeviewClick2) #右键
scrollBar = Scrollbar(frm_L)#tree滚动条
scrollBar.pack(side=RIGHT, fill=Y)
scrollBar.config(command=tree.yview)
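# Suggested addition (not in the original script): the link above is one-way, so the
# scrollbar thumb would not follow the treeview; the line below completes the binding.
tree.configure(yscrollcommand=scrollBar.set)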
#right右边展示图片
frm_R = Frame(frm)
frm_R.pack(fill=BOTH)
#初始化右边的两个显示图片
pil_image = Image.open('img1.jpg') #打开
w, h = pil_image.size #获取原比例
pil_image_resized = resize(w, h, w_box, h_box, pil_image)#改成合适比例函数
tk_image = ImageTk.PhotoImage(pil_image_resized) #转成XX对象
Label(frm_R, text='所找图片预览').pack()
label1 = Label(frm_R,image=tk_image, width=w_box, height=h_box)
label1.pack()
Label(frm_R, text='所选图片预览').pack()
label2 = Label(frm_R,image=tk_image, width=w_box, height=h_box)
label2.pack()
#tab
tabControl = ttk.Notebook(frm_BB1) # Create Tab Control
tab1 = Frame(tabControl) # Create a tab
tabControl.pack(expand=2, fill="both") # Pack to make visible
tabControl.add(tab1, text='基础识图') # Add the tab
tab2 = Frame(tabControl)
tabControl.add(tab2, text='更多功能')
#tab1 基本
frm_B0 = Frame(tab1)
frm_B0.pack()
frm_B01 = Frame(frm_B0)
frm_B01.pack(side=TOP,fill=BOTH)
Label(frm_B01, text='所寻图片的路径').pack(side=LEFT)
Button(frm_B01, text = "图片选择",command = selectPath1).pack(side=RIGHT)
var1 = StringVar()
e1 = Entry(frm_B01,width=300,textvariable = var1)
#var1.set("请在此处输入需要查询的图片的路径")
var1.set(r'C:\图片\img1.jpg')
e1.pack(side=RIGHT)
frm_B02 = Frame(frm_B0)
frm_B02.pack(side=TOP,fill=BOTH)
Label(frm_B02, text='搜索范围文件夹').pack(side=LEFT)
Button(frm_B02, text = "路径选择",command = selectPath).pack(side=RIGHT)
var2 = StringVar()
e2 = Entry(frm_B02,width=300,textvariable = var2)
#var2.set("请在此处输入需要搜索的文件夹路径")
var2.set(r'C:\图片2')
e2.pack(side=RIGHT)
var3 = StringVar()
e3 = Entry(frm_B0,width=8,textvariable=var3)
s=Scale(frm_B0,label="相似程度", from_=0,to=100,orient=HORIZONTAL,
length=950,showvalue=0,tickinterval=5,resolution=1,
variable=var3)
s.pack(side=LEFT,fill=BOTH)
var3.set("20")
e3.pack(side=LEFT,fill=BOTH)
#tab2 功能保存和四种算法选择
frm_B1 = Frame(tab2)
frm_B1.pack(side=TOP,fill=BOTH)
number = StringVar()
numberChosen = ttk.Combobox(frm_B1, width=16, textvariable = number,state='readonly')
numberChosen['values'] = ('特征码识图(纹理)单层','特征码识图(纹理)多层',) # 设置下拉列表的值
numberChosen.grid(column=1, row=1) # 设置其在界面中出现的位置 column代表列 row 代表行
numberChosen.current(0) # 设置下拉列表默认显示的值,0为 numberChosen['values'] 的下标值
numberChosen.grid(row=1,column=1,sticky=W)
#Button(frm_B1, text = "标签试图",bg='yellow',command =lambda : recommend2(ID=var6.get())).grid(row=3,column=2,sticky=W)
#var6 = StringVar()
#Entry(frm_B1,width=16,textvariable = var6).grid(row=3,column=2,sticky=E)
#var6.set("输入标签试试看")
label = Label(frm_B1, text='算法功能选择:')#下拉列表
label.grid(row=0,column=0)
| random_line_split |
||
FP.py |
f2 = 1.0*h_box/h
factor = min([f1, f2])
width = int(w*factor)
height = int(h*factor)
return pil_image.resize((width, height), Image.ANTIALIAS)
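# Compatibility note: Image.ANTIALIAS was deprecated in Pillow 9.1 and removed in Pillow 10;
# the equivalent filter there is Image.Resampling.LANCZOS. A small guard like the one below
# (a hypothetical constant, not part of the original script) keeps both versions working.
try:
    _RESAMPLE_FILTER = Image.Resampling.LANCZOS  # Pillow >= 9.1
except AttributeError:
    _RESAMPLE_FILTER = Image.ANTIALIAS           # older Pillow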
def selectPath1():#图片选择
path_img = askopenfilename()
print(path_img)
var1.set(path_img)
def selectPath():#路径选择
path_folder = askdirectory()
print(path_folder)
print(var2)
var2.set(path_folder)
def Init_dog():#图像显示初始化
global label1,label2,image4,image5
image4 = Image.open('img1.jpg') #打开img1的图像
w, h = image4.size #获取原比例
#w和h分开会报错“TypeError: unsupported operand type(s) for /: 'float' and 'tuple'”
pil_image_resized = resize(w, h, w_box, h_box, image4)#改成合适比例函数
image4 = ImageTk.PhotoImage(pil_image_resized) #转成XX对象
label1.configure(image = image4)
#label2.configure(image = image4)
image5 = Image.open('img1.jpg') #打开(因为image4全局后,这里引用上面就用不了了)
pil_image_resized = resize(w, h, w_box1, h_box1, image5) #改成合适比例函数
image5 = ImageTk.PhotoImage(pil_image_resized) #转成XX对象
#鼠标点击列表的监听
from PIL import Image
import pytesseract
def treeviewClick2(event):#右击打开目录
print ('右击')
for item in tree.selection():
item_text = tree.item(item,"values")
os.system("start explorer "+str(item_text[1]))
def treeviewClick1(event):#双击打开选定的图片
print ('双击')
for item in tree.selection():
item_text = tree.item(item,"values")
os.startfile(item_text[1]+'\\'+item_text[2]+item_text[3])
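# Note: both handlers above are Windows-specific (os.startfile and "start explorer"), and they
# index item_text[3], which appears to assume each row carries a fourth value (the file
# extension) filled in by the search routine even though only columns a/b/c are declared below.
# Passing the path as a list, e.g. subprocess.run(["explorer", folder]), would also avoid
# quoting issues when paths contain spaces.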
def treeviewClick(event):#单击展示选定的图片
print ('单击')
for item in tree.selection():
item_text = tree.item(item,"values")
showPic2(item_text[1]+'\\'+item_text[2]+item_text[3])#调用展示图片函数
#点击列表调用的函数
def showPic3(path):
global image13
image1 = Image.open(path) #打开
w, h = image1.size #获取原比例
image1_resized = resize(w, h, w_box1, h_box1, image1)#改成合适比例函数
image13= ImageTk.PhotoImage(image1_resized) #转成XX对象
#鼠标单击的调用
def showPic2(path):
global label2,image12
image1 = Image.open(path) #打开
w, h = image1.size #获取原比例
image1_resized = resize(w, h, w_box, h_box, image1)#改成合适比例函数
image12= ImageTk.PhotoImage(image1_resized) #转成XX对象
label2.configure(image = image12)
#显示所查找的图片
def showPic1():
global label1,image11
image1 = Image.open(var1.get()) #打开
w, h = image1.size #获取原比例
im | for index, (val, k) in enumerate(l):
tv.move(k, '', index)
tv.heading(col, command=lambda: treeview_sort_column1(tv, col, not reverse))
def treeview_sort_column2(tv, col, reverse):#Treeview、列名、排列方式(数字排序)
l = [(tv.set(k, col), k) for k in tv.get_children('')]
l.sort(key=lambda t: float(t[0]), reverse=reverse)
for index, (val, k) in enumerate(l):
tv.move(k, '', index)
tv.heading(col, command=lambda: treeview_sort_column2(tv, col, not reverse))
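# Note: treeview_sort_column2 converts every cell to float, so it raises ValueError if a
# non-numeric value ever appears in a numerically sorted column; treeview_sort_column1 keeps
# plain string ordering for the other columns.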
def delButton(tree):#清空tree表列
x=tree.get_children()
for item in x:
tree.delete(item)
def LbClick1(event):#listbox点击
try:
showPic3(lb.get(lb.curselection()))
except:
pass
def LbClick2(event):#listbox双击
try:
print (lb.get(lb.curselection()))
#读取图像
im=Image.open(lb.get(lb.curselection()))
#显示图像
im.show()
except:
pass
def lbExecute(listT):#listbox处理
lb.delete(0, END) #先清除
for item in listT:#再输出
lb.insert(END, item)
#执行搜图
import Execute
def start():
if var1.get()=='' or var2.get()=='' :
tkinter.messagebox.askokcancel('error','图片以及文件夹的路径请完整填写')
tabControl.select(0)#跳回第一个标签
pass
else :#路径合法,可以执行
Init_dog()#初始化图像显示
delButton(tree)#清空表列
showPic1()#预览所找图
#time_start=time.time()#time.time()为1970.1.1到当前时间的毫秒数
#搜图开始
if numberChosen.current() == 0 :
Execute.startSearch01(var1,var2,var3,tree,x,root)#单层文件夹
elif numberChosen.current() == 1 :
Execute.startSearch0(var1,var2,var3,tree,x,root)#多层文件夹
else :
tkinter.messagebox.askokcancel('error','算法出错')
#label3.config(text="耗时"+str(round(time_end-time_start,3))+'秒')#显示耗时
#自动筛选
#label=Label(root,textvariable = result, font=("黑体", 30, "bold"))
#label.grid(row=0,column=1,padx=20, pady=10,sticky=N)
#窗口开始===========================================================================
root = Tk() # 初始框的声明
root.title('基于哈希的图像文件整理应用的设计与实现')#设置窗口标题
root.geometry('1300x650+500+200')#设置窗口的大小宽x高+偏移量
root.resizable(width=True, height=True) #宽不可变, 高可变,默认为True
root.iconbitmap('img4.ico')
#ft1 = tkFont.Font(family='Fixdsys', size=12)
#ft2 = tkFont.Font(family='Fixdsys', size=10)
#标题
label9 = Label(root, text='欢迎使用本软件')
label9.pack(side=TOP)
frm = Frame(root)
frm.pack(side=TOP,fill=BOTH)
#底部公用
frm_BB = Frame(root)
frm_BB.pack(side=BOTTOM,fill=BOTH)
#按钮(右)
frm_BBB1 = Frame(frm_BB)
frm_BBB1.pack(side=RIGHT,fill=BOTH)
#label3=Label(frm_BBB1, text='耗时')
#label3.pack(fill=BOTH)
x=StringVar()
label4=Label(frm_BBB1,textvariable = x)
label4.pack(fill=BOTH)
x.set("无任务")
#button_img_gif = PhotoImage(file='start.png')
button_img = Button(frm_BBB1,text = '开始搜图',width=20,height=20,command=start)
button_img.pack(side=BOTTOM)
#框架(左)
frm_BB1 = Frame(frm_BB)
frm_BB1.pack(side=LEFT,fill=BOTH)
#left左边显示信息
frm_L = Frame(frm)
frm_L.pack(side=LEFT)
columns=("a","b","c")
tree=ttk.Treeview(frm_L,height=18,show="headings",columns=columns )#表格
for col in columns:
if (col=='a')or(col=='e'):#数字排序
tree.heading(col, text=col, command=lambda _col=col: treeview_sort_column2(tree, _col, False))#重建标题,添加控件排序方法
else :#默认排序
tree.heading(col, text=col, command=lambda _col=col: treeview_sort_column1(tree, _col, False))
tree.column('a', width=100, anchor='center')
tree.column('b', width=400, anchor='center')
tree.column('c | age1_resized = resize(w, h, w_box, h_box, image1)#改成合适比例函数
image11= ImageTk.PhotoImage(image1_resized) #转成XX对象
label1.configure(image = image11)
def treeview_sort_column1(tv, col, reverse):#Treeview、列名、排列方式(桶排序)
l = [(tv.set(k, col), k) for k in tv.get_children('')]
l.sort(reverse=reverse)
| identifier_body |
FP.py |
f2 = 1.0*h_box/h
factor = min([f1, f2])
width = int(w*factor)
height = int(h*factor)
return pil_image.resize((width, height), Image.ANTIALIAS)
def selectPath1():#图片选择
path_img = askopenfilename()
print(path_img)
var1.set(path_img)
def selectPath():#路径选择
path_folder = askdirectory()
print(path_folder)
print(var2)
var2.set(path_folder)
def Init_dog():#图像显示初始化
global label1,label2,image4,image5
image4 = Image.open('img1.jpg') #打开img1的图像
w, h = image4.size #获取原比例
#w和h分开会报错“TypeError: unsupported operand type(s) for /: 'float' and 'tuple'”
pil_image_resized = resize(w, h, w_box, h_box, image4)#改成合适比例函数
image4 = ImageTk.PhotoImage(pil_image_resized) #转成XX对象
label1.configure(image = image4)
#label2.configure(image = image4)
image5 = Image.open('img1.jpg') #打开(因为image4全局后,这里引用上面就用不了了)
pil_image_resized = resize(w, h, w_box1, h_box1, image5) #改成合适比例函数
image5 = ImageTk.PhotoImage(pil_image_resized) #转成XX对象
#鼠标点击列表的监听
from PIL import Image
import pytesseract
def treeviewClick2(event):#右击打开目录
print ('右击')
for item in tree.selection():
item_text = tree.item(item,"values")
os.system("start explorer "+str(item_text[1]))
def treeviewClick1(event):#双击打开选定的图片
print ('双击')
for item in tree.selection():
item_text = tree.item(item,"values")
os.startfile(item_text[1]+'\\'+item_text[2]+item_text[3])
def treeviewClick(event):#单击展示选定的图片
print ('单击')
for item in tree.selection():
item_text = tree.item(item,"values")
showPic2(item_text[1]+'\\'+item_text[2]+item_text[3])#调用展示图片函数
#点击列表调用的函数
def showPic3(path):
global image13
image1 = Image.open(path) #打开
w, h = image1.size #获取原比例
image1_resized = resize(w, h, w_box1, h_box1, image1)#改成合适比例函数
image13= ImageTk.PhotoImage(image1_resized) #转成XX对象
#鼠标单击的调用
def showPic2(path):
global label2,image12
image1 = Image.open(path) #打开
w, h = image1.size #获取原比例
image1_resized = resize(w, h, w_box, h_box, image1)#改成合适比例函数
image12= ImageTk.PhotoImage(image1_resized) #转成XX对象
label2.configure(image = image12)
#显示所查找的图片
def showPic1():
global label1,image11
image1 = Image.open(var1.get()) #打开
w, h = image1.size #获取原比例
image1_resized = resize(w, h, w_box, h_box, image1)#改成合适比例函数
image11= ImageTk.PhotoImage(image1_resized) #转成XX对象
label1.configure(image = image11)
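# Note: the global statements in showPic1/showPic2/showPic3 are what keep the
# ImageTk.PhotoImage objects alive. Tkinter widgets do not hold a Python reference to their
# image, so without these module-level references the image would be garbage-collected and
# the label would go blank.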
def treeview_sort_column1(tv, col, reverse):#Treeview、列名、排列方式(桶排序)
l = [(tv.set(k, col), k) for k in tv.get_children('')]
l.sort(reverse=reverse)
for index, (val, k) in enumerate(l):
tv.move(k, '', index)
tv.heading(col, command=lambda: treeview_sort_column1(tv, col, not reverse))
def treeview_sort_column2(tv, col, reverse):#Treeview、列名、排列方式(数字排序)
l = [(tv.set(k, col), k) for k in tv.get_children('')]
l.sort(key=lambda t: float(t[0]), reverse=reverse)
for index, (val, k) in enumerate(l):
tv.move(k, '', index)
tv.heading(col, command=lambda: treeview_sort_column2(tv, col, not reverse))
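treeview_sort_column2 converts the cell text to float before sorting because a Treeview stores every value as a string, and plain string comparison puts '10' before '9'. A quick illustration of the difference, independent of Tkinter:

sizes = ['9', '10', '2']
print(sorted(sizes))             # ['10', '2', '9']  lexicographic order
print(sorted(sizes, key=float))  # ['2', '9', '10']  numeric order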
def delButton(tree):#clear all rows of the tree
x=tree.get_children()
for item in x:
tree.delete(item)
def LbClick | try:
showPic3(lb.get(lb.curselection()))
except:
pass
def LbClick2(event):#double-click in the listbox
try:
print (lb.get(lb.curselection()))
#load the image
im=Image.open(lb.get(lb.curselection()))
#show the image
im.show()
except:
pass
def lbExecute(listT):#fill the listbox
lb.delete(0, END) #clear first
for item in listT:#then insert the new items
lb.insert(END, item)
#run the image search
import Execute
def start():
if var1.get()=='' or var2.get()=='' :
tkinter.messagebox.askokcancel('error','图片以及文件夹的路径请完整填写')
tabControl.select(0)#jump back to the first tab
pass
else :#paths are valid, run the search
Init_dog()#initialize the image display
delButton(tree)#clear the table
showPic1()#preview the query image
#time_start=time.time()#time.time() is seconds since 1970-01-01
#start the search
if numberChosen.current() == 0 :
Execute.startSearch01(var1,var2,var3,tree,x,root)#single-level folder
elif numberChosen.current() == 1 :
Execute.startSearch0(var1,var2,var3,tree,x,root)#recurse into subfolders
else :
tkinter.messagebox.askokcancel('error','算法出错')
#label3.config(text="耗时"+str(round(time_end-time_start,3))+'秒')#show the elapsed time
#automatic filtering
#label=Label(root,textvariable = result, font=("黑体", 30, "bold"))
#label.grid(row=0,column=1,padx=20, pady=10,sticky=N)
#window setup ===========================================================================
root = Tk() # create the root window
root.title('基于哈希的图像文件整理应用的设计与实现')#set the window title
root.geometry('1300x650+500+200')#window size (width x height) plus screen offset
root.resizable(width=True, height=True) #allow resizing in both directions (default is True)
root.iconbitmap('img4.ico')
#ft1 = tkFont.Font(family='Fixdsys', size=12)
#ft2 = tkFont.Font(family='Fixdsys', size=10)
#title
label9 = Label(root, text='欢迎使用本软件')
label9.pack(side=TOP)
frm = Frame(root)
frm.pack(side=TOP,fill=BOTH)
#shared bottom area
frm_BB = Frame(root)
frm_BB.pack(side=BOTTOM,fill=BOTH)
#buttons (right)
frm_BBB1 = Frame(frm_BB)
frm_BBB1.pack(side=RIGHT,fill=BOTH)
#label3=Label(frm_BBB1, text='耗时')
#label3.pack(fill=BOTH)
x=StringVar()
label4=Label(frm_BBB1,textvariable = x)
label4.pack(fill=BOTH)
x.set("无任务")
#button_img_gif = PhotoImage(file='start.png')
button_img = Button(frm_BBB1,text = '开始搜图',width=20,height=20,command=start)
button_img.pack(side=BOTTOM)
#frame (left)
frm_BB1 = Frame(frm_BB)
frm_BB1.pack(side=LEFT,fill=BOTH)
#left: info display on the left side
frm_L = Frame(frm)
frm_L.pack(side=LEFT)
columns=("a","b","c")
tree=ttk.Treeview(frm_L,height=18,show="headings",columns=columns )#table
for col in columns:
if (col=='a')or(col=='e'):#numeric sort
tree.heading(col, text=col, command=lambda _col=col: treeview_sort_column2(tree, _col, False))#rebuild the heading and attach the sort handler
else :#default (string) sort
tree.heading(col, text=col, command=lambda _col=col: treeview_sort_column1(tree, _col, False))
tree.column('a', width=100, anchor='center')
tree.column('b', width=400, anchor='center')
tree.column('c', | 1(event):#listbox click
| conditional_block |
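The treeviewClick handlers above only take effect once they are bound to mouse events on the Treeview; the binding itself is outside this excerpt. A sketch of the usual Tkinter wiring (standard event names, widget name tree as above):

tree.bind('<ButtonRelease-1>', treeviewClick)  # single click: preview the image
tree.bind('<Double-1>', treeviewClick1)        # double click: open the image
tree.bind('<Button-3>', treeviewClick2)        # right click: open the folder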
FP.py | retrieval.imagehash_retrieval(var1, var2,'dhash',thred=0)
print(hash_res)
def clicked3():
hash_res = imagehash_retrieval.imagehash_retrieval(var1, var2,'phash',thred=0)
print(hash_res)
def clicked4():
hash_res = imagehash_retrieval.imagehash_retrieval(var1, var2,'whash',thred=0)
print(hash_res)
#copy the results into the "泽" folder
def mycopyfile(srcfile,dstfile):
if not os.path.isfile(srcfile):
print ("%s not exist!"%(srcfile))
else:
fpath,fname=os.path.split(dstfile) #split into directory and file name
if not os.path.exists(fpath):
os.makedirs(fpath) #create the directory
shutil.copyfile(srcfile,dstfile) #copy the file
print ("copy %s -> %s"%( srcfile,dstfile))
def save():
for item in tree.get_children():
item_text = tree.item(item,"values")
fileB=os.path.join(item_text[1],item_text[2]+item_text[3])#file to copy
file2B=os.path.join(var2.get(),"泽",'find_'+item_text[2]+item_text[3])#destination file
mycopyfile(fileB,file2B)
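save and mycopyfile copy every hit listed in the Treeview into a subfolder named "泽" under the chosen search folder, prefixing each file with find_. The same step can be written more compactly with the standard library; a sketch under the assumption that each row stores (directory, base name, extension):

import os, shutil

def save_results(rows, dest_root):
    dest = os.path.join(dest_root, "泽")
    os.makedirs(dest, exist_ok=True)  # create the folder only if it is missing
    for directory, stem, ext in rows:
        shutil.copy2(os.path.join(directory, stem + ext),
                     os.path.join(dest, 'find_' + stem + ext))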
#helper functions
def resize(w, h, w_box, h_box, pil_image): #scale an image to fit a box
f1 = 1.0*w_box/w # 1.0 forces float division in Python2
f2 = 1.0*h_box/h
factor = min([f1, f2])
width = int(w*factor)
height = int(h*factor)
return pil_image.resize((width, height), Image.ANTIALIAS)
def selectPath1():#choose an image file
path_img = askopenfilename()
print(path_img)
var1.set(path_img)
def selectPath():#choose a folder
path_folder = askdirectory()
print(path_folder)
print(var2)
var2.set(path_folder)
def Init_dog():#initialize the image display
global label1,label2,image4,image5
image4 = Image.open('img1.jpg') #open the placeholder image img1.jpg
w, h = image4.size #get the original size
#note: passing the size tuple instead of separate w and h raises "TypeError: unsupported operand type(s) for /: 'float' and 'tuple'"
pil_image_resized = resize(w, h, w_box, h_box, image4)#scale to fit the display box
image4 = ImageTk.PhotoImage(pil_image_resized) #convert to a PhotoImage object
label1.configure(image = image4)
#label2.configure(image = image4)
image5 = Image.open('img1.jpg') #open again (image4 is global, so the object above cannot be reused here)
pil_image_resized = resize(w, h, w_box1, h_box1, image5) #scale to fit the display box
image5 = ImageTk.PhotoImage(pil_image_resized) #convert to a PhotoImage object
#mouse-click listeners for the list
from PIL import Image
import pytesseract
def treeviewClick2(event):#right-click: open the containing folder
print ('右击')
for item in tree.selection():
item_text = tree.item(item,"values")
os.system("start explorer "+str(item_text[1]))
def treeviewClick1(event):#double-click: open the selected image
print ('双击')
for item in tree.selection():
item_text = tree.item(item,"values")
os.startfile(item_text[1]+'\\'+item_text[2]+item_text[3])
def treeviewClick(event):#single-click: preview the selected image
print ('单击')
for item in tree.selection():
item_text = tree.item(item,"values")
showPic2(item_text[1]+'\\'+item_text[2]+item_text[3])#call the image preview helper
#functions called when the list is clicked
def showPic3(path):
global image13
image1 = Image.open(path) #open
w, h = image1.size #get the original size
image1_resized = resize(w, h, w_box1, h_box1, image1)#scale to fit the display box
image13= ImageTk.PhotoImage(image1_resized) #convert to a PhotoImage object
#called on a single click
def showPic2(path):
global label2,image12
image1 = Image.open(path) #open
w, h = image1.size #get the original size
image1_resized = resize(w, h, w_box, h_box, image1)#scale to fit the display box
image12= ImageTk.PhotoImage(image1_resized) #convert to a PhotoImage object
label2.configure(image = image12)
#show the query image
def showPic1():
global label1,image11
image1 = Image.open(var1.get()) #open
w, h = image1.size #get the original size
image1_resized = resize(w, h, w_box, h_box, image1)#scale to fit the display box
image11= ImageTk.PhotoImage(image1_resized) #convert to a PhotoImage object
label1.configure(image = image11)
def treeview_sort_column1(tv, col, reverse):#Treeview, column name, sort order (string sort)
l = [(tv.set(k, col), k) for k in tv.get_children('')]
l.sort(reverse=reverse)
for index, (val, k) in enumerate(l):
tv.move(k, '', index)
tv.heading(col, command=lambda: treeview_sort_column1(tv, col, not reverse))
def treeview_sort_column2(tv, col, reverse):#Treeview, column name, sort order (numeric sort)
l = [(tv.set(k, col), k) for k in tv.get_children('')]
l.sort(key=lambda t: float(t[0]), reverse=reverse)
for index, (val, k) in enumerate(l):
tv.move(k, '', index)
tv.heading(col, command=lambda: treeview_sort_column2(tv, col, not reverse))
def delButton(tree):#clear all rows of the tree
x=tree.get_children()
for item in x:
tree.delete(item)
def LbClick1(event):#single click in the listbox
try:
showPic3(lb.get(lb.curselection()))
except:
pass
def LbClick2(event):#double-click in the listbox
try:
print (lb.get(lb.curselection()))
#load the image
im=Image.open(lb.get(lb.curselection()))
#show the image
im.show()
except:
pass
def lbExecute(listT):#fill the listbox
lb.delete(0, END) #clear first
for item in listT:#then insert the new items
lb.insert(END, item)
#run the image search
import Execute
def start():
if var1.get()=='' or var2.get()=='' :
tkinter.messagebox.askokcancel('error','图片以及文件夹的路径请完整填写')
tabControl.select(0)#jump back to the first tab
pass
else :#paths are valid, run the search
Init_dog()#initialize the image display
delButton(tree)#clear the table
showPic1()#preview the query image
#time_start=time.time()#time.time() is seconds since 1970-01-01
#start the search
if numberChosen.current() == 0 :
Execute.startSearch01(var1,var2,var3,tree,x,root)#single-level folder
elif numberChosen.current() == 1 :
Execute.startSearch0(var1,var2,var3,tree,x,root)#recurse into subfolders
else :
tkinter.messagebox.askokcancel('error','算法出错')
#label3.config(text="耗时"+str(round(time_end-time_start,3))+'秒')#show the elapsed time
#automatic filtering
#label=Label(root,textvariable = result, font=("黑体", 30, "bold"))
#label.grid(row=0,column=1,padx=20, pady=10,sticky=N)
#window setup ===========================================================================
root = Tk() # create the root window
root.title('基于哈希的图像文件整理应用的设计与实现')#set the window title
root.geometry('1300x650+500+200')#window size (width x height) plus screen offset
root.resizable(width=True, height=True) #allow resizing in both directions (default is True)
root.iconbitmap('img4.ico')
#ft1 = tkFont.Font(family='Fixdsys', size=12)
#ft2 = tkFont.Font(family='Fixdsys', size=10)
#title
label9 = Label(root, text='欢迎使用本软件')
label9.pack(side=TOP)
frm = Frame(root)
frm.pack(side=TOP,fill=BOTH)
#shared bottom area
frm_BB = Frame(root)
frm_BB | agehash_ | identifier_name |
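clicked3 and clicked4 above delegate to a project-specific imagehash_retrieval module (not shown here) with 'phash' and 'whash' as the hash type and thred=0. For reference, comparing two images with the widely used imagehash package looks roughly like this; treat the package choice as an assumption about what that module wraps:

from PIL import Image
import imagehash

def is_duplicate(path_a, path_b, threshold=0):
    # Hamming distance between the two 64-bit perceptual hashes; 0 means identical hashes
    return (imagehash.phash(Image.open(path_a)) - imagehash.phash(Image.open(path_b))) <= threshold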
|
get_block_template.rs | invalid proposal format", parse_error.into()).into(),
);
}
};
let block_verifier_router_response = block_verifier_router
.ready()
.await
.map_err(|error| Error {
code: ErrorCode::ServerError(0),
message: error.to_string(),
data: None,
})?
.call(zebra_consensus::Request::CheckProposal(Arc::new(block)))
.await;
Ok(block_verifier_router_response
.map(|_hash| ProposalResponse::Valid)
.unwrap_or_else(|verify_chain_error| {
tracing::info!(
?verify_chain_error,
"error response from block_verifier_router in CheckProposal request"
);
ProposalResponse::rejected("invalid proposal", verify_chain_error)
})
.into())
}
// - State and syncer checks
/// Returns an error if Zebra is not synced to the consensus chain tip.
/// This error might be incorrect if the local clock is skewed.
pub fn check_synced_to_tip<Tip, SyncStatus>(
network: Network,
latest_chain_tip: Tip,
sync_status: SyncStatus,
) -> Result<()>
where
Tip: ChainTip + Clone + Send + Sync + 'static,
SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
{
// The tip estimate may not be the same as the one coming from the state
// but this is ok for an estimate
let (estimated_distance_to_chain_tip, local_tip_height) = latest_chain_tip
.estimate_distance_to_network_chain_tip(network)
.ok_or_else(|| Error {
code: ErrorCode::ServerError(0),
message: "No Chain tip available yet".to_string(),
data: None,
})?;
if !sync_status.is_close_to_tip()
|| estimated_distance_to_chain_tip > MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP
{
tracing::info!(
?estimated_distance_to_chain_tip,
?local_tip_height,
"Zebra has not synced to the chain tip. \
Hint: check your network connection, clock, and time zone settings."
);
return Err(Error {
code: NOT_SYNCED_ERROR_CODE,
message: format!(
"Zebra has not synced to the chain tip, \
estimated distance: {estimated_distance_to_chain_tip:?}, \
local tip: {local_tip_height:?}. \
Hint: check your network connection, clock, and time zone settings."
),
data: None,
});
}
Ok(())
}
// - State and mempool data fetches
/// Returns the state data for the block template.
///
/// You should call `check_synced_to_tip()` before calling this function.
/// If the state does not have enough blocks, returns an error.
pub async fn fetch_state_tip_and_local_time<State>(
state: State,
) -> Result<GetBlockTemplateChainInfo>
where
State: Service<
zebra_state::ReadRequest,
Response = zebra_state::ReadResponse,
Error = zebra_state::BoxError,
> + Clone
+ Send
+ Sync
+ 'static,
{
let request = zebra_state::ReadRequest::ChainInfo;
let response = state
.oneshot(request.clone())
.await
.map_err(|error| Error {
code: ErrorCode::ServerError(0),
message: error.to_string(),
data: None,
})?;
let chain_info = match response {
zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info,
_ => unreachable!("incorrect response to {request:?}"),
};
Ok(chain_info)
}
/// Returns the transactions that are currently in `mempool`, or None if the
/// `last_seen_tip_hash` from the mempool response doesn't match the tip hash from the state.
///
/// You should call `check_synced_to_tip()` before calling this function.
/// If the mempool is inactive because Zebra is not synced to the tip, returns no transactions.
pub async fn fetch_mempool_transactions<Mempool>(
mempool: Mempool,
chain_tip_hash: block::Hash,
) -> Result<Option<Vec<VerifiedUnminedTx>>>
where
Mempool: Service<
mempool::Request,
Response = mempool::Response,
Error = zebra_node_services::BoxError,
> + 'static,
Mempool::Future: Send,
{
let response = mempool
.oneshot(mempool::Request::FullTransactions)
.await
.map_err(|error| Error {
code: ErrorCode::ServerError(0),
message: error.to_string(),
data: None,
})?;
let mempool::Response::FullTransactions {
transactions,
last_seen_tip_hash,
} = response
else {
unreachable!("unmatched response to a mempool::FullTransactions request")
};
// Check that the mempool and state were in sync when we made the requests
Ok((last_seen_tip_hash == chain_tip_hash).then_some(transactions))
}
// - Response processing
/// Generates and returns the coinbase transaction and default roots.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
pub fn generate_coinbase_and_roots(
network: Network,
height: Height,
miner_address: transparent::Address,
mempool_txs: &[VerifiedUnminedTx],
history_tree: Arc<zebra_chain::history_tree::HistoryTree>,
like_zcashd: bool,
extra_coinbase_data: Vec<u8>,
) -> (TransactionTemplate<NegativeOrZero>, DefaultRoots) {
// Generate the coinbase transaction
let miner_fee = calculate_miner_fee(mempool_txs);
let coinbase_txn = generate_coinbase_transaction(
network,
height,
miner_address,
miner_fee,
like_zcashd,
extra_coinbase_data,
);
// Calculate block default roots
//
// TODO: move expensive root, hash, and tree cryptography to a rayon thread?
let default_roots = calculate_default_root_hashes(&coinbase_txn, mempool_txs, history_tree);
let coinbase_txn = TransactionTemplate::from_coinbase(&coinbase_txn, miner_fee);
(coinbase_txn, default_roots)
}
// - Coinbase transaction processing
/// Returns a coinbase transaction for the supplied parameters.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
pub fn generate_coinbase_transaction(
network: Network,
height: Height,
miner_address: transparent::Address,
miner_fee: Amount<NonNegative>,
like_zcashd: bool,
extra_coinbase_data: Vec<u8>,
) -> UnminedTx {
let outputs = standard_coinbase_outputs(network, height, miner_address, miner_fee, like_zcashd);
if like_zcashd {
Transaction::new_v4_coinbase(network, height, outputs, like_zcashd, extra_coinbase_data)
.into()
} else {
Transaction::new_v5_coinbase(network, height, outputs, extra_coinbase_data).into()
}
}
/// Returns the total miner fee for `mempool_txs`.
pub fn calculate_miner_fee(mempool_txs: &[VerifiedUnminedTx]) -> Amount<NonNegative> {
let miner_fee: amount::Result<Amount<NonNegative>> =
mempool_txs.iter().map(|tx| tx.miner_fee).sum();
miner_fee.expect(
"invalid selected transactions: \
fees in a valid block can not be more than MAX_MONEY",
)
}
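calculate_miner_fee simply sums the per-transaction fees and treats anything above MAX_MONEY as a programming error. The same invariant written out in plain Python (the 21 million ZEC cap in zatoshi is an assumption used only for illustration):

MAX_MONEY = 21_000_000 * 100_000_000  # assumed cap, in zatoshi

def total_miner_fee(fees):
    total = sum(fees)
    assert 0 <= total <= MAX_MONEY, "fees in a valid block can not be more than MAX_MONEY"
    return total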
/// Returns the standard funding stream and miner reward transparent output scripts
/// for `network`, `height` and `miner_fee`.
///
/// Only works for post-Canopy heights.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
pub fn standard_coinbase_outputs(
network: Network,
height: Height,
miner_address: transparent::Address,
miner_fee: Amount<NonNegative>,
like_zcashd: bool,
) -> Vec<(Amount<NonNegative>, transparent::Script)> {
let funding_streams = funding_stream_values(height, network)
.expect("funding stream value calculations are valid for reasonable chain heights");
// Optional TODO: move this into a zebra_consensus function?
let funding_streams: HashMap<
FundingStreamReceiver,
(Amount<NonNegative>, transparent::Address),
> = funding_streams
.into_iter()
.map(|(receiver, amount)| {
(
receiver,
(amount, funding_stream_address(height, network, receiver)),
)
})
.collect();
let miner_reward = miner_subsidy(height, network)
.expect("reward calculations are valid for reasonable chain heights")
+ miner_fee;
let miner_reward =
miner_reward.expect("reward calculations are valid for reasonable chain heights");
combine_coinbase_outputs(funding_streams, miner_address, miner_reward, like_zcashd)
}
/// Combine the miner reward and funding streams into a list of coinbase amounts and addresses.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
fn | combine_coinbase_outputs | identifier_name |
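check_synced_to_tip above refuses to serve a template when the estimated distance to the network tip is too large, and its doc comment warns that a skewed local clock makes the estimate wrong. The estimate boils down to dividing the age of the local tip by the target block spacing; a rough sketch, with the 75-second spacing stated as an assumption for illustration:

import time

TARGET_SPACING_SECS = 75  # assumed target block interval

def estimated_distance_to_tip(local_tip_time):
    # blocks the network should have produced since our tip; wrong if the local clock is skewed
    return max(0, int((time.time() - local_tip_time) // TARGET_SPACING_SECS))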
|
get_block_template.rs | data: Some(_),
..
} => Err(Error {
code: ErrorCode::InvalidParams,
message: "\"data\" parameter must be \
omitted in \"template\" mode"
.to_string(),
data: None,
}),
}
}
/// Returns the miner address, or an error if it is invalid.
pub fn check_miner_address(
miner_address: Option<transparent::Address>,
) -> Result<transparent::Address> {
miner_address.ok_or_else(|| Error {
code: ErrorCode::ServerError(0),
message: "configure mining.miner_address in zebrad.toml \
with a transparent address"
.to_string(),
data: None,
})
}
/// Attempts to validate block proposal against all of the server's
/// usual acceptance rules (except proof-of-work).
///
/// Returns a `getblocktemplate` [`Response`].
pub async fn validate_block_proposal<BlockVerifierRouter, Tip, SyncStatus>(
mut block_verifier_router: BlockVerifierRouter,
block_proposal_bytes: Vec<u8>,
network: Network,
latest_chain_tip: Tip,
sync_status: SyncStatus,
) -> Result<Response>
where
BlockVerifierRouter: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
+ Clone
+ Send
+ Sync
+ 'static,
Tip: ChainTip + Clone + Send + Sync + 'static,
SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
{
check_synced_to_tip(network, latest_chain_tip, sync_status)?;
let block: Block = match block_proposal_bytes.zcash_deserialize_into() {
Ok(block) => block,
Err(parse_error) => {
tracing::info!(
?parse_error,
"error response from block parser in CheckProposal request"
);
return Ok(
ProposalResponse::rejected("invalid proposal format", parse_error.into()).into(),
);
}
};
let block_verifier_router_response = block_verifier_router
.ready()
.await
.map_err(|error| Error {
code: ErrorCode::ServerError(0),
message: error.to_string(),
data: None,
})?
.call(zebra_consensus::Request::CheckProposal(Arc::new(block)))
.await;
Ok(block_verifier_router_response
.map(|_hash| ProposalResponse::Valid)
.unwrap_or_else(|verify_chain_error| {
tracing::info!(
?verify_chain_error,
"error response from block_verifier_router in CheckProposal request"
);
ProposalResponse::rejected("invalid proposal", verify_chain_error)
})
.into())
}
// - State and syncer checks
/// Returns an error if Zebra is not synced to the consensus chain tip.
/// This error might be incorrect if the local clock is skewed.
pub fn check_synced_to_tip<Tip, SyncStatus>(
network: Network,
latest_chain_tip: Tip,
sync_status: SyncStatus,
) -> Result<()>
where
Tip: ChainTip + Clone + Send + Sync + 'static,
SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
{
// The tip estimate may not be the same as the one coming from the state
// but this is ok for an estimate
let (estimated_distance_to_chain_tip, local_tip_height) = latest_chain_tip
.estimate_distance_to_network_chain_tip(network)
.ok_or_else(|| Error {
code: ErrorCode::ServerError(0),
message: "No Chain tip available yet".to_string(),
data: None,
})?;
if !sync_status.is_close_to_tip()
|| estimated_distance_to_chain_tip > MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP
{
tracing::info!(
?estimated_distance_to_chain_tip,
?local_tip_height,
"Zebra has not synced to the chain tip. \
Hint: check your network connection, clock, and time zone settings."
);
return Err(Error {
code: NOT_SYNCED_ERROR_CODE,
message: format!(
"Zebra has not synced to the chain tip, \
estimated distance: {estimated_distance_to_chain_tip:?}, \
local tip: {local_tip_height:?}. \
Hint: check your network connection, clock, and time zone settings."
),
data: None,
});
}
Ok(())
}
// - State and mempool data fetches
/// Returns the state data for the block template.
///
/// You should call `check_synced_to_tip()` before calling this function.
/// If the state does not have enough blocks, returns an error.
pub async fn fetch_state_tip_and_local_time<State>(
state: State,
) -> Result<GetBlockTemplateChainInfo>
where
State: Service<
zebra_state::ReadRequest,
Response = zebra_state::ReadResponse,
Error = zebra_state::BoxError,
> + Clone
+ Send
+ Sync
+ 'static,
|
/// Returns the transactions that are currently in `mempool`, or None if the
/// `last_seen_tip_hash` from the mempool response doesn't match the tip hash from the state.
///
/// You should call `check_synced_to_tip()` before calling this function.
/// If the mempool is inactive because Zebra is not synced to the tip, returns no transactions.
pub async fn fetch_mempool_transactions<Mempool>(
mempool: Mempool,
chain_tip_hash: block::Hash,
) -> Result<Option<Vec<VerifiedUnminedTx>>>
where
Mempool: Service<
mempool::Request,
Response = mempool::Response,
Error = zebra_node_services::BoxError,
> + 'static,
Mempool::Future: Send,
{
let response = mempool
.oneshot(mempool::Request::FullTransactions)
.await
.map_err(|error| Error {
code: ErrorCode::ServerError(0),
message: error.to_string(),
data: None,
})?;
let mempool::Response::FullTransactions {
transactions,
last_seen_tip_hash,
} = response
else {
unreachable!("unmatched response to a mempool::FullTransactions request")
};
// Check that the mempool and state were in sync when we made the requests
Ok((last_seen_tip_hash == chain_tip_hash).then_some(transactions))
}
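fetch_mempool_transactions only returns the snapshot when the mempool's last_seen_tip_hash still equals the state's tip hash, so a template never mixes data from two different tips. The pattern reduced to a small sketch with placeholder fetch functions (neither name is part of any real API):

def consistent_mempool_snapshot(get_state_tip, get_mempool_snapshot):
    tip_hash = get_state_tip()
    transactions, last_seen_tip_hash = get_mempool_snapshot()
    # discard the snapshot if the chain tip moved between the two requests
    return transactions if last_seen_tip_hash == tip_hash else None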
// - Response processing
/// Generates and returns the coinbase transaction and default roots.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
pub fn generate_coinbase_and_roots(
network: Network,
height: Height,
miner_address: transparent::Address,
mempool_txs: &[VerifiedUnminedTx],
history_tree: Arc<zebra_chain::history_tree::HistoryTree>,
like_zcashd: bool,
extra_coinbase_data: Vec<u8>,
) -> (TransactionTemplate<NegativeOrZero>, DefaultRoots) {
// Generate the coinbase transaction
let miner_fee = calculate_miner_fee(mempool_txs);
let coinbase_txn = generate_coinbase_transaction(
network,
height,
miner_address,
miner_fee,
like_zcashd,
extra_coinbase_data,
);
// Calculate block default roots
//
// TODO: move expensive root, hash, and tree cryptography to a rayon thread?
let default_roots = calculate_default_root_hashes(&coinbase_txn, mempool_txs, history_tree);
let coinbase_txn = TransactionTemplate::from_coinbase(&coinbase_txn, miner_fee);
(coinbase_txn, default_roots)
}
// - Coinbase transaction processing
/// Returns a coinbase transaction for the supplied parameters.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
pub fn generate_coinbase_transaction(
network: Network,
height: Height,
miner_address: transparent::Address,
miner_fee: Amount<NonNegative>,
like_zcashd: bool,
extra_coinbase_data: Vec<u8>,
) -> UnminedTx {
let outputs = standard_coinbase_outputs(network, height, miner_address, miner_fee, like_zcashd);
if like_zcashd {
Transaction::new_v4_coinbase(network, height, outputs, like_zcashd, extra_coinbase_data)
.into()
} else {
Transaction::new_v5_coinbase(network, height, outputs, extra_coinbase_data).into()
}
}
/// Returns the total miner fee for `mempool_txs`.
pub fn calculate_miner_fee(mempool_txs: &[VerifiedUnminedTx]) -> Amount<NonNegative> {
let miner_fee: amount::Result<Amount<NonNegative>> =
mempool_txs.iter().map(|tx| tx.min | {
let request = zebra_state::ReadRequest::ChainInfo;
let response = state
.oneshot(request.clone())
.await
.map_err(|error| Error {
code: ErrorCode::ServerError(0),
message: error.to_string(),
data: None,
})?;
let chain_info = match response {
zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info,
_ => unreachable!("incorrect response to {request:?}"),
};
Ok(chain_info)
} | identifier_body |
get_block_template.rs | transparent,
};
use zebra_consensus::{
funding_stream_address, funding_stream_values, miner_subsidy, FundingStreamReceiver,
};
use zebra_node_services::mempool;
use zebra_state::GetBlockTemplateChainInfo;
use crate::methods::get_block_template_rpcs::{
constants::{MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP, NOT_SYNCED_ERROR_CODE},
types::{default_roots::DefaultRoots, transaction::TransactionTemplate},
};
pub use crate::methods::get_block_template_rpcs::types::get_block_template::*;
// - Parameter checks
/// Checks that `data` is omitted in `Template` mode or provided in `Proposal` mode,
///
/// Returns an error if there's a mismatch between the mode and whether `data` is provided.
pub fn check_parameters(parameters: &Option<JsonParameters>) -> Result<()> {
let Some(parameters) = parameters else {
return Ok(());
};
match parameters {
JsonParameters {
mode: GetBlockTemplateRequestMode::Template,
data: None,
..
}
| JsonParameters {
mode: GetBlockTemplateRequestMode::Proposal,
data: Some(_),
..
} => Ok(()),
JsonParameters {
mode: GetBlockTemplateRequestMode::Proposal,
data: None,
..
} => Err(Error {
code: ErrorCode::InvalidParams,
message: "\"data\" parameter must be \
provided in \"proposal\" mode"
.to_string(),
data: None,
}),
JsonParameters {
mode: GetBlockTemplateRequestMode::Template,
data: Some(_),
..
} => Err(Error {
code: ErrorCode::InvalidParams,
message: "\"data\" parameter must be \
omitted in \"template\" mode"
.to_string(),
data: None,
}),
}
}
/// Returns the miner address, or an error if it is invalid.
pub fn check_miner_address(
miner_address: Option<transparent::Address>,
) -> Result<transparent::Address> {
miner_address.ok_or_else(|| Error {
code: ErrorCode::ServerError(0),
message: "configure mining.miner_address in zebrad.toml \
with a transparent address"
.to_string(),
data: None,
})
}
/// Attempts to validate block proposal against all of the server's
/// usual acceptance rules (except proof-of-work).
///
/// Returns a `getblocktemplate` [`Response`].
pub async fn validate_block_proposal<BlockVerifierRouter, Tip, SyncStatus>(
mut block_verifier_router: BlockVerifierRouter,
block_proposal_bytes: Vec<u8>,
network: Network,
latest_chain_tip: Tip,
sync_status: SyncStatus,
) -> Result<Response>
where
BlockVerifierRouter: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
+ Clone
+ Send
+ Sync
+ 'static,
Tip: ChainTip + Clone + Send + Sync + 'static,
SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
{
check_synced_to_tip(network, latest_chain_tip, sync_status)?;
let block: Block = match block_proposal_bytes.zcash_deserialize_into() {
Ok(block) => block,
Err(parse_error) => {
tracing::info!(
?parse_error,
"error response from block parser in CheckProposal request"
);
return Ok(
ProposalResponse::rejected("invalid proposal format", parse_error.into()).into(),
);
}
};
let block_verifier_router_response = block_verifier_router
.ready()
.await
.map_err(|error| Error {
code: ErrorCode::ServerError(0),
message: error.to_string(),
data: None,
})?
.call(zebra_consensus::Request::CheckProposal(Arc::new(block)))
.await;
Ok(block_verifier_router_response
.map(|_hash| ProposalResponse::Valid)
.unwrap_or_else(|verify_chain_error| {
tracing::info!(
?verify_chain_error,
"error response from block_verifier_router in CheckProposal request"
);
ProposalResponse::rejected("invalid proposal", verify_chain_error)
})
.into())
}
// - State and syncer checks
/// Returns an error if Zebra is not synced to the consensus chain tip.
/// This error might be incorrect if the local clock is skewed.
pub fn check_synced_to_tip<Tip, SyncStatus>(
network: Network,
latest_chain_tip: Tip,
sync_status: SyncStatus,
) -> Result<()>
where
Tip: ChainTip + Clone + Send + Sync + 'static,
SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
{
// The tip estimate may not be the same as the one coming from the state
// but this is ok for an estimate
let (estimated_distance_to_chain_tip, local_tip_height) = latest_chain_tip
.estimate_distance_to_network_chain_tip(network)
.ok_or_else(|| Error {
code: ErrorCode::ServerError(0),
message: "No Chain tip available yet".to_string(),
data: None,
})?;
if !sync_status.is_close_to_tip()
|| estimated_distance_to_chain_tip > MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP
{
tracing::info!(
?estimated_distance_to_chain_tip,
?local_tip_height,
"Zebra has not synced to the chain tip. \
Hint: check your network connection, clock, and time zone settings."
);
return Err(Error {
code: NOT_SYNCED_ERROR_CODE,
message: format!(
"Zebra has not synced to the chain tip, \
estimated distance: {estimated_distance_to_chain_tip:?}, \
local tip: {local_tip_height:?}. \
Hint: check your network connection, clock, and time zone settings."
),
data: None,
});
}
Ok(())
}
// - State and mempool data fetches
/// Returns the state data for the block template.
///
/// You should call `check_synced_to_tip()` before calling this function.
/// If the state does not have enough blocks, returns an error.
pub async fn fetch_state_tip_and_local_time<State>(
state: State,
) -> Result<GetBlockTemplateChainInfo>
where
State: Service<
zebra_state::ReadRequest,
Response = zebra_state::ReadResponse,
Error = zebra_state::BoxError,
> + Clone
+ Send
+ Sync
+ 'static,
{
let request = zebra_state::ReadRequest::ChainInfo;
let response = state
.oneshot(request.clone())
.await
.map_err(|error| Error {
code: ErrorCode::ServerError(0),
message: error.to_string(),
data: None,
})?;
let chain_info = match response {
zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info,
_ => unreachable!("incorrect response to {request:?}"),
};
Ok(chain_info)
}
/// Returns the transactions that are currently in `mempool`, or None if the
/// `last_seen_tip_hash` from the mempool response doesn't match the tip hash from the state.
///
/// You should call `check_synced_to_tip()` before calling this function.
/// If the mempool is inactive because Zebra is not synced to the tip, returns no transactions.
pub async fn fetch_mempool_transactions<Mempool>(
mempool: Mempool,
chain_tip_hash: block::Hash,
) -> Result<Option<Vec<VerifiedUnminedTx>>>
where
Mempool: Service<
mempool::Request,
Response = mempool::Response,
Error = zebra_node_services::BoxError,
> + 'static,
Mempool::Future: Send,
{
let response = mempool
.oneshot(mempool::Request::FullTransactions)
.await
.map_err(|error| Error {
code: ErrorCode::ServerError(0),
message: error.to_string(),
data: None,
})?;
let mempool::Response::FullTransactions {
transactions,
last_seen_tip_hash,
} = response
else {
unreachable!("unmatched response to a mempool::FullTransactions request")
};
// Check that the mempool and state were in sync when we made the requests
Ok((last_seen_tip_hash == chain_tip_hash).then_some(transactions))
}
// - Response processing
/// Generates and returns the coinbase transaction and default roots.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
pub fn generate_coinbase_and_roots(
network: Network,
height: Height,
miner_address: transparent::Address,
mempool_txs: &[VerifiedUnminedTx],
history_tree: Arc<zebra_chain::history_tree::HistoryTree>,
like_zcashd: bool,
extra_coinbase_data: Vec<u8>,
) -> (TransactionTemplate<NegativeOrZero>, DefaultRoots) {
// Generate the coinbase transaction
let miner_fee = calculate_miner_fee(mempool_txs);
let coinbase_txn = generate_coinbase_transaction(
network,
height,
miner_address,
miner_fee | parameters::Network,
serialization::ZcashDeserializeInto,
transaction::{Transaction, UnminedTx, VerifiedUnminedTx}, | random_line_split |
|
server.rs | layerinfos: format!("{}", layerinfos.join(", ")),
hasviewer: hasviewer,
}
}
}
struct StaticFiles {
files: HashMap<&'static str, (&'static [u8], MediaType)>,
}
impl StaticFiles {
fn init() -> StaticFiles {
let mut static_files = StaticFiles {
files: HashMap::new(),
};
static_files.add(
"favicon.ico",
include_bytes!("static/favicon.ico"),
MediaType::Ico,
);
static_files.add(
"index.html",
include_bytes!("static/index.html"),
MediaType::Html,
);
static_files.add(
"viewer.js",
include_bytes!("static/viewer.js"),
MediaType::Js,
);
static_files.add(
"viewer.css",
include_bytes!("static/viewer.css"),
MediaType::Css,
);
static_files.add(
"maputnik.html",
include_bytes!("static/maputnik.html"),
MediaType::Html,
);
static_files.add(
"maputnik.js",
include_bytes!("static/maputnik.js"),
MediaType::Js,
);
static_files.add(
"maputnik-vendor.js",
include_bytes!("static/maputnik-vendor.js"),
MediaType::Js,
);
static_files.add(
"img/maputnik.png",
include_bytes!("static/img/maputnik.png"),
MediaType::Png,
);
static_files.add(
"fonts/Roboto-Regular.ttf",
include_bytes!("static/fonts/Roboto-Regular.ttf"),
MediaType::Ttf,
);
static_files.add(
"fonts/Roboto-Medium.ttf",
include_bytes!("static/fonts/Roboto-Medium.ttf"),
MediaType::Ttf,
);
static_files
}
fn add(&mut self, name: &'static str, data: &'static [u8], media_type: MediaType) {
self.files.insert(name, (data, media_type));
}
fn content(&self, base: Option<&str>, name: String) -> Option<&(&[u8], MediaType)> {
let mut key = if name == "." {
"index.html".to_string()
} else {
name
};
if let Some(path) = base {
key = format!("{}/{}", path, key);
}
self.files.get(&key as &str)
}
}
include!(concat!(env!("OUT_DIR"), "/fonts.rs"));
static DINO: &'static str = " xxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxx xxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx
xxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxx
xxxxxxxxxxx
xxxxxxxxxx
xxxxxxxxx
xxxxxxx
xxxxxx
xxxxxxx";
fn set_layer_buffer_defaults(layer: &mut Layer, simplify: bool, clip: bool) {
layer.simplify = simplify;
if simplify {
// Limit features by default unless simplify is set to false
layer.query_limit = Some(1000);
}
layer.buffer_size = match layer.geometry_type {
Some(ref geom) => {
if clip {
if geom.contains("POLYGON") {
Some(1)
} else {
Some(0)
}
} else {
None
}
}
None => None,
};
}
pub fn service_from_args(args: &ArgMatches) -> (MvtService, ApplicationCfg) {
if let Some(cfgpath) = args.value_of("config") {
info!("Reading configuration from '{}'", cfgpath);
for argname in vec!["dbconn", "datasource", "qgs"] {
if args.value_of(argname).is_some() {
warn!("Ignoring argument `{}`", argname);
}
}
let config = read_config(cfgpath).unwrap_or_else(|err| {
println!("Error reading configuration - {} ", err);
process::exit(1)
});
let mut svc = MvtService::from_config(&config).unwrap_or_else(|err| {
println!("Error reading configuration - {} ", err);
process::exit(1)
});
svc.connect();
(svc, config)
} else {
let bind = args.value_of("bind").unwrap_or("127.0.0.1");
let port =
u16::from_str(args.value_of("port").unwrap_or("6767")).expect("Invalid port number");
let mut config: ApplicationCfg = parse_config(DEFAULT_CONFIG.to_string(), "").unwrap();
config.webserver.bind = Some(bind.to_string());
config.webserver.port = Some(port);
let cache = match args.value_of("cache") {
None => Tilecache::Nocache(Nocache),
Some(dir) => Tilecache::Filecache(Filecache {
basepath: dir.to_string(),
baseurl: None,
}),
};
let simplify = bool::from_str(args.value_of("simplify").unwrap_or("true")).unwrap_or(false);
let clip = bool::from_str(args.value_of("clip").unwrap_or("true")).unwrap_or(false);
let grid = Grid::web_mercator();
let mut tilesets = Vec::new();
let datasources = if let Some(qgs) = args.value_of("qgs") {
info!("Reading configuration from '{}'", qgs);
let (datasources, mut tileset) = read_qgs(qgs);
for layer in tileset.layers.iter_mut() {
set_layer_buffer_defaults(layer, simplify, clip);
}
tilesets.push(tileset);
datasources
} else {
let datasources = Datasources::from_args(args);
if datasources.datasources.is_empty() {
println!("Either 'config', 'dbconn' or 'datasource' is required");
process::exit(1)
}
let detect_geometry_types = true; //TODO: add option (maybe slow for many geometries)
for (_name, ds) in &datasources.datasources {
let dsconn = ds.connected();
let mut layers = dsconn.detect_layers(detect_geometry_types);
while let Some(mut l) = layers.pop() {
let extent = dsconn.layer_extent(&l);
set_layer_buffer_defaults(&mut l, simplify, clip);
let tileset = Tileset {
name: l.name.clone(),
attribution: None,
extent: extent,
layers: vec![l],
};
tilesets.push(tileset);
}
}
datasources
};
let mut svc = MvtService {
datasources: datasources,
grid: grid,
tilesets: tilesets,
cache: cache,
};
svc.connect(); //TODO: ugly - we connect twice
(svc, config)
}
}
pub fn webserver(args: &ArgMatches) {
let (mut service, config) = service_from_args(args);
let mvt_viewer = config.service.mvt.viewer;
let bind: &str = &config.webserver.bind.unwrap_or("127.0.0.1".to_string());
let port = config.webserver.port.unwrap_or(6767);
let threads = config.webserver.threads.unwrap_or(4) as usize;
let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300);
service.prepare_feature_queries();
service.init_cache();
let mut tileset_infos: Vec<TilesetInfo> = service
.tilesets
.iter()
.map(|set| TilesetInfo::from_tileset(&set))
.collect();
tileset_infos.sort_by_key(|ti| ti.name.clone());
let mut server = Nickel::with_data(service);
server.options = Options::default().thread_count(Some(threads));
// Avoid thread exhaustion caused by hypers keep_alive handling (https://github.com/hyperium/hyper/issues/368)
server.keep_alive_timeout(None);
server.utilize(log_request);
server.get(
"/**(.style)?.json",
middleware! { |_req, mut res|
res.set(MediaType::Json);
res.set(AccessControlAllowMethods(vec![Method::Get]));
res.set(AccessControlAllowOrigin::Any);
},
);
server.get(
"/index.json",
middleware! { |_req, res|
let service: &MvtService = res.server_data();
let json = service.get_mvt_metadata().unwrap();
serde_json::to_vec(&json).unwrap()
| {
let mut hasviewer = true;
let layerinfos: Vec<String> = set.layers
.iter()
.map(|l| {
let geom_type = l.geometry_type.clone().unwrap_or("UNKNOWN".to_string());
hasviewer = hasviewer
&& [
"POINT",
"LINESTRING",
"POLYGON",
"MULTPOINT",
"MULTILINESTRING",
"MULTIPOLYGON",
].contains(&(&geom_type as &str));
format!("{} [{}]", &l.name, &geom_type)
})
.collect();
TilesetInfo {
name: set.name.clone(), | identifier_body |
|
server.rs | MediaType::Ico,
);
static_files.add(
"index.html",
include_bytes!("static/index.html"),
MediaType::Html,
);
static_files.add(
"viewer.js",
include_bytes!("static/viewer.js"),
MediaType::Js,
);
static_files.add(
"viewer.css",
include_bytes!("static/viewer.css"),
MediaType::Css,
);
static_files.add(
"maputnik.html",
include_bytes!("static/maputnik.html"),
MediaType::Html,
);
static_files.add(
"maputnik.js",
include_bytes!("static/maputnik.js"),
MediaType::Js,
);
static_files.add(
"maputnik-vendor.js",
include_bytes!("static/maputnik-vendor.js"),
MediaType::Js,
);
static_files.add(
"img/maputnik.png",
include_bytes!("static/img/maputnik.png"),
MediaType::Png,
);
static_files.add(
"fonts/Roboto-Regular.ttf",
include_bytes!("static/fonts/Roboto-Regular.ttf"),
MediaType::Ttf,
);
static_files.add(
"fonts/Roboto-Medium.ttf",
include_bytes!("static/fonts/Roboto-Medium.ttf"),
MediaType::Ttf,
);
static_files
}
fn add(&mut self, name: &'static str, data: &'static [u8], media_type: MediaType) {
self.files.insert(name, (data, media_type));
}
fn content(&self, base: Option<&str>, name: String) -> Option<&(&[u8], MediaType)> {
let mut key = if name == "." {
"index.html".to_string()
} else {
name
};
if let Some(path) = base {
key = format!("{}/{}", path, key);
}
self.files.get(&key as &str)
}
}
include!(concat!(env!("OUT_DIR"), "/fonts.rs"));
static DINO: &'static str = " xxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxx xxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx
xxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxx
xxxxxxxxxxx
xxxxxxxxxx
xxxxxxxxx
xxxxxxx
xxxxxx
xxxxxxx";
fn set_layer_buffer_defaults(layer: &mut Layer, simplify: bool, clip: bool) {
layer.simplify = simplify;
if simplify {
// Limit features by default unless simplify is set to false
layer.query_limit = Some(1000);
}
layer.buffer_size = match layer.geometry_type {
Some(ref geom) => {
if clip {
if geom.contains("POLYGON") {
Some(1)
} else |
} else {
None
}
}
None => None,
};
}
pub fn service_from_args(args: &ArgMatches) -> (MvtService, ApplicationCfg) {
if let Some(cfgpath) = args.value_of("config") {
info!("Reading configuration from '{}'", cfgpath);
for argname in vec!["dbconn", "datasource", "qgs"] {
if args.value_of(argname).is_some() {
warn!("Ignoring argument `{}`", argname);
}
}
let config = read_config(cfgpath).unwrap_or_else(|err| {
println!("Error reading configuration - {} ", err);
process::exit(1)
});
let mut svc = MvtService::from_config(&config).unwrap_or_else(|err| {
println!("Error reading configuration - {} ", err);
process::exit(1)
});
svc.connect();
(svc, config)
} else {
let bind = args.value_of("bind").unwrap_or("127.0.0.1");
let port =
u16::from_str(args.value_of("port").unwrap_or("6767")).expect("Invalid port number");
let mut config: ApplicationCfg = parse_config(DEFAULT_CONFIG.to_string(), "").unwrap();
config.webserver.bind = Some(bind.to_string());
config.webserver.port = Some(port);
let cache = match args.value_of("cache") {
None => Tilecache::Nocache(Nocache),
Some(dir) => Tilecache::Filecache(Filecache {
basepath: dir.to_string(),
baseurl: None,
}),
};
let simplify = bool::from_str(args.value_of("simplify").unwrap_or("true")).unwrap_or(false);
let clip = bool::from_str(args.value_of("clip").unwrap_or("true")).unwrap_or(false);
let grid = Grid::web_mercator();
let mut tilesets = Vec::new();
let datasources = if let Some(qgs) = args.value_of("qgs") {
info!("Reading configuration from '{}'", qgs);
let (datasources, mut tileset) = read_qgs(qgs);
for layer in tileset.layers.iter_mut() {
set_layer_buffer_defaults(layer, simplify, clip);
}
tilesets.push(tileset);
datasources
} else {
let datasources = Datasources::from_args(args);
if datasources.datasources.is_empty() {
println!("Either 'config', 'dbconn' or 'datasource' is required");
process::exit(1)
}
let detect_geometry_types = true; //TODO: add option (maybe slow for many geometries)
for (_name, ds) in &datasources.datasources {
let dsconn = ds.connected();
let mut layers = dsconn.detect_layers(detect_geometry_types);
while let Some(mut l) = layers.pop() {
let extent = dsconn.layer_extent(&l);
set_layer_buffer_defaults(&mut l, simplify, clip);
let tileset = Tileset {
name: l.name.clone(),
attribution: None,
extent: extent,
layers: vec![l],
};
tilesets.push(tileset);
}
}
datasources
};
let mut svc = MvtService {
datasources: datasources,
grid: grid,
tilesets: tilesets,
cache: cache,
};
svc.connect(); //TODO: ugly - we connect twice
(svc, config)
}
}
pub fn webserver(args: &ArgMatches) {
let (mut service, config) = service_from_args(args);
let mvt_viewer = config.service.mvt.viewer;
let bind: &str = &config.webserver.bind.unwrap_or("127.0.0.1".to_string());
let port = config.webserver.port.unwrap_or(6767);
let threads = config.webserver.threads.unwrap_or(4) as usize;
let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300);
service.prepare_feature_queries();
service.init_cache();
let mut tileset_infos: Vec<TilesetInfo> = service
.tilesets
.iter()
.map(|set| TilesetInfo::from_tileset(&set))
.collect();
tileset_infos.sort_by_key(|ti| ti.name.clone());
let mut server = Nickel::with_data(service);
server.options = Options::default().thread_count(Some(threads));
// Avoid thread exhaustion caused by hypers keep_alive handling (https://github.com/hyperium/hyper/issues/368)
server.keep_alive_timeout(None);
server.utilize(log_request);
server.get(
"/**(.style)?.json",
middleware! { |_req, mut res|
res.set(MediaType::Json);
res.set(AccessControlAllowMethods(vec![Method::Get]));
res.set(AccessControlAllowOrigin::Any);
},
);
server.get(
"/index.json",
middleware! { |_req, res|
let service: &MvtService = res.server_data();
let json = service.get_mvt_metadata().unwrap();
serde_json::to_vec(&json).unwrap()
},
);
// Font list for Maputnik
server.get(
"/fontstacks.json",
middleware! { |_req, _res|
let json = json!(["Roboto Medium","Roboto Regular"]);
serde_json::to_vec(&json).unwrap()
},
);
// Fonts for Maputnik
// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf
server.get(
"/fonts/:fonts/:range.pbf",
middleware! { |req, mut res|
let fontpbfs = fonts();
let fontlist = req.param("fonts").unwrap();
let range = req.param("range").unwrap();
let mut fonts = fontlist.split(",").collect::<Vec<_>>();
fonts.push("Roboto Regular"); // Fallback
for font in fonts {
let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range);
debug!("Font lookup: {}", key);
if let Some(p | {
Some(0)
} | conditional_block |
server.rs | () -> StaticFiles {
let mut static_files = StaticFiles {
files: HashMap::new(),
};
static_files.add(
"favicon.ico",
include_bytes!("static/favicon.ico"),
MediaType::Ico,
);
static_files.add(
"index.html",
include_bytes!("static/index.html"),
MediaType::Html,
);
static_files.add(
"viewer.js",
include_bytes!("static/viewer.js"),
MediaType::Js,
);
static_files.add(
"viewer.css",
include_bytes!("static/viewer.css"),
MediaType::Css,
);
static_files.add(
"maputnik.html",
include_bytes!("static/maputnik.html"),
MediaType::Html,
);
static_files.add(
"maputnik.js",
include_bytes!("static/maputnik.js"),
MediaType::Js,
);
static_files.add(
"maputnik-vendor.js",
include_bytes!("static/maputnik-vendor.js"),
MediaType::Js,
);
static_files.add(
"img/maputnik.png",
include_bytes!("static/img/maputnik.png"),
MediaType::Png,
);
static_files.add(
"fonts/Roboto-Regular.ttf",
include_bytes!("static/fonts/Roboto-Regular.ttf"),
MediaType::Ttf,
);
static_files.add(
"fonts/Roboto-Medium.ttf",
include_bytes!("static/fonts/Roboto-Medium.ttf"),
MediaType::Ttf,
);
static_files
}
fn add(&mut self, name: &'static str, data: &'static [u8], media_type: MediaType) {
self.files.insert(name, (data, media_type));
}
fn content(&self, base: Option<&str>, name: String) -> Option<&(&[u8], MediaType)> {
let mut key = if name == "." {
"index.html".to_string()
} else {
name
};
if let Some(path) = base {
key = format!("{}/{}", path, key);
}
self.files.get(&key as &str)
}
}
include!(concat!(env!("OUT_DIR"), "/fonts.rs"));
static DINO: &'static str = " xxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxx xxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx
xxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxx
xxxxxxxxxxx
xxxxxxxxxx
xxxxxxxxx
xxxxxxx
xxxxxx
xxxxxxx";
fn set_layer_buffer_defaults(layer: &mut Layer, simplify: bool, clip: bool) {
layer.simplify = simplify;
if simplify {
// Limit features by default unless simplify is set to false
layer.query_limit = Some(1000);
}
layer.buffer_size = match layer.geometry_type {
Some(ref geom) => {
if clip {
if geom.contains("POLYGON") {
Some(1)
} else {
Some(0)
}
} else {
None
}
}
None => None,
};
}
pub fn service_from_args(args: &ArgMatches) -> (MvtService, ApplicationCfg) {
if let Some(cfgpath) = args.value_of("config") {
info!("Reading configuration from '{}'", cfgpath);
for argname in vec!["dbconn", "datasource", "qgs"] {
if args.value_of(argname).is_some() {
warn!("Ignoring argument `{}`", argname);
}
}
let config = read_config(cfgpath).unwrap_or_else(|err| {
println!("Error reading configuration - {} ", err);
process::exit(1)
});
let mut svc = MvtService::from_config(&config).unwrap_or_else(|err| {
println!("Error reading configuration - {} ", err);
process::exit(1)
});
svc.connect();
(svc, config)
} else {
let bind = args.value_of("bind").unwrap_or("127.0.0.1");
let port =
u16::from_str(args.value_of("port").unwrap_or("6767")).expect("Invalid port number");
let mut config: ApplicationCfg = parse_config(DEFAULT_CONFIG.to_string(), "").unwrap();
config.webserver.bind = Some(bind.to_string());
config.webserver.port = Some(port);
let cache = match args.value_of("cache") {
None => Tilecache::Nocache(Nocache),
Some(dir) => Tilecache::Filecache(Filecache {
basepath: dir.to_string(),
baseurl: None,
}),
};
let simplify = bool::from_str(args.value_of("simplify").unwrap_or("true")).unwrap_or(false);
let clip = bool::from_str(args.value_of("clip").unwrap_or("true")).unwrap_or(false);
let grid = Grid::web_mercator();
let mut tilesets = Vec::new();
let datasources = if let Some(qgs) = args.value_of("qgs") {
info!("Reading configuration from '{}'", qgs);
let (datasources, mut tileset) = read_qgs(qgs);
for layer in tileset.layers.iter_mut() {
set_layer_buffer_defaults(layer, simplify, clip);
}
tilesets.push(tileset);
datasources
} else {
let datasources = Datasources::from_args(args);
if datasources.datasources.is_empty() {
println!("Either 'config', 'dbconn' or 'datasource' is required");
process::exit(1)
}
let detect_geometry_types = true; //TODO: add option (maybe slow for many geometries)
for (_name, ds) in &datasources.datasources {
let dsconn = ds.connected();
let mut layers = dsconn.detect_layers(detect_geometry_types);
while let Some(mut l) = layers.pop() {
let extent = dsconn.layer_extent(&l);
set_layer_buffer_defaults(&mut l, simplify, clip);
let tileset = Tileset {
name: l.name.clone(),
attribution: None,
extent: extent,
layers: vec![l],
};
tilesets.push(tileset);
}
}
datasources
};
let mut svc = MvtService {
datasources: datasources,
grid: grid,
tilesets: tilesets,
cache: cache,
};
svc.connect(); //TODO: ugly - we connect twice
(svc, config)
}
}
pub fn webserver(args: &ArgMatches) {
let (mut service, config) = service_from_args(args);
let mvt_viewer = config.service.mvt.viewer;
let bind: &str = &config.webserver.bind.unwrap_or("127.0.0.1".to_string());
let port = config.webserver.port.unwrap_or(6767);
let threads = config.webserver.threads.unwrap_or(4) as usize;
let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300);
service.prepare_feature_queries();
service.init_cache();
let mut tileset_infos: Vec<TilesetInfo> = service
.tilesets
.iter()
.map(|set| TilesetInfo::from_tileset(&set))
.collect();
tileset_infos.sort_by_key(|ti| ti.name.clone());
let mut server = Nickel::with_data(service);
server.options = Options::default().thread_count(Some(threads));
// Avoid thread exhaustion caused by hypers keep_alive handling (https://github.com/hyperium/hyper/issues/368)
server.keep_alive_timeout(None);
server.utilize(log_request);
server.get(
"/**(.style)?.json",
middleware! { |_req, mut res|
res.set(MediaType::Json);
res.set(AccessControlAllowMethods(vec![Method::Get]));
res.set(AccessControlAllowOrigin::Any);
},
);
server.get(
"/index.json",
middleware! { |_req, res|
let service: &MvtService = res.server_data();
let json = service.get_mvt_metadata().unwrap();
serde_json::to_vec(&json).unwrap()
},
);
// Font list for Maputnik
server.get(
"/fontstacks.json",
middleware! { |_req, _res|
let json = json!(["Roboto Medium","Roboto Regular"]);
serde_json::to_vec(&json).unwrap()
},
);
// Fonts for Maputnik
// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf
server.get(
"/fonts/:fonts/:range.pbf",
middleware! { |req, mut res|
let fontpbfs = fonts();
let fontlist = req.param("fonts").unwrap();
let range = req.param("range").unwrap();
let mut fonts = fontlist.split(",").collect::<Vec<_>>();
fonts.push("Roboto Regular"); // Fallback
| init | identifier_name |
|
server.rs | .html".to_string()
} else {
name
};
if let Some(path) = base {
key = format!("{}/{}", path, key);
}
self.files.get(&key as &str)
}
}
include!(concat!(env!("OUT_DIR"), "/fonts.rs"));
static DINO: &'static str = " xxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxx xxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx
xxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxx
xxxxxxxxxxx
xxxxxxxxxx
xxxxxxxxx
xxxxxxx
xxxxxx
xxxxxxx";
fn set_layer_buffer_defaults(layer: &mut Layer, simplify: bool, clip: bool) {
layer.simplify = simplify;
if simplify {
// Limit features by default unless simplify is set to false
layer.query_limit = Some(1000);
}
layer.buffer_size = match layer.geometry_type {
Some(ref geom) => {
if clip {
if geom.contains("POLYGON") {
Some(1)
} else {
Some(0)
}
} else {
None
}
}
None => None,
};
}
pub fn service_from_args(args: &ArgMatches) -> (MvtService, ApplicationCfg) {
if let Some(cfgpath) = args.value_of("config") {
info!("Reading configuration from '{}'", cfgpath);
for argname in vec!["dbconn", "datasource", "qgs"] {
if args.value_of(argname).is_some() {
warn!("Ignoring argument `{}`", argname);
}
}
let config = read_config(cfgpath).unwrap_or_else(|err| {
println!("Error reading configuration - {} ", err);
process::exit(1)
});
let mut svc = MvtService::from_config(&config).unwrap_or_else(|err| {
println!("Error reading configuration - {} ", err);
process::exit(1)
});
svc.connect();
(svc, config)
} else {
let bind = args.value_of("bind").unwrap_or("127.0.0.1");
let port =
u16::from_str(args.value_of("port").unwrap_or("6767")).expect("Invalid port number");
let mut config: ApplicationCfg = parse_config(DEFAULT_CONFIG.to_string(), "").unwrap();
config.webserver.bind = Some(bind.to_string());
config.webserver.port = Some(port);
let cache = match args.value_of("cache") {
None => Tilecache::Nocache(Nocache),
Some(dir) => Tilecache::Filecache(Filecache {
basepath: dir.to_string(),
baseurl: None,
}),
};
let simplify = bool::from_str(args.value_of("simplify").unwrap_or("true")).unwrap_or(false);
let clip = bool::from_str(args.value_of("clip").unwrap_or("true")).unwrap_or(false);
let grid = Grid::web_mercator();
let mut tilesets = Vec::new();
let datasources = if let Some(qgs) = args.value_of("qgs") {
info!("Reading configuration from '{}'", qgs);
let (datasources, mut tileset) = read_qgs(qgs);
for layer in tileset.layers.iter_mut() {
set_layer_buffer_defaults(layer, simplify, clip);
}
tilesets.push(tileset);
datasources
} else {
let datasources = Datasources::from_args(args);
if datasources.datasources.is_empty() {
println!("Either 'config', 'dbconn' or 'datasource' is required");
process::exit(1)
}
let detect_geometry_types = true; //TODO: add option (maybe slow for many geometries)
for (_name, ds) in &datasources.datasources {
let dsconn = ds.connected();
let mut layers = dsconn.detect_layers(detect_geometry_types);
while let Some(mut l) = layers.pop() {
let extent = dsconn.layer_extent(&l);
set_layer_buffer_defaults(&mut l, simplify, clip);
let tileset = Tileset {
name: l.name.clone(),
attribution: None,
extent: extent,
layers: vec![l],
};
tilesets.push(tileset);
}
}
datasources
};
let mut svc = MvtService {
datasources: datasources,
grid: grid,
tilesets: tilesets,
cache: cache,
};
svc.connect(); //TODO: ugly - we connect twice
(svc, config)
}
}
pub fn webserver(args: &ArgMatches) {
let (mut service, config) = service_from_args(args);
let mvt_viewer = config.service.mvt.viewer;
let bind: &str = &config.webserver.bind.unwrap_or("127.0.0.1".to_string());
let port = config.webserver.port.unwrap_or(6767);
let threads = config.webserver.threads.unwrap_or(4) as usize;
let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300);
service.prepare_feature_queries();
service.init_cache();
let mut tileset_infos: Vec<TilesetInfo> = service
.tilesets
.iter()
.map(|set| TilesetInfo::from_tileset(&set))
.collect();
tileset_infos.sort_by_key(|ti| ti.name.clone());
let mut server = Nickel::with_data(service);
server.options = Options::default().thread_count(Some(threads));
// Avoid thread exhaustion caused by hypers keep_alive handling (https://github.com/hyperium/hyper/issues/368)
server.keep_alive_timeout(None);
server.utilize(log_request);
server.get(
"/**(.style)?.json",
middleware! { |_req, mut res|
res.set(MediaType::Json);
res.set(AccessControlAllowMethods(vec![Method::Get]));
res.set(AccessControlAllowOrigin::Any);
},
);
server.get(
"/index.json",
middleware! { |_req, res|
let service: &MvtService = res.server_data();
let json = service.get_mvt_metadata().unwrap();
serde_json::to_vec(&json).unwrap()
},
);
// Font list for Maputnik
server.get(
"/fontstacks.json",
middleware! { |_req, _res|
let json = json!(["Roboto Medium","Roboto Regular"]);
serde_json::to_vec(&json).unwrap()
},
);
// Fonts for Maputnik
// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf
server.get(
"/fonts/:fonts/:range.pbf",
middleware! { |req, mut res|
let fontpbfs = fonts();
let fontlist = req.param("fonts").unwrap();
let range = req.param("range").unwrap();
let mut fonts = fontlist.split(",").collect::<Vec<_>>();
fonts.push("Roboto Regular"); // Fallback
for font in fonts {
let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range);
debug!("Font lookup: {}", key);
if let Some(pbf) = fontpbfs.get(&key as &str) {
res.set_header_fallback(|| ContentType("application/x-protobuf".to_owned()));
res.set_header_fallback(|| ContentEncoding(vec![Encoding::Gzip]));
return res.send(*pbf)
}
}
},
);
server.get(
"/:tileset.json",
middleware! { |req, res|
let service: &MvtService = res.server_data();
let tileset = req.param("tileset").unwrap();
let host = req.origin.headers.get::<header::Host>().unwrap();
let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80));
let json = service.get_tilejson(&baseurl, &tileset).unwrap();
serde_json::to_vec(&json).unwrap()
},
);
server.get(
"/:tileset.style.json",
middleware! { |req, res|
let service: &MvtService = res.server_data();
let tileset = req.param("tileset").unwrap();
let host = req.origin.headers.get::<header::Host>().unwrap();
let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80));
let json = service.get_stylejson(&baseurl, &tileset).unwrap();
serde_json::to_vec(&json).unwrap()
},
);
server.get(
"/:tileset/metadata.json",
middleware! { |req, res|
let service: &MvtService = res.server_data();
let tileset = req.param("tileset").unwrap(); | let json = service.get_mbtiles_metadata(&tileset).unwrap(); | random_line_split |
|
mod.rs | as_ref()
.join(crate::cfg::TEMPLATE_SAMPLES_DIRNAME);
let samples = Sample::find_from_folder(template_loc, &samples_folder, &tmp_dir)?;
info!(nb_samples_detected = samples.len(), ?samples_folder);
for sample in samples {
info!(sample = ?sample.name, args = ?sample.args, "checking...");
let run = SampleRun::run(&sample)?;
is_success = is_success && run.is_success();
show_differences(&sample.name, &run.diffs, review_mode)?;
}
Ok(is_success || review_mode)
}
//TODO move to ui module to be customizable (in future)
pub fn show_differences(name: &str, entries: &[EntryDiff], review_mode: bool) -> Result<()> {
let mut updates_count = 0;
for entry in entries {
println!("{:-^1$}", "-", 80);
entry.show();
if review_mode && entry.review()? {
updates_count += 1
}
}
println!("{:-^1$}", "-", 80);
println!(
"number of differences in sample '{}': {}",
name,
entries.len(),
);
if review_mode {
println!("number of updates in sample '{}': {}", name, updates_count);
}
println!("{:-^1$}", "-", 80);
Ok(())
}
impl EntryDiff {
fn show(&self) {
match &self.difference {
Difference::Presence { expect, actual } => {
if *expect && !*actual {
println!(
"missing file in the actual: {}",
self.relative_path.to_string_lossy()
);
} else {
println!(
"unexpected file in the actual: {}",
self.relative_path.to_string_lossy()
);
}
}
Difference::Kind { expect, actual } => {
println!(
"difference kind of entry on: {}, expected: {:?}, actual: {:?}",
self.relative_path.to_string_lossy(),
expect,
actual
);
}
Difference::StringContent { expect, actual } => {
println!(
"difference detected on: {}\n",
self.relative_path.to_string_lossy()
);
crate::ui::show_difference_text(expect, actual, true);
}
Difference::BinaryContent {
expect_md5,
actual_md5,
} => {
println!(
"difference detected on: {} (detected as binary file)\n",
self.relative_path.to_string_lossy()
);
println!("expected md5: {}", expect_md5);
println!("actual md5: {}", actual_md5);
}
}
}
// TODO add test
fn review(&self) -> Result<bool> {
let accept_update = match self.difference {
Difference::Presence { expect, actual } => {
if expect && !actual | else if crate::ui::ask_to_update_sample("Accept to add into sample ?")? {
let path = self.actual_base_path.join(&self.relative_path);
let is_dir = std::fs::metadata(&path)?.is_dir();
if is_dir {
std::fs::create_dir_all(self.expect_base_path.join(&self.relative_path))?;
} else {
std::fs::copy(path, self.expect_base_path.join(&self.relative_path))?;
}
std::fs::copy(
self.actual_base_path.join(&self.relative_path),
self.expect_base_path.join(&self.relative_path),
)?;
true
} else {
false
}
}
_ => {
if crate::ui::ask_to_update_sample("Accept to update file into sample ?")? {
std::fs::copy(
self.actual_base_path.join(&self.relative_path),
self.expect_base_path.join(&self.relative_path),
)?;
true
} else {
false
}
}
};
Ok(accept_update)
}
}
#[derive(Debug, Clone)]
struct Sample {
pub name: String,
pub args: ApplyOpts,
pub expected: PathBuf,
pub existing: PathBuf,
pub ignores: Vec<PathPattern>,
}
impl Sample {
// scan folder to find sample to test (xxx.args, xxx.expected, xxx.existing)
fn find_from_folder<B: AsRef<Path>>(
template_loc: &SourceLoc,
samples_folder: B,
tmp_dir: &TempDir,
) -> Result<Vec<Sample>> {
let mut out = vec![];
for e in fs::read_dir(&samples_folder).map_err(|source| Error::ListFolder {
path: samples_folder.as_ref().into(),
source,
})? {
let path = e?.path();
if path
.extension()
.filter(|x| x.to_string_lossy() == "expected")
.is_some()
{
let name = path
.file_stem()
.expect("folder should have a file name without extension")
.to_string_lossy()
.to_string();
let expected = path.clone();
let existing = path.with_extension("existing");
let args_file = path.with_extension("cfg.yaml");
let destination = tmp_dir.path().join(&name).to_path_buf();
let sample_cfg = SampleCfg::from_file(args_file)?;
let args = sample_cfg.make_args(template_loc, destination)?;
let ignores = sample_cfg.make_ignores()?;
out.push(Sample {
name,
args,
expected,
existing,
ignores,
});
}
}
Ok(out)
}
}
#[derive(Deserialize, Serialize, Debug, Default, Clone, PartialEq)]
struct SampleCfg {
apply_args: Option<Vec<String>>,
check_ignores: Option<Vec<String>>,
}
impl SampleCfg {
fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> {
let v = if file.as_ref().exists() {
let cfg_str = fs::read_to_string(file.as_ref()).map_err(|source| Error::ReadFile {
path: file.as_ref().into(),
source,
})?;
serde_yaml::from_str::<SampleCfg>(&cfg_str)?
} else {
SampleCfg::default()
};
Ok(v)
}
fn make_ignores(&self) -> Result<Vec<PathPattern>> {
use std::str::FromStr;
let trim_chars: &[_] = &['\r', '\n', ' ', '\t', '"', '\''];
let ignores = self
.check_ignores
.clone()
.unwrap_or_default()
.iter()
.map(|v| v.trim_matches(trim_chars))
.filter(|v| !v.is_empty())
.map(PathPattern::from_str)
.collect::<Result<Vec<PathPattern>>>()?;
Ok(ignores)
}
fn make_args<B: AsRef<Path>>(
&self,
template_loc: &SourceLoc,
destination: B,
) -> Result<ApplyOpts> {
let cfg_args = self.apply_args.clone().unwrap_or_default();
let mut args_line = cfg_args.iter().map(|s| s.as_str()).collect::<Vec<_>>();
args_line.push("--confirm");
args_line.push("never");
args_line.push("--no-interaction");
args_line.push("--destination");
args_line.push(
destination
.as_ref()
.to_str()
.expect("to convert destination path into str"),
);
args_line.push("--source");
args_line.push(&template_loc.uri.raw);
if let Some(rev) = &template_loc.rev {
args_line.push("--rev");
args_line.push(rev);
}
let buff = template_loc.subfolder.as_ref().map(|v| v.to_string_lossy());
if let Some(subfolder) = buff.as_ref() {
args_line.push("--source-subfolder");
args_line.push(subfolder);
}
//HACK from_iter_safe expect first entry to be the binary name,
// unless clap::AppSettings::NoBinaryName has been used
// (but I don't know how to use it in this case, patch is welcomed)
args_line.insert(0, "apply");
args_line.insert(0, "ffizer");
CliOpts::try_parse_from(args_line)
.map_err(Error::from)
.and_then(|o| match o.cmd {
Command::Apply(g) => Ok(g),
e => Err(Error::Unknown(format!(
"command should always be parsed as 'apply' not as {:?}",
e
))),
})
}
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct SampleRun {
diffs: Vec<EntryDiff>,
}
impl SampleRun {
#[tracing::instrument]
pub fn run(sample: &Sample) -> Result<SampleRun> {
// ALTERNATIVE: fork a sub-process to run current ffizer in apply mode
let destination = &sample.args.dst_folder;
if sample.existing.exists() {
copy(&sample.existing, destination)?;
}
let ctx = crate::Ctx {
cmd_opt: sample | {
let path = self.expect_base_path.join(&self.relative_path);
let is_dir = std::fs::metadata(&path)?.is_dir();
if crate::ui::ask_to_update_sample("Accept to remove from sample ?")? {
if is_dir {
std::fs::remove_dir_all(&path)?;
} else {
std::fs::remove_file(&path)?;
}
true
} else {
false
}
} | conditional_block |
mod.rs | (), ?samples_folder);
for sample in samples {
info!(sample = ?sample.name, args = ?sample.args, "checking...");
let run = SampleRun::run(&sample)?;
is_success = is_success && run.is_success();
show_differences(&sample.name, &run.diffs, review_mode)?;
}
Ok(is_success || review_mode)
}
//TODO move to ui module to be customizable (in future)
pub fn show_differences(name: &str, entries: &[EntryDiff], review_mode: bool) -> Result<()> {
let mut updates_count = 0;
for entry in entries {
println!("{:-^1$}", "-", 80);
entry.show();
if review_mode && entry.review()? {
updates_count += 1
}
}
println!("{:-^1$}", "-", 80);
println!(
"number of differences in sample '{}': {}",
name,
entries.len(),
);
if review_mode {
println!("number of updates in sample '{}': {}", name, updates_count);
}
println!("{:-^1$}", "-", 80);
Ok(())
}
impl EntryDiff {
fn show(&self) {
match &self.difference {
Difference::Presence { expect, actual } => {
if *expect && !*actual {
println!(
"missing file in the actual: {}",
self.relative_path.to_string_lossy()
);
} else {
println!(
"unexpected file in the actual: {}",
self.relative_path.to_string_lossy()
);
}
}
Difference::Kind { expect, actual } => {
println!(
"difference kind of entry on: {}, expected: {:?}, actual: {:?}",
self.relative_path.to_string_lossy(),
expect,
actual
);
}
Difference::StringContent { expect, actual } => {
println!(
"difference detected on: {}\n",
self.relative_path.to_string_lossy()
);
crate::ui::show_difference_text(expect, actual, true);
}
Difference::BinaryContent {
expect_md5,
actual_md5,
} => {
println!(
"difference detected on: {} (detected as binary file)\n",
self.relative_path.to_string_lossy()
);
println!("expected md5: {}", expect_md5);
println!("actual md5: {}", actual_md5);
}
}
}
// TODO add test
fn review(&self) -> Result<bool> {
let accept_update = match self.difference {
Difference::Presence { expect, actual } => {
if expect && !actual {
let path = self.expect_base_path.join(&self.relative_path);
let is_dir = std::fs::metadata(&path)?.is_dir();
if crate::ui::ask_to_update_sample("Accept to remove from sample ?")? {
if is_dir {
std::fs::remove_dir_all(&path)?;
} else {
std::fs::remove_file(&path)?;
}
true
} else {
false
}
} else if crate::ui::ask_to_update_sample("Accept to add into sample ?")? {
let path = self.actual_base_path.join(&self.relative_path);
let is_dir = std::fs::metadata(&path)?.is_dir();
if is_dir {
std::fs::create_dir_all(self.expect_base_path.join(&self.relative_path))?;
} else {
std::fs::copy(path, self.expect_base_path.join(&self.relative_path))?;
}
std::fs::copy(
self.actual_base_path.join(&self.relative_path),
self.expect_base_path.join(&self.relative_path),
)?;
true
} else {
false
}
}
_ => {
if crate::ui::ask_to_update_sample("Accept to update file into sample ?")? {
std::fs::copy(
self.actual_base_path.join(&self.relative_path),
self.expect_base_path.join(&self.relative_path),
)?;
true
} else {
false
}
}
};
Ok(accept_update)
}
}
#[derive(Debug, Clone)]
struct Sample {
pub name: String,
pub args: ApplyOpts,
pub expected: PathBuf,
pub existing: PathBuf,
pub ignores: Vec<PathPattern>,
}
impl Sample {
// scan folder to find sample to test (xxx.args, xxx.expected, xxx.existing)
fn find_from_folder<B: AsRef<Path>>(
template_loc: &SourceLoc,
samples_folder: B,
tmp_dir: &TempDir,
) -> Result<Vec<Sample>> {
let mut out = vec![];
for e in fs::read_dir(&samples_folder).map_err(|source| Error::ListFolder {
path: samples_folder.as_ref().into(),
source,
})? {
let path = e?.path();
if path
.extension()
.filter(|x| x.to_string_lossy() == "expected")
.is_some()
{
let name = path
.file_stem()
.expect("folder should have a file name without extension")
.to_string_lossy()
.to_string();
let expected = path.clone();
let existing = path.with_extension("existing");
let args_file = path.with_extension("cfg.yaml");
let destination = tmp_dir.path().join(&name).to_path_buf();
let sample_cfg = SampleCfg::from_file(args_file)?;
let args = sample_cfg.make_args(template_loc, destination)?;
let ignores = sample_cfg.make_ignores()?;
out.push(Sample {
name,
args,
expected,
existing,
ignores,
});
}
}
Ok(out)
}
}
#[derive(Deserialize, Serialize, Debug, Default, Clone, PartialEq)]
struct SampleCfg {
apply_args: Option<Vec<String>>,
check_ignores: Option<Vec<String>>,
}
impl SampleCfg {
fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> {
let v = if file.as_ref().exists() {
let cfg_str = fs::read_to_string(file.as_ref()).map_err(|source| Error::ReadFile {
path: file.as_ref().into(),
source,
})?;
serde_yaml::from_str::<SampleCfg>(&cfg_str)?
} else {
SampleCfg::default()
};
Ok(v)
}
fn make_ignores(&self) -> Result<Vec<PathPattern>> {
use std::str::FromStr;
let trim_chars: &[_] = &['\r', '\n', ' ', '\t', '"', '\''];
let ignores = self
.check_ignores
.clone()
.unwrap_or_default()
.iter()
.map(|v| v.trim_matches(trim_chars))
.filter(|v| !v.is_empty())
.map(PathPattern::from_str)
.collect::<Result<Vec<PathPattern>>>()?;
Ok(ignores)
}
fn make_args<B: AsRef<Path>>(
&self,
template_loc: &SourceLoc,
destination: B,
) -> Result<ApplyOpts> {
let cfg_args = self.apply_args.clone().unwrap_or_default();
let mut args_line = cfg_args.iter().map(|s| s.as_str()).collect::<Vec<_>>();
args_line.push("--confirm");
args_line.push("never");
args_line.push("--no-interaction");
args_line.push("--destination");
args_line.push(
destination
.as_ref()
.to_str()
.expect("to convert destination path into str"),
);
args_line.push("--source");
args_line.push(&template_loc.uri.raw);
if let Some(rev) = &template_loc.rev {
args_line.push("--rev");
args_line.push(rev);
}
let buff = template_loc.subfolder.as_ref().map(|v| v.to_string_lossy());
if let Some(subfolder) = buff.as_ref() {
args_line.push("--source-subfolder");
args_line.push(subfolder);
}
//HACK from_iter_safe expect first entry to be the binary name,
// unless clap::AppSettings::NoBinaryName has been used
// (but I don't know how to use it in this case, patch is welcomed)
args_line.insert(0, "apply");
args_line.insert(0, "ffizer");
CliOpts::try_parse_from(args_line)
.map_err(Error::from)
.and_then(|o| match o.cmd {
Command::Apply(g) => Ok(g),
e => Err(Error::Unknown(format!(
"command should always be parsed as 'apply' not as {:?}",
e
))),
})
}
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct SampleRun {
diffs: Vec<EntryDiff>,
}
impl SampleRun {
#[tracing::instrument]
pub fn run(sample: &Sample) -> Result<SampleRun> | {
// ALTERNATIVE: fork a sub-process to run current ffizer in apply mode
let destination = &sample.args.dst_folder;
if sample.existing.exists() {
copy(&sample.existing, destination)?;
}
let ctx = crate::Ctx {
cmd_opt: sample.args.clone(),
};
crate::process(&ctx)?;
let diffs = dir_diff_list::search_diff(destination, &sample.expected, &sample.ignores)?;
Ok(SampleRun { diffs })
} | identifier_body |
|
mod.rs | as_ref()
.join(crate::cfg::TEMPLATE_SAMPLES_DIRNAME);
let samples = Sample::find_from_folder(template_loc, &samples_folder, &tmp_dir)?;
info!(nb_samples_detected = samples.len(), ?samples_folder);
for sample in samples {
info!(sample = ?sample.name, args = ?sample.args, "checking...");
let run = SampleRun::run(&sample)?;
is_success = is_success && run.is_success();
show_differences(&sample.name, &run.diffs, review_mode)?;
}
Ok(is_success || review_mode)
}
//TODO move to ui module to be customizable (in future)
pub fn show_differences(name: &str, entries: &[EntryDiff], review_mode: bool) -> Result<()> {
let mut updates_count = 0;
for entry in entries {
println!("{:-^1$}", "-", 80);
entry.show();
if review_mode && entry.review()? {
updates_count += 1
}
}
println!("{:-^1$}", "-", 80);
println!(
"number of differences in sample '{}': {}",
name,
entries.len(),
);
if review_mode {
println!("number of updates in sample '{}': {}", name, updates_count);
}
println!("{:-^1$}", "-", 80);
Ok(())
}
impl EntryDiff {
fn show(&self) {
match &self.difference {
Difference::Presence { expect, actual } => {
if *expect && !*actual {
println!(
"missing file in the actual: {}",
self.relative_path.to_string_lossy()
);
} else {
println!(
"unexpected file in the actual: {}",
self.relative_path.to_string_lossy()
);
}
}
Difference::Kind { expect, actual } => {
println!(
"difference kind of entry on: {}, expected: {:?}, actual: {:?}",
self.relative_path.to_string_lossy(),
expect,
actual
);
}
Difference::StringContent { expect, actual } => {
println!(
"difference detected on: {}\n",
self.relative_path.to_string_lossy()
);
crate::ui::show_difference_text(expect, actual, true);
}
Difference::BinaryContent {
expect_md5,
actual_md5,
} => {
println!(
"difference detected on: {} (detected as binary file)\n",
self.relative_path.to_string_lossy()
);
println!("expected md5: {}", expect_md5);
println!("actual md5: {}", actual_md5);
}
}
}
// TODO add test
fn review(&self) -> Result<bool> {
let accept_update = match self.difference {
Difference::Presence { expect, actual } => {
if expect && !actual {
let path = self.expect_base_path.join(&self.relative_path);
let is_dir = std::fs::metadata(&path)?.is_dir();
if crate::ui::ask_to_update_sample("Accept to remove from sample ?")? {
if is_dir {
std::fs::remove_dir_all(&path)?;
} else {
std::fs::remove_file(&path)?;
}
true
} else {
false
}
} else if crate::ui::ask_to_update_sample("Accept to add into sample ?")? {
let path = self.actual_base_path.join(&self.relative_path);
let is_dir = std::fs::metadata(&path)?.is_dir();
if is_dir {
std::fs::create_dir_all(self.expect_base_path.join(&self.relative_path))?;
} else {
std::fs::copy(path, self.expect_base_path.join(&self.relative_path))?;
}
std::fs::copy(
self.actual_base_path.join(&self.relative_path),
self.expect_base_path.join(&self.relative_path),
)?;
true
} else {
false
}
}
_ => {
if crate::ui::ask_to_update_sample("Accept to update file into sample ?")? {
std::fs::copy(
self.actual_base_path.join(&self.relative_path),
self.expect_base_path.join(&self.relative_path),
)?;
true
} else {
false
}
}
};
Ok(accept_update)
}
}
#[derive(Debug, Clone)]
struct Sample {
pub name: String,
pub args: ApplyOpts,
pub expected: PathBuf,
pub existing: PathBuf,
pub ignores: Vec<PathPattern>,
}
impl Sample {
// scan folder to find sample to test (xxx.args, xxx.expected, xxx.existing) | samples_folder: B,
tmp_dir: &TempDir,
) -> Result<Vec<Sample>> {
let mut out = vec![];
for e in fs::read_dir(&samples_folder).map_err(|source| Error::ListFolder {
path: samples_folder.as_ref().into(),
source,
})? {
let path = e?.path();
if path
.extension()
.filter(|x| x.to_string_lossy() == "expected")
.is_some()
{
let name = path
.file_stem()
.expect("folder should have a file name without extension")
.to_string_lossy()
.to_string();
let expected = path.clone();
let existing = path.with_extension("existing");
let args_file = path.with_extension("cfg.yaml");
let destination = tmp_dir.path().join(&name).to_path_buf();
let sample_cfg = SampleCfg::from_file(args_file)?;
let args = sample_cfg.make_args(template_loc, destination)?;
let ignores = sample_cfg.make_ignores()?;
out.push(Sample {
name,
args,
expected,
existing,
ignores,
});
}
}
Ok(out)
}
}
#[derive(Deserialize, Serialize, Debug, Default, Clone, PartialEq)]
struct SampleCfg {
apply_args: Option<Vec<String>>,
check_ignores: Option<Vec<String>>,
}
impl SampleCfg {
fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> {
let v = if file.as_ref().exists() {
let cfg_str = fs::read_to_string(file.as_ref()).map_err(|source| Error::ReadFile {
path: file.as_ref().into(),
source,
})?;
serde_yaml::from_str::<SampleCfg>(&cfg_str)?
} else {
SampleCfg::default()
};
Ok(v)
}
fn make_ignores(&self) -> Result<Vec<PathPattern>> {
use std::str::FromStr;
let trim_chars: &[_] = &['\r', '\n', ' ', '\t', '"', '\''];
let ignores = self
.check_ignores
.clone()
.unwrap_or_default()
.iter()
.map(|v| v.trim_matches(trim_chars))
.filter(|v| !v.is_empty())
.map(PathPattern::from_str)
.collect::<Result<Vec<PathPattern>>>()?;
Ok(ignores)
}
fn make_args<B: AsRef<Path>>(
&self,
template_loc: &SourceLoc,
destination: B,
) -> Result<ApplyOpts> {
let cfg_args = self.apply_args.clone().unwrap_or_default();
let mut args_line = cfg_args.iter().map(|s| s.as_str()).collect::<Vec<_>>();
args_line.push("--confirm");
args_line.push("never");
args_line.push("--no-interaction");
args_line.push("--destination");
args_line.push(
destination
.as_ref()
.to_str()
.expect("to convert destination path into str"),
);
args_line.push("--source");
args_line.push(&template_loc.uri.raw);
if let Some(rev) = &template_loc.rev {
args_line.push("--rev");
args_line.push(rev);
}
let buff = template_loc.subfolder.as_ref().map(|v| v.to_string_lossy());
if let Some(subfolder) = buff.as_ref() {
args_line.push("--source-subfolder");
args_line.push(subfolder);
}
//HACK from_iter_safe expect first entry to be the binary name,
// unless clap::AppSettings::NoBinaryName has been used
// (but I don't know how to use it in this case, patch is welcomed)
args_line.insert(0, "apply");
args_line.insert(0, "ffizer");
CliOpts::try_parse_from(args_line)
.map_err(Error::from)
.and_then(|o| match o.cmd {
Command::Apply(g) => Ok(g),
e => Err(Error::Unknown(format!(
"command should always be parsed as 'apply' not as {:?}",
e
))),
})
}
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct SampleRun {
diffs: Vec<EntryDiff>,
}
impl SampleRun {
#[tracing::instrument]
pub fn run(sample: &Sample) -> Result<SampleRun> {
// ALTERNATIVE: fork a sub-process to run current ffizer in apply mode
let destination = &sample.args.dst_folder;
if sample.existing.exists() {
copy(&sample.existing, destination)?;
}
let ctx = crate::Ctx {
cmd_opt: sample.args | fn find_from_folder<B: AsRef<Path>>(
template_loc: &SourceLoc, | random_line_split |
mod.rs | as_ref()
.join(crate::cfg::TEMPLATE_SAMPLES_DIRNAME);
let samples = Sample::find_from_folder(template_loc, &samples_folder, &tmp_dir)?;
info!(nb_samples_detected = samples.len(), ?samples_folder);
for sample in samples {
info!(sample = ?sample.name, args = ?sample.args, "checking...");
let run = SampleRun::run(&sample)?;
is_success = is_success && run.is_success();
show_differences(&sample.name, &run.diffs, review_mode)?;
}
Ok(is_success || review_mode)
}
//TODO move to ui module to be customizable (in future)
pub fn show_differences(name: &str, entries: &[EntryDiff], review_mode: bool) -> Result<()> {
let mut updates_count = 0;
for entry in entries {
println!("{:-^1$}", "-", 80);
entry.show();
if review_mode && entry.review()? {
updates_count += 1
}
}
println!("{:-^1$}", "-", 80);
println!(
"number of differences in sample '{}': {}",
name,
entries.len(),
);
if review_mode {
println!("number of updates in sample '{}': {}", name, updates_count);
}
println!("{:-^1$}", "-", 80);
Ok(())
}
impl EntryDiff {
fn show(&self) {
match &self.difference {
Difference::Presence { expect, actual } => {
if *expect && !*actual {
println!(
"missing file in the actual: {}",
self.relative_path.to_string_lossy()
);
} else {
println!(
"unexpected file in the actual: {}",
self.relative_path.to_string_lossy()
);
}
}
Difference::Kind { expect, actual } => {
println!(
"difference kind of entry on: {}, expected: {:?}, actual: {:?}",
self.relative_path.to_string_lossy(),
expect,
actual
);
}
Difference::StringContent { expect, actual } => {
println!(
"difference detected on: {}\n",
self.relative_path.to_string_lossy()
);
crate::ui::show_difference_text(expect, actual, true);
}
Difference::BinaryContent {
expect_md5,
actual_md5,
} => {
println!(
"difference detected on: {} (detected as binary file)\n",
self.relative_path.to_string_lossy()
);
println!("expected md5: {}", expect_md5);
println!("actual md5: {}", actual_md5);
}
}
}
// TODO add test
fn review(&self) -> Result<bool> {
let accept_update = match self.difference {
Difference::Presence { expect, actual } => {
if expect && !actual {
let path = self.expect_base_path.join(&self.relative_path);
let is_dir = std::fs::metadata(&path)?.is_dir();
if crate::ui::ask_to_update_sample("Accept to remove from sample ?")? {
if is_dir {
std::fs::remove_dir_all(&path)?;
} else {
std::fs::remove_file(&path)?;
}
true
} else {
false
}
} else if crate::ui::ask_to_update_sample("Accept to add into sample ?")? {
let path = self.actual_base_path.join(&self.relative_path);
let is_dir = std::fs::metadata(&path)?.is_dir();
if is_dir {
std::fs::create_dir_all(self.expect_base_path.join(&self.relative_path))?;
} else {
std::fs::copy(path, self.expect_base_path.join(&self.relative_path))?;
}
std::fs::copy(
self.actual_base_path.join(&self.relative_path),
self.expect_base_path.join(&self.relative_path),
)?;
true
} else {
false
}
}
_ => {
if crate::ui::ask_to_update_sample("Accept to update file into sample ?")? {
std::fs::copy(
self.actual_base_path.join(&self.relative_path),
self.expect_base_path.join(&self.relative_path),
)?;
true
} else {
false
}
}
};
Ok(accept_update)
}
}
#[derive(Debug, Clone)]
struct Sample {
pub name: String,
pub args: ApplyOpts,
pub expected: PathBuf,
pub existing: PathBuf,
pub ignores: Vec<PathPattern>,
}
impl Sample {
// scan folder to find sample to test (xxx.args, xxx.expected, xxx.existing)
fn find_from_folder<B: AsRef<Path>>(
template_loc: &SourceLoc,
samples_folder: B,
tmp_dir: &TempDir,
) -> Result<Vec<Sample>> {
let mut out = vec![];
for e in fs::read_dir(&samples_folder).map_err(|source| Error::ListFolder {
path: samples_folder.as_ref().into(),
source,
})? {
let path = e?.path();
if path
.extension()
.filter(|x| x.to_string_lossy() == "expected")
.is_some()
{
let name = path
.file_stem()
.expect("folder should have a file name without extension")
.to_string_lossy()
.to_string();
let expected = path.clone();
let existing = path.with_extension("existing");
let args_file = path.with_extension("cfg.yaml");
let destination = tmp_dir.path().join(&name).to_path_buf();
let sample_cfg = SampleCfg::from_file(args_file)?;
let args = sample_cfg.make_args(template_loc, destination)?;
let ignores = sample_cfg.make_ignores()?;
out.push(Sample {
name,
args,
expected,
existing,
ignores,
});
}
}
Ok(out)
}
}
#[derive(Deserialize, Serialize, Debug, Default, Clone, PartialEq)]
struct SampleCfg {
apply_args: Option<Vec<String>>,
check_ignores: Option<Vec<String>>,
}
impl SampleCfg {
fn | <P: AsRef<Path>>(file: P) -> Result<Self> {
let v = if file.as_ref().exists() {
let cfg_str = fs::read_to_string(file.as_ref()).map_err(|source| Error::ReadFile {
path: file.as_ref().into(),
source,
})?;
serde_yaml::from_str::<SampleCfg>(&cfg_str)?
} else {
SampleCfg::default()
};
Ok(v)
}
fn make_ignores(&self) -> Result<Vec<PathPattern>> {
use std::str::FromStr;
let trim_chars: &[_] = &['\r', '\n', ' ', '\t', '"', '\''];
let ignores = self
.check_ignores
.clone()
.unwrap_or_default()
.iter()
.map(|v| v.trim_matches(trim_chars))
.filter(|v| !v.is_empty())
.map(PathPattern::from_str)
.collect::<Result<Vec<PathPattern>>>()?;
Ok(ignores)
}
fn make_args<B: AsRef<Path>>(
&self,
template_loc: &SourceLoc,
destination: B,
) -> Result<ApplyOpts> {
let cfg_args = self.apply_args.clone().unwrap_or_default();
let mut args_line = cfg_args.iter().map(|s| s.as_str()).collect::<Vec<_>>();
args_line.push("--confirm");
args_line.push("never");
args_line.push("--no-interaction");
args_line.push("--destination");
args_line.push(
destination
.as_ref()
.to_str()
.expect("to convert destination path into str"),
);
args_line.push("--source");
args_line.push(&template_loc.uri.raw);
if let Some(rev) = &template_loc.rev {
args_line.push("--rev");
args_line.push(rev);
}
let buff = template_loc.subfolder.as_ref().map(|v| v.to_string_lossy());
if let Some(subfolder) = buff.as_ref() {
args_line.push("--source-subfolder");
args_line.push(subfolder);
}
//HACK from_iter_safe expect first entry to be the binary name,
// unless clap::AppSettings::NoBinaryName has been used
// (but I don't know how to use it in this case, patch is welcomed)
args_line.insert(0, "apply");
args_line.insert(0, "ffizer");
CliOpts::try_parse_from(args_line)
.map_err(Error::from)
.and_then(|o| match o.cmd {
Command::Apply(g) => Ok(g),
e => Err(Error::Unknown(format!(
"command should always be parsed as 'apply' not as {:?}",
e
))),
})
}
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct SampleRun {
diffs: Vec<EntryDiff>,
}
impl SampleRun {
#[tracing::instrument]
pub fn run(sample: &Sample) -> Result<SampleRun> {
// ALTERNATIVE: fork a sub-process to run current ffizer in apply mode
let destination = &sample.args.dst_folder;
if sample.existing.exists() {
copy(&sample.existing, destination)?;
}
let ctx = crate::Ctx {
cmd_opt: sample | from_file | identifier_name |
train.py | time
import torch
import pickle
import shutil
import math
import evaluator
import net
import optimizer as optim
from torchtext import data
import utils
from config import get_train_args
def save_checkpoint(state, is_best, model_path_, best_model_path_):
torch.save(state, model_path_)
if is_best:
shutil.copyfile(model_path_, best_model_path_)
def batch_size_func(new, count, sofar):
# return sofar + len(new[0]) + len(new[1])
return sofar + (2 * max(len(new[0]), len(new[1])))
def save_output(hypotheses, vocab, outf):
# Save the Hypothesis to output file
with io.open(outf, 'w') as fp:
for sent in hypotheses:
words = [vocab[y] for y in sent]
fp.write(' '.join(words) + '\n')
def tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
print('* number of parameters: %d' % n_params)
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
elif 'decoder' in name:
dec += param.nelement()
print('encoder: ', enc)
print('decoder: ', dec)
def report_func(epoch, batch, num_batches, start_time, report_stats,
report_every):
"""
This is the user-defined batch-level training progress
report function.
Args:
epoch(int): current epoch count.
batch(int): current batch count.
num_batches(int): total number of batches.
start_time(float): last report time.
lr(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance.
"""
if batch % report_every == -1 % report_every:
report_stats.output(epoch, batch + 1, num_batches, start_time)
report_stats = utils.Statistics()
return report_stats
class CalculateBleu(object):
def __init__(self, model, test_data, key, batch=50, max_length=50,
beam_size=1, alpha=0.6, max_sent=None):
self.model = model
self.test_data = test_data
self.key = key
self.batch = batch
self.device = -1
self.max_length = max_length
self.beam_size = beam_size
self.alpha = alpha
self.max_sent = max_sent
def __call__(self):
self.model.eval()
references = []
hypotheses = []
for i in range(0, len(self.test_data), self.batch):
sources, targets = zip(*self.test_data[i:i + self.batch])
references.extend(t.tolist() for t in targets)
if self.beam_size > 1:
ys = self.model.translate(sources,
self.max_length,
beam=self.beam_size,
alpha=self.alpha)
else:
ys = [y.tolist() for y in
self.model.translate(sources,
self.max_length,
beam=False)]
hypotheses.extend(ys)
if self.max_sent is not None and \
((i + 1) > self.max_sent):
break
# Log Progress
if self.max_sent is not None:
den = self.max_sent
else:
den = len(self.test_data)
print("> Completed: [ %d / %d ]" % (i, den), end='\r')
bleu = evaluator.BLEUEvaluator().evaluate(references, hypotheses)
print('BLEU:', bleu.score_str())
print('')
return bleu.bleu, hypotheses
def main():
best_score = 0
args = get_train_args()
print(json.dumps(args.__dict__, indent=4))
# Reading the int indexed text dataset
train_data = np.load(os.path.join(args.input, args.data + ".train.npy"))
train_data = train_data.tolist()
dev_data = np.load(os.path.join(args.input, args.data + ".valid.npy"))
dev_data = dev_data.tolist()
test_data = np.load(os.path.join(args.input, args.data + ".test.npy"))
test_data = test_data.tolist()
# Reading the vocab file
with open(os.path.join(args.input, args.data + '.vocab.pickle'),
'rb') as f:
id2w = pickle.load(f)
args.id2w = id2w
args.n_vocab = len(id2w)
# Define Model
model = net.Transformer(args)
tally_parameters(model)
if args.gpu >= 0:
model.cuda(args.gpu)
print(model)
optimizer = optim.TransformerAdamTrainer(model, args)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.model_file):
print("=> loading checkpoint '{}'".format(args.model_file))
checkpoint = torch.load(args.model_file)
args.start_epoch = checkpoint['epoch']
best_score = checkpoint['best_score']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".
format(args.model_file, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.model_file))
src_data, trg_data = list(zip(*train_data))
total_src_words = len(list(itertools.chain.from_iterable(src_data)))
total_trg_words = len(list(itertools.chain.from_iterable(trg_data)))
iter_per_epoch = (total_src_words + total_trg_words) // args.wbatchsize
print('Approximate number of iter/epoch =', iter_per_epoch)
time_s = time()
global_steps = 0
for epoch in range(args.start_epoch, args.epoch):
random.shuffle(train_data)
train_iter = data.iterator.pool(train_data,
args.wbatchsize,
key=lambda x:
data.utils.interleave_keys(len(x[0]),
len(x[1])),
batch_size_fn=batch_size_func,
random_shuffler=data.iterator.
RandomShuffler())
report_stats = utils.Statistics()
train_stats = utils.Statistics()
valid_stats = utils.Statistics()
if args.debug:
grad_norm = 0.
for num_steps, train_batch in enumerate(train_iter):
global_steps += 1
model.train()
optimizer.zero_grad()
src_iter = list(zip(*train_batch))[0]
src_words = len(list(itertools.chain.from_iterable(src_iter)))
report_stats.n_src_words += src_words
train_stats.n_src_words += src_words
in_arrays = utils.seq2seq_pad_concat_convert(train_batch, -1)
loss, stat = model(*in_arrays)
loss.backward()
if args.debug:
norm = utils.grad_norm(model.parameters())
grad_norm += norm
if global_steps % args.report_every == 0:
print("> Gradient Norm: %1.4f" % (grad_norm / (num_steps + 1)))
optimizer.step()
report_stats.update(stat)
train_stats.update(stat)
report_stats = report_func(epoch, num_steps, iter_per_epoch,
time_s, report_stats, args.report_every)
if (global_steps + 1) % args.eval_steps == 0:
dev_iter = data.iterator.pool(dev_data,
args.wbatchsize,
key=lambda x:
data.utils.interleave_keys(len(x[0]),
len(x[1])),
batch_size_fn=batch_size_func,
random_shuffler=data.iterator.
RandomShuffler())
for dev_batch in dev_iter:
model.eval()
in_arrays = utils.seq2seq_pad_concat_convert(dev_batch, -1)
loss_test, stat = model(*in_arrays)
valid_stats.update(stat)
print('Train perplexity: %g' % train_stats.ppl())
print('Train accuracy: %g' % train_stats.accuracy())
|
bleu_score, _ = CalculateBleu(model,
dev_data,
'Dev Bleu',
batch=args.batchsize // 4,
beam_size=args.beam_size,
alpha=args.alpha,
max_sent=args.max_sent_eval)()
if args.metric == "bleu":
score = bleu_score
elif args.metric == "accuracy":
score = valid_stats.accuracy()
is_best = score > best_score
best_score = max(score, best_score)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_score': best_score,
'optimizer': optimizer.state_dict(),
'opts': args,
}, is_best,
args.model_file,
args.best_model_file)
# BLEU score on Dev and Test Data
checkpoint = torch.load(args.best_model_file)
print("=> loaded checkpoint '{}' (epoch {}, best score {})".
format(args.best_model_file,
checkpoint['epoch'],
checkpoint['best_score']))
model.load_state_dict(checkpoint['state_dict'])
print('Dev Set BLEU Score')
_, dev_hyp = CalculateBleu(model,
dev_data,
'Dev Bleu',
batch=args.batchsize // 4,
beam_size=args.beam_size,
alpha=args.alpha)()
| print('Validation perplexity: %g' % valid_stats.ppl())
print('Validation accuracy: %g' % valid_stats.accuracy()) | random_line_split |
train.py | time
import torch
import pickle
import shutil
import math
import evaluator
import net
import optimizer as optim
from torchtext import data
import utils
from config import get_train_args
def save_checkpoint(state, is_best, model_path_, best_model_path_):
torch.save(state, model_path_)
if is_best:
shutil.copyfile(model_path_, best_model_path_)
def batch_size_func(new, count, sofar):
# return sofar + len(new[0]) + len(new[1])
return sofar + (2 * max(len(new[0]), len(new[1])))
def save_output(hypotheses, vocab, outf):
# Save the Hypothesis to output file
with io.open(outf, 'w') as fp:
for sent in hypotheses:
words = [vocab[y] for y in sent]
fp.write(' '.join(words) + '\n')
def tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
print('* number of parameters: %d' % n_params)
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
elif 'decoder' in name:
dec += param.nelement()
print('encoder: ', enc)
print('decoder: ', dec)
def report_func(epoch, batch, num_batches, start_time, report_stats,
report_every):
"""
This is the user-defined batch-level training progress
report function.
Args:
epoch(int): current epoch count.
batch(int): current batch count.
num_batches(int): total number of batches.
start_time(float): last report time.
lr(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance.
"""
if batch % report_every == -1 % report_every:
report_stats.output(epoch, batch + 1, num_batches, start_time)
report_stats = utils.Statistics()
return report_stats
class CalculateBleu(object):
def __init__(self, model, test_data, key, batch=50, max_length=50,
beam_size=1, alpha=0.6, max_sent=None):
self.model = model
self.test_data = test_data
self.key = key
self.batch = batch
self.device = -1
self.max_length = max_length
self.beam_size = beam_size
self.alpha = alpha
self.max_sent = max_sent
def __call__(self):
self.model.eval()
references = []
hypotheses = []
for i in range(0, len(self.test_data), self.batch):
sources, targets = zip(*self.test_data[i:i + self.batch])
references.extend(t.tolist() for t in targets)
if self.beam_size > 1:
ys = self.model.translate(sources,
self.max_length,
beam=self.beam_size,
alpha=self.alpha)
else:
ys = [y.tolist() for y in
self.model.translate(sources,
self.max_length,
beam=False)]
hypotheses.extend(ys)
if self.max_sent is not None and \
((i + 1) > self.max_sent):
break
# Log Progress
if self.max_sent is not None:
den = self.max_sent
else:
den = len(self.test_data)
print("> Completed: [ %d / %d ]" % (i, den), end='\r')
bleu = evaluator.BLEUEvaluator().evaluate(references, hypotheses)
print('BLEU:', bleu.score_str())
print('')
return bleu.bleu, hypotheses
def main():
best_score = 0
args = get_train_args()
print(json.dumps(args.__dict__, indent=4))
# Reading the int indexed text dataset
train_data = np.load(os.path.join(args.input, args.data + ".train.npy"))
train_data = train_data.tolist()
dev_data = np.load(os.path.join(args.input, args.data + ".valid.npy"))
dev_data = dev_data.tolist()
test_data = np.load(os.path.join(args.input, args.data + ".test.npy"))
test_data = test_data.tolist()
# Reading the vocab file
with open(os.path.join(args.input, args.data + '.vocab.pickle'),
'rb') as f:
id2w = pickle.load(f)
args.id2w = id2w
args.n_vocab = len(id2w)
# Define Model
model = net.Transformer(args)
tally_parameters(model)
if args.gpu >= 0:
model.cuda(args.gpu)
print(model)
optimizer = optim.TransformerAdamTrainer(model, args)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.model_file):
print("=> loading checkpoint '{}'".format(args.model_file))
checkpoint = torch.load(args.model_file)
args.start_epoch = checkpoint['epoch']
best_score = checkpoint['best_score']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".
format(args.model_file, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.model_file))
src_data, trg_data = list(zip(*train_data))
total_src_words = len(list(itertools.chain.from_iterable(src_data)))
total_trg_words = len(list(itertools.chain.from_iterable(trg_data)))
iter_per_epoch = (total_src_words + total_trg_words) // args.wbatchsize
print('Approximate number of iter/epoch =', iter_per_epoch)
time_s = time()
global_steps = 0
for epoch in range(args.start_epoch, args.epoch):
random.shuffle(train_data)
train_iter = data.iterator.pool(train_data,
args.wbatchsize,
key=lambda x:
data.utils.interleave_keys(len(x[0]),
len(x[1])),
batch_size_fn=batch_size_func,
random_shuffler=data.iterator.
RandomShuffler())
report_stats = utils.Statistics()
train_stats = utils.Statistics()
valid_stats = utils.Statistics()
if args.debug:
grad_norm = 0.
for num_steps, train_batch in enumerate(train_iter):
global_steps += 1
model.train()
optimizer.zero_grad()
src_iter = list(zip(*train_batch))[0]
src_words = len(list(itertools.chain.from_iterable(src_iter)))
report_stats.n_src_words += src_words
train_stats.n_src_words += src_words
in_arrays = utils.seq2seq_pad_concat_convert(train_batch, -1)
loss, stat = model(*in_arrays)
loss.backward()
if args.debug:
norm = utils.grad_norm(model.parameters())
grad_norm += norm
if global_steps % args.report_every == 0:
print("> Gradient Norm: %1.4f" % (grad_norm / (num_steps + 1)))
optimizer.step()
report_stats.update(stat)
train_stats.update(stat)
report_stats = report_func(epoch, num_steps, iter_per_epoch,
time_s, report_stats, args.report_every)
if (global_steps + 1) % args.eval_steps == 0:
dev_iter = data.iterator.pool(dev_data,
args.wbatchsize,
key=lambda x:
data.utils.interleave_keys(len(x[0]),
len(x[1])),
batch_size_fn=batch_size_func,
random_shuffler=data.iterator.
RandomShuffler())
for dev_batch in dev_iter:
|
print('Train perplexity: %g' % train_stats.ppl())
print('Train accuracy: %g' % train_stats.accuracy())
print('Validation perplexity: %g' % valid_stats.ppl())
print('Validation accuracy: %g' % valid_stats.accuracy())
bleu_score, _ = CalculateBleu(model,
dev_data,
'Dev Bleu',
batch=args.batchsize // 4,
beam_size=args.beam_size,
alpha=args.alpha,
max_sent=args.max_sent_eval)()
if args.metric == "bleu":
score = bleu_score
elif args.metric == "accuracy":
score = valid_stats.accuracy()
is_best = score > best_score
best_score = max(score, best_score)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_score': best_score,
'optimizer': optimizer.state_dict(),
'opts': args,
}, is_best,
args.model_file,
args.best_model_file)
# BLEU score on Dev and Test Data
checkpoint = torch.load(args.best_model_file)
print("=> loaded checkpoint '{}' (epoch {}, best score {})".
format(args.best_model_file,
checkpoint['epoch'],
checkpoint['best_score']))
model.load_state_dict(checkpoint['state_dict'])
print('Dev Set BLEU Score')
_, dev_hyp = CalculateBleu(model,
dev_data,
'Dev Bleu',
batch=args.batchsize // 4,
beam_size=args.beam_size,
alpha=args.alpha)()
| model.eval()
in_arrays = utils.seq2seq_pad_concat_convert(dev_batch, -1)
loss_test, stat = model(*in_arrays)
valid_stats.update(stat) | conditional_block |
train.py | time
import torch
import pickle
import shutil
import math
import evaluator
import net
import optimizer as optim
from torchtext import data
import utils
from config import get_train_args
def save_checkpoint(state, is_best, model_path_, best_model_path_):
torch.save(state, model_path_)
if is_best:
shutil.copyfile(model_path_, best_model_path_)
def | (new, count, sofar):
# return sofar + len(new[0]) + len(new[1])
return sofar + (2 * max(len(new[0]), len(new[1])))
def save_output(hypotheses, vocab, outf):
# Save the Hypothesis to output file
with io.open(outf, 'w') as fp:
for sent in hypotheses:
words = [vocab[y] for y in sent]
fp.write(' '.join(words) + '\n')
def tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
print('* number of parameters: %d' % n_params)
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
elif 'decoder' in name:
dec += param.nelement()
print('encoder: ', enc)
print('decoder: ', dec)
def report_func(epoch, batch, num_batches, start_time, report_stats,
report_every):
"""
This is the user-defined batch-level training progress
report function.
Args:
epoch(int): current epoch count.
batch(int): current batch count.
num_batches(int): total number of batches.
start_time(float): last report time.
lr(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance.
"""
if batch % report_every == -1 % report_every:
report_stats.output(epoch, batch + 1, num_batches, start_time)
report_stats = utils.Statistics()
return report_stats
class CalculateBleu(object):
def __init__(self, model, test_data, key, batch=50, max_length=50,
beam_size=1, alpha=0.6, max_sent=None):
self.model = model
self.test_data = test_data
self.key = key
self.batch = batch
self.device = -1
self.max_length = max_length
self.beam_size = beam_size
self.alpha = alpha
self.max_sent = max_sent
def __call__(self):
self.model.eval()
references = []
hypotheses = []
for i in range(0, len(self.test_data), self.batch):
sources, targets = zip(*self.test_data[i:i + self.batch])
references.extend(t.tolist() for t in targets)
if self.beam_size > 1:
ys = self.model.translate(sources,
self.max_length,
beam=self.beam_size,
alpha=self.alpha)
else:
ys = [y.tolist() for y in
self.model.translate(sources,
self.max_length,
beam=False)]
hypotheses.extend(ys)
if self.max_sent is not None and \
((i + 1) > self.max_sent):
break
# Log Progress
if self.max_sent is not None:
den = self.max_sent
else:
den = len(self.test_data)
print("> Completed: [ %d / %d ]" % (i, den), end='\r')
bleu = evaluator.BLEUEvaluator().evaluate(references, hypotheses)
print('BLEU:', bleu.score_str())
print('')
return bleu.bleu, hypotheses
def main():
best_score = 0
args = get_train_args()
print(json.dumps(args.__dict__, indent=4))
# Reading the int indexed text dataset
train_data = np.load(os.path.join(args.input, args.data + ".train.npy"))
train_data = train_data.tolist()
dev_data = np.load(os.path.join(args.input, args.data + ".valid.npy"))
dev_data = dev_data.tolist()
test_data = np.load(os.path.join(args.input, args.data + ".test.npy"))
test_data = test_data.tolist()
# Reading the vocab file
with open(os.path.join(args.input, args.data + '.vocab.pickle'),
'rb') as f:
id2w = pickle.load(f)
args.id2w = id2w
args.n_vocab = len(id2w)
# Define Model
model = net.Transformer(args)
tally_parameters(model)
if args.gpu >= 0:
model.cuda(args.gpu)
print(model)
optimizer = optim.TransformerAdamTrainer(model, args)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.model_file):
print("=> loading checkpoint '{}'".format(args.model_file))
checkpoint = torch.load(args.model_file)
args.start_epoch = checkpoint['epoch']
best_score = checkpoint['best_score']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".
format(args.model_file, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.model_file))
src_data, trg_data = list(zip(*train_data))
total_src_words = len(list(itertools.chain.from_iterable(src_data)))
total_trg_words = len(list(itertools.chain.from_iterable(trg_data)))
iter_per_epoch = (total_src_words + total_trg_words) // args.wbatchsize
print('Approximate number of iter/epoch =', iter_per_epoch)
time_s = time()
global_steps = 0
for epoch in range(args.start_epoch, args.epoch):
random.shuffle(train_data)
train_iter = data.iterator.pool(train_data,
args.wbatchsize,
key=lambda x:
data.utils.interleave_keys(len(x[0]),
len(x[1])),
batch_size_fn=batch_size_func,
random_shuffler=data.iterator.
RandomShuffler())
report_stats = utils.Statistics()
train_stats = utils.Statistics()
valid_stats = utils.Statistics()
if args.debug:
grad_norm = 0.
for num_steps, train_batch in enumerate(train_iter):
global_steps += 1
model.train()
optimizer.zero_grad()
src_iter = list(zip(*train_batch))[0]
src_words = len(list(itertools.chain.from_iterable(src_iter)))
report_stats.n_src_words += src_words
train_stats.n_src_words += src_words
in_arrays = utils.seq2seq_pad_concat_convert(train_batch, -1)
loss, stat = model(*in_arrays)
loss.backward()
if args.debug:
norm = utils.grad_norm(model.parameters())
grad_norm += norm
if global_steps % args.report_every == 0:
print("> Gradient Norm: %1.4f" % (grad_norm / (num_steps + 1)))
optimizer.step()
report_stats.update(stat)
train_stats.update(stat)
report_stats = report_func(epoch, num_steps, iter_per_epoch,
time_s, report_stats, args.report_every)
if (global_steps + 1) % args.eval_steps == 0:
dev_iter = data.iterator.pool(dev_data,
args.wbatchsize,
key=lambda x:
data.utils.interleave_keys(len(x[0]),
len(x[1])),
batch_size_fn=batch_size_func,
random_shuffler=data.iterator.
RandomShuffler())
for dev_batch in dev_iter:
model.eval()
in_arrays = utils.seq2seq_pad_concat_convert(dev_batch, -1)
loss_test, stat = model(*in_arrays)
valid_stats.update(stat)
print('Train perplexity: %g' % train_stats.ppl())
print('Train accuracy: %g' % train_stats.accuracy())
print('Validation perplexity: %g' % valid_stats.ppl())
print('Validation accuracy: %g' % valid_stats.accuracy())
bleu_score, _ = CalculateBleu(model,
dev_data,
'Dev Bleu',
batch=args.batchsize // 4,
beam_size=args.beam_size,
alpha=args.alpha,
max_sent=args.max_sent_eval)()
if args.metric == "bleu":
score = bleu_score
elif args.metric == "accuracy":
score = valid_stats.accuracy()
is_best = score > best_score
best_score = max(score, best_score)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_score': best_score,
'optimizer': optimizer.state_dict(),
'opts': args,
}, is_best,
args.model_file,
args.best_model_file)
# BLEU score on Dev and Test Data
checkpoint = torch.load(args.best_model_file)
print("=> loaded checkpoint '{}' (epoch {}, best score {})".
format(args.best_model_file,
checkpoint['epoch'],
checkpoint['best_score']))
model.load_state_dict(checkpoint['state_dict'])
print('Dev Set BLEU Score')
_, dev_hyp = CalculateBleu(model,
dev_data,
'Dev Bleu',
batch=args.batchsize // 4,
beam_size=args.beam_size,
alpha=args.alpha)()
| batch_size_func | identifier_name |
train.py | time
import torch
import pickle
import shutil
import math
import evaluator
import net
import optimizer as optim
from torchtext import data
import utils
from config import get_train_args
def save_checkpoint(state, is_best, model_path_, best_model_path_):
torch.save(state, model_path_)
if is_best:
shutil.copyfile(model_path_, best_model_path_)
def batch_size_func(new, count, sofar):
# return sofar + len(new[0]) + len(new[1])
return sofar + (2 * max(len(new[0]), len(new[1])))
def save_output(hypotheses, vocab, outf):
# Save the Hypothesis to output file
with io.open(outf, 'w') as fp:
for sent in hypotheses:
words = [vocab[y] for y in sent]
fp.write(' '.join(words) + '\n')
def tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
print('* number of parameters: %d' % n_params)
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
elif 'decoder' in name:
dec += param.nelement()
print('encoder: ', enc)
print('decoder: ', dec)
def report_func(epoch, batch, num_batches, start_time, report_stats,
report_every):
"""
This is the user-defined batch-level training progress
report function.
Args:
epoch(int): current epoch count.
batch(int): current batch count.
num_batches(int): total number of batches.
start_time(float): last report time.
lr(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance.
"""
if batch % report_every == -1 % report_every:
report_stats.output(epoch, batch + 1, num_batches, start_time)
report_stats = utils.Statistics()
return report_stats
class CalculateBleu(object):
| ys = self.model.translate(sources,
self.max_length,
beam=self.beam_size,
alpha=self.alpha)
else:
ys = [y.tolist() for y in
self.model.translate(sources,
self.max_length,
beam=False)]
hypotheses.extend(ys)
if self.max_sent is not None and \
((i + 1) > self.max_sent):
break
# Log Progress
if self.max_sent is not None:
den = self.max_sent
else:
den = len(self.test_data)
print("> Completed: [ %d / %d ]" % (i, den), end='\r')
bleu = evaluator.BLEUEvaluator().evaluate(references, hypotheses)
print('BLEU:', bleu.score_str())
print('')
return bleu.bleu, hypotheses
def main():
best_score = 0
args = get_train_args()
print(json.dumps(args.__dict__, indent=4))
# Reading the int indexed text dataset
train_data = np.load(os.path.join(args.input, args.data + ".train.npy"))
train_data = train_data.tolist()
dev_data = np.load(os.path.join(args.input, args.data + ".valid.npy"))
dev_data = dev_data.tolist()
test_data = np.load(os.path.join(args.input, args.data + ".test.npy"))
test_data = test_data.tolist()
# Reading the vocab file
with open(os.path.join(args.input, args.data + '.vocab.pickle'),
'rb') as f:
id2w = pickle.load(f)
args.id2w = id2w
args.n_vocab = len(id2w)
# Define Model
model = net.Transformer(args)
tally_parameters(model)
if args.gpu >= 0:
model.cuda(args.gpu)
print(model)
optimizer = optim.TransformerAdamTrainer(model, args)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.model_file):
print("=> loading checkpoint '{}'".format(args.model_file))
checkpoint = torch.load(args.model_file)
args.start_epoch = checkpoint['epoch']
best_score = checkpoint['best_score']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".
format(args.model_file, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.model_file))
src_data, trg_data = list(zip(*train_data))
total_src_words = len(list(itertools.chain.from_iterable(src_data)))
total_trg_words = len(list(itertools.chain.from_iterable(trg_data)))
iter_per_epoch = (total_src_words + total_trg_words) // args.wbatchsize
print('Approximate number of iter/epoch =', iter_per_epoch)
time_s = time()
global_steps = 0
for epoch in range(args.start_epoch, args.epoch):
random.shuffle(train_data)
train_iter = data.iterator.pool(train_data,
args.wbatchsize,
key=lambda x:
data.utils.interleave_keys(len(x[0]),
len(x[1])),
batch_size_fn=batch_size_func,
random_shuffler=data.iterator.
RandomShuffler())
report_stats = utils.Statistics()
train_stats = utils.Statistics()
valid_stats = utils.Statistics()
if args.debug:
grad_norm = 0.
for num_steps, train_batch in enumerate(train_iter):
global_steps += 1
model.train()
optimizer.zero_grad()
src_iter = list(zip(*train_batch))[0]
src_words = len(list(itertools.chain.from_iterable(src_iter)))
report_stats.n_src_words += src_words
train_stats.n_src_words += src_words
in_arrays = utils.seq2seq_pad_concat_convert(train_batch, -1)
loss, stat = model(*in_arrays)
loss.backward()
if args.debug:
norm = utils.grad_norm(model.parameters())
grad_norm += norm
if global_steps % args.report_every == 0:
print("> Gradient Norm: %1.4f" % (grad_norm / (num_steps + 1)))
optimizer.step()
report_stats.update(stat)
train_stats.update(stat)
report_stats = report_func(epoch, num_steps, iter_per_epoch,
time_s, report_stats, args.report_every)
if (global_steps + 1) % args.eval_steps == 0:
dev_iter = data.iterator.pool(dev_data,
args.wbatchsize,
key=lambda x:
data.utils.interleave_keys(len(x[0]),
len(x[1])),
batch_size_fn=batch_size_func,
random_shuffler=data.iterator.
RandomShuffler())
for dev_batch in dev_iter:
model.eval()
in_arrays = utils.seq2seq_pad_concat_convert(dev_batch, -1)
loss_test, stat = model(*in_arrays)
valid_stats.update(stat)
print('Train perplexity: %g' % train_stats.ppl())
print('Train accuracy: %g' % train_stats.accuracy())
print('Validation perplexity: %g' % valid_stats.ppl())
print('Validation accuracy: %g' % valid_stats.accuracy())
bleu_score, _ = CalculateBleu(model,
dev_data,
'Dev Bleu',
batch=args.batchsize // 4,
beam_size=args.beam_size,
alpha=args.alpha,
max_sent=args.max_sent_eval)()
if args.metric == "bleu":
score = bleu_score
elif args.metric == "accuracy":
score = valid_stats.accuracy()
is_best = score > best_score
best_score = max(score, best_score)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_score': best_score,
'optimizer': optimizer.state_dict(),
'opts': args,
}, is_best,
args.model_file,
args.best_model_file)
# BLEU score on Dev and Test Data
checkpoint = torch.load(args.best_model_file)
print("=> loaded checkpoint '{}' (epoch {}, best score {})".
format(args.best_model_file,
checkpoint['epoch'],
checkpoint['best_score']))
model.load_state_dict(checkpoint['state_dict'])
print('Dev Set BLEU Score')
_, dev_hyp = CalculateBleu(model,
dev_data,
'Dev Bleu',
batch=args.batchsize // 4,
beam_size=args.beam_size,
alpha=args.alpha)()
| def __init__(self, model, test_data, key, batch=50, max_length=50,
beam_size=1, alpha=0.6, max_sent=None):
self.model = model
self.test_data = test_data
self.key = key
self.batch = batch
self.device = -1
self.max_length = max_length
self.beam_size = beam_size
self.alpha = alpha
self.max_sent = max_sent
def __call__(self):
self.model.eval()
references = []
hypotheses = []
for i in range(0, len(self.test_data), self.batch):
sources, targets = zip(*self.test_data[i:i + self.batch])
references.extend(t.tolist() for t in targets)
if self.beam_size > 1: | identifier_body |
remotenode.go | Local node is nil")
}
if conn == nil {
return nil, errors.New("conn is nil")
}
node, err := NewNode(nil, "")
if err != nil {
return nil, err
}
txMsgCache := cache.NewGoCache(txMsgCacheExpiration, txMsgCacheCleanupInterval)
remoteNode := &RemoteNode{
Node: node,
LocalNode: localNode,
conn: conn,
IsOutbound: isOutbound,
rxMsgChan: make(chan *protobuf.Message, remoteRxMsgChanLen),
txMsgChan: make(chan *protobuf.Message, remoteTxMsgChanLen),
txMsgCache: txMsgCache,
}
return remoteNode, nil
}
func (rn *RemoteNode) String() string {
if !rn.IsReady() {
return fmt.Sprintf("<%s>", rn.conn.RemoteAddr().String())
}
return fmt.Sprintf("%v<%s>", rn.Node, rn.conn.RemoteAddr().String())
}
// IsReady returns if the remote node is ready
func (rn *RemoteNode) IsReady() bool {
rn.readyLock.RLock()
defer rn.readyLock.RUnlock()
return rn.ready
}
// Start starts the runtime loop of the remote node
func (rn *RemoteNode) Start() error {
rn.StartOnce.Do(func() {
if rn.IsStopped() {
return
}
go rn.handleMsg()
go rn.rx()
go rn.tx()
go func() {
var n *protobuf.Node
var err error
for i := 0; i < startRetries; i++ {
n, err = rn.GetNode()
if err == nil {
break
}
}
if err != nil {
rn.Stop(fmt.Errorf("Get node error: %s", err))
return
}
var existing *RemoteNode
rn.LocalNode.neighbors.Range(func(key, value interface{}) bool {
remoteNode, ok := value.(*RemoteNode)
if ok && remoteNode.IsReady() && bytes.Equal(remoteNode.Id, n.Id) {
if remoteNode.IsStopped() {
log.Warningf("Remove stopped remote node %v from list", remoteNode)
rn.LocalNode.neighbors.Delete(key)
} else {
existing = remoteNode
}
return false
}
return true
})
if existing != nil {
rn.Stop(fmt.Errorf("Node with id %x is already connected at addr %s", existing.Id, existing.conn.RemoteAddr().String()))
return
}
remoteAddr, err := transport.Parse(n.Addr)
if err != nil {
rn.Stop(fmt.Errorf("Parse node addr %s error: %s", n.Addr, err))
return
}
if remoteAddr.Host == "" {
connAddr := rn.conn.RemoteAddr().String()
remoteAddr.Host, _, err = net.SplitHostPort(connAddr)
if err != nil {
rn.Stop(fmt.Errorf("Parse conn remote addr %s error: %s", connAddr, err))
return
}
n.Addr = remoteAddr.String()
}
rn.Node.Node = n
rn.readyLock.Lock()
rn.ready = true
rn.readyLock.Unlock()
for _, f := range rn.LocalNode.middlewareStore.remoteNodeReady {
if !f(rn) {
break
}
}
}()
})
return nil
}
// Stop stops the runtime loop of the remote node
func (rn *RemoteNode) Stop(err error) {
rn.StopOnce.Do(func() {
if err != nil {
log.Warningf("Remote node %v stops because of error: %s", rn, err)
} else {
log.Infof("Remote node %v stops", rn)
}
err = rn.NotifyStop()
if err != nil {
log.Warning("Notify remote node stop error:", err)
}
time.AfterFunc(stopGracePeriod, func() {
rn.LifeCycle.Stop()
if rn.conn != nil {
rn.LocalNode.neighbors.Delete(rn.conn.RemoteAddr().String())
rn.conn.Close()
}
for _, f := range rn.LocalNode.middlewareStore.remoteNodeDisconnected {
if !f(rn) {
break
}
}
})
})
}
// handleMsg starts a loop that handles received msg
func (rn *RemoteNode) handleMsg() {
var msg *protobuf.Message
var remoteMsg *RemoteMessage
var msgChan chan *RemoteMessage
var err error
keepAliveTimeoutTimer := time.NewTimer(keepAliveTimeout)
for {
if rn.IsStopped() {
util.StopTimer(keepAliveTimeoutTimer)
return
}
select {
case msg = <-rn.rxMsgChan:
remoteMsg, err = NewRemoteMessage(rn, msg)
if err != nil {
log.Error(err)
continue
}
msgChan, err = rn.LocalNode.GetRxMsgChan(msg.RoutingType)
if err != nil {
log.Error(err)
continue
}
select {
case msgChan <- remoteMsg:
default:
log.Warningf("Msg chan full for routing type %d, discarding msg", msg.RoutingType)
}
case <-keepAliveTimeoutTimer.C:
rn.Stop(errors.New("keepalive timeout"))
}
util.ResetTimer(keepAliveTimeoutTimer, keepAliveTimeout)
}
}
// handleMsgBuf unmarshals buf to msg and sends it to the msg chan of the local node
func (rn *RemoteNode) handleMsgBuf(buf []byte) {
msg := &protobuf.Message{}
err := proto.Unmarshal(buf, msg)
if err != nil {
rn.Stop(fmt.Errorf("unmarshal msg error: %s", err))
return
}
select {
case rn.rxMsgChan <- msg:
default:
log.Warning("Rx msg chan full, discarding msg")
}
}
// rx receives and handles data from RemoteNode rn
func (rn *RemoteNode) rx() {
msgLenBuf := make([]byte, msgLenBytes)
var readLen int
for {
if rn.IsStopped() {
return
}
l, err := rn.conn.Read(msgLenBuf)
if err != nil {
rn.Stop(fmt.Errorf("Read msg len error: %s", err))
continue
}
if l != msgLenBytes {
rn.Stop(fmt.Errorf("Msg len has %d bytes, which is less than expected %d", l, msgLenBytes))
continue
}
msgLen := int(binary.BigEndian.Uint32(msgLenBuf))
if msgLen < 0 {
rn.Stop(fmt.Errorf("Msg len %d overflow", msgLen))
continue
}
if msgLen > maxMsgSize {
rn.Stop(fmt.Errorf("Msg size %d exceeds max msg size %d", msgLen, maxMsgSize))
continue
}
buf := make([]byte, msgLen)
for readLen = 0; readLen < msgLen; readLen += l {
l, err = rn.conn.Read(buf[readLen:])
if err != nil {
break
}
}
if err != nil {
rn.Stop(fmt.Errorf("Read msg error: %s", err))
continue
}
if readLen > msgLen {
rn.Stop(fmt.Errorf("Msg has %d bytes, which is more than expected %d", readLen, msgLen))
continue
}
rn.handleMsgBuf(buf)
}
}
// tx marshals and sends data in txMsgChan to RemoteNode rn
func (rn *RemoteNode) tx() {
var msg *protobuf.Message
var buf []byte
var err error
msgLenBuf := make([]byte, msgLenBytes)
keepAliveTimer := time.NewTimer(keepAliveInterval)
for {
if rn.IsStopped() {
util.StopTimer(keepAliveTimer)
return
}
select {
case msg = <-rn.txMsgChan:
buf, err = proto.Marshal(msg)
if err != nil {
log.Error(err)
continue
}
if len(buf) > maxMsgSize {
log.Errorf("Msg size %d exceeds max msg size %d", len(buf), maxMsgSize)
continue
}
binary.BigEndian.PutUint32(msgLenBuf, uint32(len(buf)))
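// frame the message: length prefix first, then the protobuf payload;
// the peer's rx() loop reads back exactly this framing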
_, err = rn.conn.Write(msgLenBuf)
if err != nil {
rn.Stop(fmt.Errorf("Write to conn error: %s", err))
continue
}
_, err = rn.conn.Write(buf)
if err != nil {
rn.Stop(fmt.Errorf("Write to conn error: %s", err))
continue
}
case <-keepAliveTimer.C:
rn.keepAlive()
}
util.ResetTimer(keepAliveTimer, keepAliveInterval)
}
}
// SendMessage marshals and sends msg, and returns a RemoteMessage chan if hasReply is true
func (rn *RemoteNode) SendMessage(msg *protobuf.Message, hasReply bool) (<-chan *RemoteMessage, error) {
if rn.IsStopped() {
return nil, errors.New("Remote node has stopped")
}
| if len(msg.MessageId) == 0 {
return nil, errors.New("Message ID is empty")
}
| random_line_split |
|
remotenode.go |
// Max message size in bytes
maxMsgSize = 20 * 1024 * 1024
)
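// Wire format shared by rx() and tx() below (assuming msgLenBytes == 4):
// each message is a 4-byte big-endian length prefix followed by the
// protobuf-encoded Message payload; payloads larger than maxMsgSize are
// rejected on both the send and the receive path.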
// RemoteNode is a remote node
type RemoteNode struct {
*Node
LocalNode *LocalNode
IsOutbound bool
conn net.Conn
rxMsgChan chan *protobuf.Message
txMsgChan chan *protobuf.Message
txMsgCache cache.Cache
ready bool
readyLock sync.RWMutex
}
// NewRemoteNode creates a remote node
func NewRemoteNode(localNode *LocalNode, conn net.Conn, isOutbound bool) (*RemoteNode, error) {
if localNode == nil {
return nil, errors.New("Local node is nil")
}
if conn == nil {
return nil, errors.New("conn is nil")
}
node, err := NewNode(nil, "")
if err != nil {
return nil, err
}
txMsgCache := cache.NewGoCache(txMsgCacheExpiration, txMsgCacheCleanupInterval)
remoteNode := &RemoteNode{
Node: node,
LocalNode: localNode,
conn: conn,
IsOutbound: isOutbound,
rxMsgChan: make(chan *protobuf.Message, remoteRxMsgChanLen),
txMsgChan: make(chan *protobuf.Message, remoteTxMsgChanLen),
txMsgCache: txMsgCache,
}
return remoteNode, nil
}
func (rn *RemoteNode) String() string {
if !rn.IsReady() {
return fmt.Sprintf("<%s>", rn.conn.RemoteAddr().String())
}
return fmt.Sprintf("%v<%s>", rn.Node, rn.conn.RemoteAddr().String())
}
// IsReady returns if the remote node is ready
func (rn *RemoteNode) IsReady() bool {
rn.readyLock.RLock()
defer rn.readyLock.RUnlock()
return rn.ready
}
// Start starts the runtime loop of the remote node
func (rn *RemoteNode) Start() error {
rn.StartOnce.Do(func() {
if rn.IsStopped() {
return
}
go rn.handleMsg()
go rn.rx()
go rn.tx()
go func() {
var n *protobuf.Node
var err error
for i := 0; i < startRetries; i++ {
n, err = rn.GetNode()
if err == nil {
break
}
}
if err != nil {
rn.Stop(fmt.Errorf("Get node error: %s", err))
return
}
var existing *RemoteNode
rn.LocalNode.neighbors.Range(func(key, value interface{}) bool {
remoteNode, ok := value.(*RemoteNode)
if ok && remoteNode.IsReady() && bytes.Equal(remoteNode.Id, n.Id) {
if remoteNode.IsStopped() {
log.Warningf("Remove stopped remote node %v from list", remoteNode)
rn.LocalNode.neighbors.Delete(key)
} else {
existing = remoteNode
}
return false
}
return true
})
if existing != nil {
rn.Stop(fmt.Errorf("Node with id %x is already connected at addr %s", existing.Id, existing.conn.RemoteAddr().String()))
return
}
remoteAddr, err := transport.Parse(n.Addr)
if err != nil {
rn.Stop(fmt.Errorf("Parse node addr %s error: %s", n.Addr, err))
return
}
if remoteAddr.Host == "" {
connAddr := rn.conn.RemoteAddr().String()
remoteAddr.Host, _, err = net.SplitHostPort(connAddr)
if err != nil {
rn.Stop(fmt.Errorf("Parse conn remote addr %s error: %s", connAddr, err))
return
}
n.Addr = remoteAddr.String()
}
rn.Node.Node = n
rn.readyLock.Lock()
rn.ready = true
rn.readyLock.Unlock()
for _, f := range rn.LocalNode.middlewareStore.remoteNodeReady {
if !f(rn) {
break
}
}
}()
})
return nil
}
// Stop stops the runtime loop of the remote node
func (rn *RemoteNode) Stop(err error) |
for _, f := range rn.LocalNode.middlewareStore.remoteNodeDisconnected {
if !f(rn) {
break
}
}
})
})
}
// handleMsg starts a loop that handles received msg
func (rn *RemoteNode) handleMsg() {
var msg *protobuf.Message
var remoteMsg *RemoteMessage
var msgChan chan *RemoteMessage
var err error
keepAliveTimeoutTimer := time.NewTimer(keepAliveTimeout)
for {
if rn.IsStopped() {
util.StopTimer(keepAliveTimeoutTimer)
return
}
select {
case msg = <-rn.rxMsgChan:
remoteMsg, err = NewRemoteMessage(rn, msg)
if err != nil {
log.Error(err)
continue
}
msgChan, err = rn.LocalNode.GetRxMsgChan(msg.RoutingType)
if err != nil {
log.Error(err)
continue
}
select {
case msgChan <- remoteMsg:
default:
log.Warningf("Msg chan full for routing type %d, discarding msg", msg.RoutingType)
}
case <-keepAliveTimeoutTimer.C:
rn.Stop(errors.New("keepalive timeout"))
}
util.ResetTimer(keepAliveTimeoutTimer, keepAliveTimeout)
}
}
// handleMsgBuf unmarshals buf to msg and sends it to the msg chan of the local node
func (rn *RemoteNode) handleMsgBuf(buf []byte) {
msg := &protobuf.Message{}
err := proto.Unmarshal(buf, msg)
if err != nil {
rn.Stop(fmt.Errorf("unmarshal msg error: %s", err))
return
}
select {
case rn.rxMsgChan <- msg:
default:
log.Warning("Rx msg chan full, discarding msg")
}
}
// rx receives and handles data from RemoteNode rn
func (rn *RemoteNode) rx() {
msgLenBuf := make([]byte, msgLenBytes)
var readLen int
for {
if rn.IsStopped() {
return
}
l, err := rn.conn.Read(msgLenBuf)
if err != nil {
rn.Stop(fmt.Errorf("Read msg len error: %s", err))
continue
}
if l != msgLenBytes {
rn.Stop(fmt.Errorf("Msg len has %d bytes, which is less than expected %d", l, msgLenBytes))
continue
}
msgLen := int(binary.BigEndian.Uint32(msgLenBuf))
if msgLen < 0 {
rn.Stop(fmt.Errorf("Msg len %d overflow", msgLen))
continue
}
if msgLen > maxMsgSize {
rn.Stop(fmt.Errorf("Msg size %d exceeds max msg size %d", msgLen, maxMsgSize))
continue
}
buf := make([]byte, msgLen)
for readLen = 0; readLen < msgLen; readLen += l {
l, err = rn.conn.Read(buf[readLen:])
if err != nil {
break
}
}
if err != nil {
rn.Stop(fmt.Errorf("Read msg error: %s", err))
continue
}
if readLen > msgLen {
rn.Stop(fmt.Errorf("Msg has %d bytes, which is more than expected %d", readLen, msgLen))
continue
}
rn.handleMsgBuf(buf)
}
}
// tx marshals and sends data in txMsgChan to RemoteNode rn
func (rn *RemoteNode) tx() {
var msg *protobuf.Message
var buf []byte
var err error
msgLenBuf := make([]byte, msgLenBytes)
keepAliveTimer := time.NewTimer(keepAliveInterval)
for {
if rn.IsStopped() {
util.StopTimer(keepAliveTimer)
return
}
select {
case msg = <-rn.txMsgChan:
buf, err = proto.Marshal(msg)
if err != nil {
log.Error(err)
continue
}
if len(buf) > maxMsgSize {
log.Errorf("Msg size %d exceeds max msg size %d", len(buf), maxMsgSize)
continue
}
binary.BigEndian.PutUint32(msgLenBuf, uint32(len(buf)))
_, err = rn.conn.Write(msgLenBuf)
if err != nil {
rn.Stop(fmt.Errorf("Write to conn error: %s", err))
continue
}
_, err = rn.conn.Write | {
rn.StopOnce.Do(func() {
if err != nil {
log.Warningf("Remote node %v stops because of error: %s", rn, err)
} else {
log.Infof("Remote node %v stops", rn)
}
err = rn.NotifyStop()
if err != nil {
log.Warning("Notify remote node stop error:", err)
}
time.AfterFunc(stopGracePeriod, func() {
rn.LifeCycle.Stop()
if rn.conn != nil {
rn.LocalNode.neighbors.Delete(rn.conn.RemoteAddr().String())
rn.conn.Close()
} | identifier_body |
remotenode.go | remoteNode, ok := value.(*RemoteNode)
if ok && remoteNode.IsReady() && bytes.Equal(remoteNode.Id, n.Id) {
if remoteNode.IsStopped() {
log.Warningf("Remove stopped remote node %v from list", remoteNode)
rn.LocalNode.neighbors.Delete(key)
} else {
existing = remoteNode
}
return false
}
return true
})
if existing != nil {
rn.Stop(fmt.Errorf("Node with id %x is already connected at addr %s", existing.Id, existing.conn.RemoteAddr().String()))
return
}
remoteAddr, err := transport.Parse(n.Addr)
if err != nil {
rn.Stop(fmt.Errorf("Parse node addr %s error: %s", n.Addr, err))
return
}
if remoteAddr.Host == "" {
connAddr := rn.conn.RemoteAddr().String()
remoteAddr.Host, _, err = net.SplitHostPort(connAddr)
if err != nil {
rn.Stop(fmt.Errorf("Parse conn remote addr %s error: %s", connAddr, err))
return
}
n.Addr = remoteAddr.String()
}
rn.Node.Node = n
rn.readyLock.Lock()
rn.ready = true
rn.readyLock.Unlock()
for _, f := range rn.LocalNode.middlewareStore.remoteNodeReady {
if !f(rn) {
break
}
}
}()
})
return nil
}
// Stop stops the runtime loop of the remote node
func (rn *RemoteNode) Stop(err error) {
rn.StopOnce.Do(func() {
if err != nil {
log.Warningf("Remote node %v stops because of error: %s", rn, err)
} else {
log.Infof("Remote node %v stops", rn)
}
err = rn.NotifyStop()
if err != nil {
log.Warning("Notify remote node stop error:", err)
}
time.AfterFunc(stopGracePeriod, func() {
rn.LifeCycle.Stop()
if rn.conn != nil {
rn.LocalNode.neighbors.Delete(rn.conn.RemoteAddr().String())
rn.conn.Close()
}
for _, f := range rn.LocalNode.middlewareStore.remoteNodeDisconnected {
if !f(rn) {
break
}
}
})
})
}
// handleMsg starts a loop that handles received msg
func (rn *RemoteNode) handleMsg() {
var msg *protobuf.Message
var remoteMsg *RemoteMessage
var msgChan chan *RemoteMessage
var err error
keepAliveTimeoutTimer := time.NewTimer(keepAliveTimeout)
for {
if rn.IsStopped() {
util.StopTimer(keepAliveTimeoutTimer)
return
}
select {
case msg = <-rn.rxMsgChan:
remoteMsg, err = NewRemoteMessage(rn, msg)
if err != nil {
log.Error(err)
continue
}
msgChan, err = rn.LocalNode.GetRxMsgChan(msg.RoutingType)
if err != nil {
log.Error(err)
continue
}
select {
case msgChan <- remoteMsg:
default:
log.Warningf("Msg chan full for routing type %d, discarding msg", msg.RoutingType)
}
case <-keepAliveTimeoutTimer.C:
rn.Stop(errors.New("keepalive timeout"))
}
util.ResetTimer(keepAliveTimeoutTimer, keepAliveTimeout)
}
}
// handleMsgBuf unmarshals buf to msg and sends it to the msg chan of the local node
func (rn *RemoteNode) handleMsgBuf(buf []byte) {
msg := &protobuf.Message{}
err := proto.Unmarshal(buf, msg)
if err != nil {
rn.Stop(fmt.Errorf("unmarshal msg error: %s", err))
return
}
select {
case rn.rxMsgChan <- msg:
default:
log.Warning("Rx msg chan full, discarding msg")
}
}
// rx receives and handles data from RemoteNode rn
func (rn *RemoteNode) rx() {
msgLenBuf := make([]byte, msgLenBytes)
var readLen int
for {
if rn.IsStopped() {
return
}
l, err := rn.conn.Read(msgLenBuf)
if err != nil {
rn.Stop(fmt.Errorf("Read msg len error: %s", err))
continue
}
if l != msgLenBytes {
rn.Stop(fmt.Errorf("Msg len has %d bytes, which is less than expected %d", l, msgLenBytes))
continue
}
msgLen := int(binary.BigEndian.Uint32(msgLenBuf))
if msgLen < 0 {
rn.Stop(fmt.Errorf("Msg len %d overflow", msgLen))
continue
}
if msgLen > maxMsgSize {
rn.Stop(fmt.Errorf("Msg size %d exceeds max msg size %d", msgLen, maxMsgSize))
continue
}
buf := make([]byte, msgLen)
for readLen = 0; readLen < msgLen; readLen += l {
l, err = rn.conn.Read(buf[readLen:])
if err != nil {
break
}
}
if err != nil {
rn.Stop(fmt.Errorf("Read msg error: %s", err))
continue
}
if readLen > msgLen {
rn.Stop(fmt.Errorf("Msg has %d bytes, which is more than expected %d", readLen, msgLen))
continue
}
rn.handleMsgBuf(buf)
}
}
// tx marshals and sends data in txMsgChan to RemoteNode rn
func (rn *RemoteNode) tx() {
var msg *protobuf.Message
var buf []byte
var err error
msgLenBuf := make([]byte, msgLenBytes)
keepAliveTimer := time.NewTimer(keepAliveInterval)
for {
if rn.IsStopped() {
util.StopTimer(keepAliveTimer)
return
}
select {
case msg = <-rn.txMsgChan:
buf, err = proto.Marshal(msg)
if err != nil {
log.Error(err)
continue
}
if len(buf) > maxMsgSize {
log.Errorf("Msg size %d exceeds max msg size %d", len(buf), maxMsgSize)
continue
}
binary.BigEndian.PutUint32(msgLenBuf, uint32(len(buf)))
_, err = rn.conn.Write(msgLenBuf)
if err != nil {
rn.Stop(fmt.Errorf("Write to conn error: %s", err))
continue
}
_, err = rn.conn.Write(buf)
if err != nil {
rn.Stop(fmt.Errorf("Write to conn error: %s", err))
continue
}
case <-keepAliveTimer.C:
rn.keepAlive()
}
util.ResetTimer(keepAliveTimer, keepAliveInterval)
}
}
// SendMessage marshals and sends msg, and returns a RemoteMessage chan if hasReply is true
func (rn *RemoteNode) SendMessage(msg *protobuf.Message, hasReply bool) (<-chan *RemoteMessage, error) {
if rn.IsStopped() {
return nil, errors.New("Remote node has stopped")
}
if len(msg.MessageId) == 0 {
return nil, errors.New("Message ID is empty")
}
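// txMsgCache deduplicates outgoing messages by MessageId: if the same id was
// sent recently, SendMessage returns (nil, nil) below and the message is
// silently dropped instead of being queued again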
_, found := rn.txMsgCache.Get(msg.MessageId)
if found {
return nil, nil
}
err := rn.txMsgCache.Add(msg.MessageId, struct{}{})
if err != nil {
return nil, err
}
select {
case rn.txMsgChan <- msg:
default:
return nil, errors.New("Tx msg chan full, discarding msg")
}
if hasReply {
return rn.LocalNode.AllocReplyChan(msg.MessageId)
}
return nil, nil
}
// SendMessageAsync sends msg and returns if there is an error
func (rn *RemoteNode) SendMessageAsync(msg *protobuf.Message) error {
_, err := rn.SendMessage(msg, false)
return err
}
// SendMessageSync sends msg, returns reply message or error if timeout
func (rn *RemoteNode) SendMessageSync(msg *protobuf.Message) (*RemoteMessage, error) {
replyChan, err := rn.SendMessage(msg, true)
if err != nil {
return nil, err
}
select {
case replyMsg := <-replyChan:
return replyMsg, nil
case <-time.After(replyTimeout):
return nil, errors.New("Wait for reply timeout")
}
}
func (rn *RemoteNode) keepAlive() error {
msg, err := NewPingMessage()
if err != nil {
return err
}
err = rn.SendMessageAsync(msg)
if err != nil {
return err
}
return nil
}
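// Keepalive works in both directions: tx() calls keepAlive() every
// keepAliveInterval to push out a ping, while handleMsg() stops the remote
// node if nothing is received within keepAliveTimeout.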
// Ping sends a Ping message to remote node and wait for reply
func (rn *RemoteNode) Ping() error {
msg, err := NewPingMessage()
if err != nil {
return err
}
_, err = rn.SendMessageSync(msg)
if err != nil {
return err
}
return nil
}
// GetNode sends a GetNode message to remote node and wait for reply
func (rn *RemoteNode) GetNode() (*protobuf.Node, error) {
msg, err := NewGetNodeMessage()
if err != nil | {
return nil, err
} | conditional_block |
|
remotenode.go |
// Max message size in bytes
maxMsgSize = 20 * 1024 * 1024
)
// RemoteNode is a remote node
type RemoteNode struct {
*Node
LocalNode *LocalNode
IsOutbound bool
conn net.Conn
rxMsgChan chan *protobuf.Message
txMsgChan chan *protobuf.Message
txMsgCache cache.Cache
ready bool
readyLock sync.RWMutex
}
// NewRemoteNode creates a remote node
func NewRemoteNode(localNode *LocalNode, conn net.Conn, isOutbound bool) (*RemoteNode, error) {
if localNode == nil {
return nil, errors.New("Local node is nil")
}
if conn == nil {
return nil, errors.New("conn is nil")
}
node, err := NewNode(nil, "")
if err != nil {
return nil, err
}
txMsgCache := cache.NewGoCache(txMsgCacheExpiration, txMsgCacheCleanupInterval)
remoteNode := &RemoteNode{
Node: node,
LocalNode: localNode,
conn: conn,
IsOutbound: isOutbound,
rxMsgChan: make(chan *protobuf.Message, remoteRxMsgChanLen),
txMsgChan: make(chan *protobuf.Message, remoteTxMsgChanLen),
txMsgCache: txMsgCache,
}
return remoteNode, nil
}
func (rn *RemoteNode) String() string {
if !rn.IsReady() {
return fmt.Sprintf("<%s>", rn.conn.RemoteAddr().String())
}
return fmt.Sprintf("%v<%s>", rn.Node, rn.conn.RemoteAddr().String())
}
// IsReady returns if the remote node is ready
func (rn *RemoteNode) IsReady() bool {
rn.readyLock.RLock()
defer rn.readyLock.RUnlock()
return rn.ready
}
// Start starts the runtime loop of the remote node
func (rn *RemoteNode) Start() error {
rn.StartOnce.Do(func() {
if rn.IsStopped() {
return
}
go rn.handleMsg()
go rn.rx()
go rn.tx()
go func() {
var n *protobuf.Node
var err error
for i := 0; i < startRetries; i++ {
n, err = rn.GetNode()
if err == nil {
break
}
}
if err != nil {
rn.Stop(fmt.Errorf("Get node error: %s", err))
return
}
var existing *RemoteNode
rn.LocalNode.neighbors.Range(func(key, value interface{}) bool {
remoteNode, ok := value.(*RemoteNode)
if ok && remoteNode.IsReady() && bytes.Equal(remoteNode.Id, n.Id) {
if remoteNode.IsStopped() {
log.Warningf("Remove stopped remote node %v from list", remoteNode)
rn.LocalNode.neighbors.Delete(key)
} else {
existing = remoteNode
}
return false
}
return true
})
if existing != nil {
rn.Stop(fmt.Errorf("Node with id %x is already connected at addr %s", existing.Id, existing.conn.RemoteAddr().String()))
return
}
remoteAddr, err := transport.Parse(n.Addr)
if err != nil {
rn.Stop(fmt.Errorf("Parse node addr %s error: %s", n.Addr, err))
return
}
if remoteAddr.Host == "" {
connAddr := rn.conn.RemoteAddr().String()
remoteAddr.Host, _, err = net.SplitHostPort(connAddr)
if err != nil {
rn.Stop(fmt.Errorf("Parse conn remote addr %s error: %s", connAddr, err))
return
}
n.Addr = remoteAddr.String()
}
rn.Node.Node = n
rn.readyLock.Lock()
rn.ready = true
rn.readyLock.Unlock()
for _, f := range rn.LocalNode.middlewareStore.remoteNodeReady {
if !f(rn) {
break
}
}
}()
})
return nil
}
// Stop stops the runtime loop of the remote node
func (rn *RemoteNode) | (err error) {
rn.StopOnce.Do(func() {
if err != nil {
log.Warningf("Remote node %v stops because of error: %s", rn, err)
} else {
log.Infof("Remote node %v stops", rn)
}
err = rn.NotifyStop()
if err != nil {
log.Warning("Notify remote node stop error:", err)
}
time.AfterFunc(stopGracePeriod, func() {
rn.LifeCycle.Stop()
if rn.conn != nil {
rn.LocalNode.neighbors.Delete(rn.conn.RemoteAddr().String())
rn.conn.Close()
}
for _, f := range rn.LocalNode.middlewareStore.remoteNodeDisconnected {
if !f(rn) {
break
}
}
})
})
}
// handleMsg starts a loop that handles received msg
func (rn *RemoteNode) handleMsg() {
var msg *protobuf.Message
var remoteMsg *RemoteMessage
var msgChan chan *RemoteMessage
var err error
keepAliveTimeoutTimer := time.NewTimer(keepAliveTimeout)
for {
if rn.IsStopped() {
util.StopTimer(keepAliveTimeoutTimer)
return
}
select {
case msg = <-rn.rxMsgChan:
remoteMsg, err = NewRemoteMessage(rn, msg)
if err != nil {
log.Error(err)
continue
}
msgChan, err = rn.LocalNode.GetRxMsgChan(msg.RoutingType)
if err != nil {
log.Error(err)
continue
}
select {
case msgChan <- remoteMsg:
default:
log.Warningf("Msg chan full for routing type %d, discarding msg", msg.RoutingType)
}
case <-keepAliveTimeoutTimer.C:
rn.Stop(errors.New("keepalive timeout"))
}
util.ResetTimer(keepAliveTimeoutTimer, keepAliveTimeout)
}
}
// handleMsgBuf unmarshals buf to msg and sends it to the msg chan of the local node
func (rn *RemoteNode) handleMsgBuf(buf []byte) {
msg := &protobuf.Message{}
err := proto.Unmarshal(buf, msg)
if err != nil {
rn.Stop(fmt.Errorf("unmarshal msg error: %s", err))
return
}
select {
case rn.rxMsgChan <- msg:
default:
log.Warning("Rx msg chan full, discarding msg")
}
}
// rx receives and handles data from RemoteNode rn
func (rn *RemoteNode) rx() {
msgLenBuf := make([]byte, msgLenBytes)
var readLen int
for {
if rn.IsStopped() {
return
}
l, err := rn.conn.Read(msgLenBuf)
if err != nil {
rn.Stop(fmt.Errorf("Read msg len error: %s", err))
continue
}
if l != msgLenBytes {
rn.Stop(fmt.Errorf("Msg len has %d bytes, which is less than expected %d", l, msgLenBytes))
continue
}
msgLen := int(binary.BigEndian.Uint32(msgLenBuf))
if msgLen < 0 {
rn.Stop(fmt.Errorf("Msg len %d overflow", msgLen))
continue
}
if msgLen > maxMsgSize {
rn.Stop(fmt.Errorf("Msg size %d exceeds max msg size %d", msgLen, maxMsgSize))
continue
}
buf := make([]byte, msgLen)
for readLen = 0; readLen < msgLen; readLen += l {
l, err = rn.conn.Read(buf[readLen:])
if err != nil {
break
}
}
if err != nil {
rn.Stop(fmt.Errorf("Read msg error: %s", err))
continue
}
if readLen > msgLen {
rn.Stop(fmt.Errorf("Msg has %d bytes, which is more than expected %d", readLen, msgLen))
continue
}
rn.handleMsgBuf(buf)
}
}
// tx marshals and sends data in txMsgChan to RemoteNode rn
func (rn *RemoteNode) tx() {
var msg *protobuf.Message
var buf []byte
var err error
msgLenBuf := make([]byte, msgLenBytes)
keepAliveTimer := time.NewTimer(keepAliveInterval)
for {
if rn.IsStopped() {
util.StopTimer(keepAliveTimer)
return
}
select {
case msg = <-rn.txMsgChan:
buf, err = proto.Marshal(msg)
if err != nil {
log.Error(err)
continue
}
if len(buf) > maxMsgSize {
log.Errorf("Msg size %d exceeds max msg size %d", len(buf), maxMsgSize)
continue
}
binary.BigEndian.PutUint32(msgLenBuf, uint32(len(buf)))
_, err = rn.conn.Write(msgLenBuf)
if err != nil {
rn.Stop(fmt.Errorf("Write to conn error: %s", err))
continue
}
_, err = rn.conn.Write(buf | Stop | identifier_name |
neurosky_ecg.py | #parse length byte
while True:
pLength = ord(self.ser.read(1))
if pLength != SYNC_BYTE:
break
if pLength > 169:
continue
#print "L: %i" % pLength
# collect payload bytes
payload = self.ser.read(pLength)
payload = [ord(x) for x in payload] #convert to int from string
#print "payload: " + str(payload).strip('[]')
# ones complement inverse of 8-bit payload sum
checksum = sum(payload) & 0xFF
checksum = ~checksum & 0xFF
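# e.g. a payload [0x80, 0x02, 0x01, 0x10] sums to 0x93, and ~0x93 & 0xFF == 0x6C,
# which must equal the checksum byte read next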
# catch and verify checksum byte
chk = ord(self.ser.read(1))
#print "chk: " + str(checksum)
if chk != checksum:
print "checksum error, %i != %i" % (chk, checksum)
continue
output = self._parseData(payload)
lead_status = next(( d for d in output if 'leadoff' in d), None)
if lead_status is not None:
if cur_leadstatus != lead_status['leadoff']:
#we have a change
if lead_status['leadoff']==200:
print "LEAD ON"
elif lead_status['leadoff']==0:
print "LEAD OFF"
cur_leadstatus = lead_status['leadoff']
# store the output data in a queue
# first, create a tuple with the sample index and dict with the timestamp and ecg
ecgdict = next(((i,d) for i,d in enumerate(output) if 'ecg_raw' in d), None)
if ecgdict is not None and sample_count>self.Fs*2:
#let's just ignore the first 2 seconds of crappy data
ecgdict[1]['leadoff'] = cur_leadstatus
#print ecgdict[1]
self.ecg_buffer.put(ecgdict[1]) # this should save the ecg and timestamp keys
return
def isBufferEmpty(self):
""" check to see if ecg buffer is empty """
return self.ecg_buffer.empty()
def popBuffer(self):
""" get first value (dict) in the ecg_buffer """
return self.ecg_buffer.get()
def _ecgInitAlgLib(self,libname='TgEcgAlg64.dll', power_frequency=60):
""" initialize the TgEcg algorithm dll """
if sys.maxsize > (2**32)/2-1: #running 64 bit
print "loading Neurosky tg_ecg library, 64 bit"
libname = 'TgEcgAlg64.dll'
else:
print "loading Neurosky tg_ecg library, 32 bit"
#libname = 'TgEcgAlg.dll'
libname = 'tg_ecg.so'
print "loading analysis library: ", libname
E = cdll.LoadLibrary(libname)
E.tg_ecg_do_hrv_sdnn(0)
E.tg_ecg_do_relaxation_level(0)
E.tg_ecg_do_respiratory_rate(0)
E.tg_ecg_do_rri_precise(0)
E.tg_ecg_set_power_line_freq(power_frequency)
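# ctypes assumes int return values by default, so the double-returning
# smoothed-ECG getter below needs its restype declared explicitly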
E.tg_ecg_get_raw_smoothed.restype = c_double
E.tg_ecg_init() # init the library with selected options
return E
def ecgResetAlgLib(self):
""" reset ecg algorithm """
print "resetting ecg analysis library"
self.analyze.tg_ecg_init()
self.starttime = None
self.curtime = None
def getTotalNumRRI(self):
"""
return the total number of RRIs held in the algorithm buffer
"""
return self.analyze.tg_ecg_get_total_rri_count()
def ecgalgAnalyzeRaw(self, D, nHRV=30): #, dataqueue):
"""
test to see if we have values in the ecg_buffer, and if so, pass
the most recent raw_ecg value into the TgEcg analysis framework
Returns dict with timestamp, filtered ECG, HR, and HRV, if available
This function expects a dict as input, with at least the keys 'timestamp' and 'ecg_raw' (as produced by popBuffer())
"""
#D = self.popBuffer()
self.analyze.tg_ecg_update(D['ecg_raw'])
#ecg_filt = self.analyze.tg_ecg_get_raw_filtered() #delayed against raw by 211 samples
ecg_filt = self.analyze.tg_ecg_get_raw_smoothed() #delayed against raw by 450 samples, if 60Hz powerline
D['ecg_filt'] = ecg_filt
if self.analyze.tg_ecg_is_r_peak():
#print "found peak"
num_rri = self.analyze.tg_ecg_get_total_rri_count()
rri = self.analyze.tg_ecg_get_rri()
hr = self.analyze.tg_ecg_compute_hr_now()
D['rri']= rri
D['hr'] = hr
print "%i HR: %i (rri: %i)" % (num_rri, 60000* 1/rri, rri)
if num_rri>=15 and num_rri < nHRV:
# slowly increase number of RRIs in HRV calculation until we reach nHRV
# This is equivalent to starting with a window of 15 RRIs and increasing the window length to max=nHRV
nHRV = num_rri
if num_rri >= nHRV and (num_rri+2) % self.HRV_UPDATE == 0:
#calculate every HRV_UPDATE heartbeats, starting at nHRV (window increases from 15 to 30)
hrv = self.analyze.tg_ecg_compute_hrv(nHRV)
D['hrv'] = hrv
print "hrv: " + str(hrv)
return D
if __name__ == "__main__":
"""
all of the code below is used for visualization and testing of the ECG framework
not to be used as production code, but can be used for examples on how to use
the NSK framework
"""
import numpy as np
#from matplotlib import pyplot as plt
import pylab as plt
# hack to get interactive plot working
# https://github.com/matplotlib/matplotlib/issues/3505
sys.ps1 = 'IAMAHACK'
target_port = 'COM3' #production windows box
#target_port = 'COM8' #mike's laptop
plot_fig=True
ecgdict = []
try:
nskECG = NeuroskyECG(target_port)
except serial.serialutil.SerialException:
print "Could not open target serial port: %s" % target_port
sys.exit(1)
nskECG.start()
if plot_fig:
plt.ion()
#load the queues to plot
# t = [ x/nskECG.Fs for x in range(0,nskECG.Fs*1)]
# ecgval = [0]*nskECG.Fs*1
t=[time.time()]
ecgval =[0]
#set up the test plot
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(2,1,1) #smoothed ECG
#ecgtrace, = plt.plot(0,0)
ecgtrace, = plt.plot(t,ecgval)
ax1.set_ylim((-10000, 10000))
ax2 = fig.add_subplot(2,1,2) # HRV
hrvtrace, = ax2.plot(t,[0])
ax2.set_ylim((0, 300))
time.sleep(0.1)
##########################################
sample_count = 0
leadoff_count = 0
isreset=False
while True:
if not nskECG.isBufferEmpty():
sample_count+=1
#print "buffer len", nskECG.ecg_buffer.qsize()
D = nskECG.popBuffer()
# ignore data prior to leadoff
if D['leadoff']==0 and sample_count > nskECG.Fs*2:
leadoff_count+=1
#print "leadoff", D['leadoff']
if leadoff_count>nskECG.Fs*2: #more than 2 seconds of leadoff, drop them
#if not isreset: # we haven't reset recently, DO IT
if nskECG.analyze.tg_ecg_get_total_rri_count()!=0:
isreset = True
ecgdict = [] #reset the buffer
nskECG.ecgResetAlgLib()
print "num rri post reset", nskECG.analyze.tg_ecg_get_total_rri_count()
nskECG.ecg_buffer.task_done()
continue
else: # leadoff==200, or lead on
#print "done resetting, loading data again"
leadoff_count=0 | random_line_split |
||
neurosky_ecg.py | t1.daemon = True
t1.start()
print "Started CardioChip reader"
def check(self):
""" checks if thread currently exists """
return self.connected
def stop(self):
|
def setHRVUpdate(self, numRRI):
"""
set the number of RR intervals to count
between updating the HRV value
"""
self.HRV_UPDATE = numRRI
def _parseData(self, payload):
"""
given the byte payload from the serial connection, parse the first byte
as the code and return a list of dicts of all values found in the packet
dicts will be of the format: {'timestamp': t, <codename>: codeval}
Timestamps are based on the first raw_ecg data received on the host computer, and
extrapolated using a sample frequency of 512 Hz from there. This is accurate in the short term,
but should not be used for longer (>10 min) recordings.
"""
out=[]
bytesParsed = 0
while bytesParsed < len(payload):
#check for the extended Code Level, code and length
#count the number of EXCODE_BYTE
#extendedCodeLevel = sum([1 for x in data if x == EXCODE_BYTE] )
#bytesParsed += extendedCodeLevel
#identify the length of the expected bytes in the payload
code = payload[bytesParsed]
bytesParsed +=1
if code > 0x7F:
# multi-byte code, length > 1
length = payload[bytesParsed]
bytesParsed +=1
else:
length = 1
if code == SENSOR_STATUS:
# value of 0==no contact, 200==contact
#print "leadoff: %i" % payload[bytesParsed]
out.append( {'timestamp': self.curtime, 'leadoff': payload[bytesParsed] } )
bytesParsed +=1
elif code == HEART_RATE:
#print "HR: %i" % payload[bytesParsed]
out.append( {'timestamp': self.curtime, 'HR': payload[bytesParsed:] } )
bytesParsed +=1
elif code == CONFIG_BYTE:
#print "config: %i" % payload[bytesParsed]
out.append( {'timestamp': self.curtime, 'config': payload[bytesParsed:] } )
bytesParsed +=1
elif code == RAW_ECG:
# raw value is between -32768 and 32767, in twos compliment form
# if the raw value is higher than 32768, it should be rolled around to allow for negative values
raw = payload[bytesParsed]*256 + payload[bytesParsed+1]
if raw >= 32768:
raw = raw - 65536
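# e.g. bytes 0xFF, 0xFE give 0xFFFE == 65534, which rolls around to -2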
#print "ecg: %i" % ecg
# create the timestamp on each ECG sample, starting from the first
if self.starttime is None:
self.starttime = time.time()
self.curtime = self.starttime
else:
self.curtime = self.curtime + 1./self.Fs
out.append( {'timestamp': self.curtime, 'ecg_raw': raw } )
bytesParsed += length
elif code == DEBUG_1:
#print "debug1: " + str(payload[bytesParsed:]).strip('[]')
out.append( {'timestamp': self.curtime, 'debug1': payload[bytesParsed:] } )
bytesParsed += length
elif code == DEBUG_2:
#print "debug2: " + str(payload[bytesParsed:]).strip('[]')
out.append( {'timestamp': self.curtime, 'debug2': payload[bytesParsed:] } )
bytesParsed += length
else:
print "unknown code: %i" % code
return out
def _read_cardiochip(self):
"""
read data packets from the cardiochip starter kit, via the bluetooth serial port
"""
cur_leadstatus = 0
sample_count =0
while self.connected:
sample_count+=1
#check for sync bytes
readbyte = ord(self.ser.read(1))
#print readbyte, SYNC_BYTE
if readbyte != SYNC_BYTE:
continue
readbyte = ord(self.ser.read(1))
if readbyte != SYNC_BYTE:
continue
#parse length byte
while True:
pLength = ord(self.ser.read(1))
if pLength != SYNC_BYTE:
break
if pLength > 169:
continue
#print "L: %i" % pLength
# collect payload bytes
payload = self.ser.read(pLength)
payload = [ord(x) for x in payload] #convert to int from string
#print "payload: " + str(payload).strip('[]')
# ones complement inverse of 8-bit payload sum
checksum = sum(payload) & 0xFF
checksum = ~checksum & 0xFF
# catch and verify checksum byte
chk = ord(self.ser.read(1))
#print "chk: " + str(checksum)
if chk != checksum:
print "checksum error, %i != %i" % (chk, checksum)
continue
output = self._parseData(payload)
lead_status = next(( d for d in output if 'leadoff' in d), None)
if lead_status is not None:
if cur_leadstatus != lead_status['leadoff']:
#we have a change
if lead_status['leadoff']==200:
print "LEAD ON"
elif lead_status['leadoff']==0:
print "LEAD OFF"
cur_leadstatus = lead_status['leadoff']
# store the output data in a queue
# first, create a tuple with the sample index and dict with the timestamp and ecg
ecgdict = next(((i,d) for i,d in enumerate(output) if 'ecg_raw' in d), None)
if ecgdict is not None and sample_count>self.Fs*2:
#let's just ignore the first 2 seconds of crappy data
ecgdict[1]['leadoff'] = cur_leadstatus
#print ecgdict[1]
self.ecg_buffer.put(ecgdict[1]) # this should save the ecg and timestamp keys
return
def isBufferEmpty(self):
""" check to see if ecg buffer is empty """
return self.ecg_buffer.empty()
def popBuffer(self):
""" get first value (dict) in the ecg_buffer """
return self.ecg_buffer.get()
def _ecgInitAlgLib(self,libname='TgEcgAlg64.dll', power_frequency=60):
""" initialize the TgEcg algorithm dll """
if sys.maxsize > (2**32)/2-1: #running 64 bit
print "loading Neurosky tg_ecg library, 64 bit"
libname = 'TgEcgAlg64.dll'
else:
print "loading Neurosky tg_ecg library, 32 bit"
#libname = 'TgEcgAlg.dll'
libname = 'tg_ecg.so'
print "loading analysis library: ", libname
E = cdll.LoadLibrary(libname)
E.tg_ecg_do_hrv_sdnn(0)
E.tg_ecg_do_relaxation_level(0)
E.tg_ecg_do_respiratory_rate(0)
E.tg_ecg_do_rri_precise(0)
E.tg_ecg_set_power_line_freq(power_frequency)
E.tg_ecg_get_raw_smoothed.restype = c_double
E.tg_ecg_init() # init the library with selected options
return E
def ecgResetAlgLib(self):
""" reset ecg algorithm """
print "resetting ecg analysis library"
self.analyze.tg_ecg_init()
self.starttime = None
self.curtime = None
def getTotalNumRRI(self):
"""
return the total number of RRIs held in the algorithm buffer
"""
return self.analyze.tg_ecg_get_total_rri_count()
def ecgalgAnalyzeRaw(self, D, nHRV=30): #, dataqueue):
"""
test to see if we have values in the ecg_buffer, and if so, pass
the most recent raw_ecg value into the TgEcg analysis framework
Returns dict with timestamp, filtered ECG, HR, and HRV, if available
This function expects a dict as input, with at least the keys 'timestamp' and 'ecg_raw' (as produced by popBuffer())
"""
#D = self.popBuffer()
self.analyze.tg_ecg_update(D['ecg_raw'])
#ecg_filt = self.analyze.tg_ecg_get_raw_filtered() #delayed against raw by 211 samples
ecg_filt = self.analyze.tg_ecg_get_raw_smoothed() #delayed against raw by 450 samples, if 60Hz powerline
D['ecg_filt'] = ecg_filt
if self.analyze.tg_ecg_is_r_peak():
#print " | """ stops running thread """
self.connected = False | identifier_body |