file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
lstm.py | '''
Created on 18-Nov-2019
@author: 91984
'''
import pandas as pd
import numpy as np
import sys
# from datetime import datetime
import statsmodels.api as sm
import matplotlib.pylab as plt
# df = pd.read_csv('C:\\Users\\91984\\Desktop\\shampoo.csv')
from datetime import datetime
from pandas.plotting import register_matplotlib_converters
from sklearn import neighbors
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
register_matplotlib_converters()
import mysql.connector
from mysql.connector import Error
connection = mysql.connector.connect(host='localhost',
database='dessertation',
user='root',
password='123456',
auth_plugin='mysql_native_password')
# print ('Argument List:', str(sys.argv[1]))
id = int(sys.argv[1])
# id=1
sql_select_Query = "SELECT sql_query FROM querytable where id=%s;"
cursor = connection.cursor(buffered=True)
cursor.execute(sql_select_Query, (id,))
record = cursor.fetchone()
for row in record:
sql_select_Query = row
print(row)
# print(sql_select_Query)
# sql_select_Query="SELECT concat( year,Month) as Date , unit_price as data FROM oildata"
df = pd.read_sql(sql_select_Query, connection);
columnsNamesArr = df.columns.values
listOfColumnNames = list(columnsNamesArr)
print(listOfColumnNames)
print(len(listOfColumnNames))
for y in range(1, len(listOfColumnNames)):
df1=df.iloc[:,[0,y]]
df1[listOfColumnNames[0]] = pd.to_datetime(df.iloc[:, 0], format='%Y-%m')
print( df1[listOfColumnNames[y]][:2])
# df['Date'] = pd.to_datetime(df['Date'])
df1.set_index(listOfColumnNames[0], inplace=True)
#
data = df1.sort_index(ascending=True, axis=0)
from pmdarima.arima import auto_arima
# split into train and test sets
train_size = int(len(df1) * 0.80)
test_size = len(df1) - train_size
train = data[listOfColumnNames[y]][:train_size]
valid = data[listOfColumnNames[y]][train_size:]
valid.is_copy = False
print(len(train), len(valid))
# training = train[listOfColumnNames[y]]
# validation = valid[listOfColumnNames[y]]
#
# model = auto_arima(training, start_p=1, start_q=1,max_p=3, max_q=3, m=12,start_P=0, seasonal=False,d=1, D=1, trace=True,error_action='ignore',suppress_warnings=True)
# # model = auto_arima(training,seasonal=True,trace=True,error_action='ignore',suppress_warnings=True)
# model.fit(training)
# forecast = model.predict(n_periods=test_size)
# # rms=np.sqrt(np.mean(np.power((np.array(valid['Close'])-np.array(forecast['Prediction'])),2)))
# last_row = df.iloc[-1]
# print(last_row)
#
# last_date = pd.DataFrame()
#
# last_date['Predictions'] = 0
#
# # last_date['Date'] = pd.date_range(last_row['Date'], periods = 12, freq ='M')
# # last_date.set_index('Date',inplace=True)
# # print(last_date)
# # print( last_date.index)
# forecast = pd.DataFrame(forecast,index = valid.index,columns=['Prediction'])
# plt.plot(train['data'])
# plt.plot(valid['data'])
# print(forecast)
# plt.plot(forecast['Prediction'])
# plt.show()
#
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.callbacks import ModelCheckpoint, EarlyStopping
print(data)
train_size = int(len(df1) * 0.80)
test_size = len(df1) - train_size
train = data[0:train_size]
valid = data[train_size:]
# converting dataset into x_train and y_train
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data)
x_train, y_train = [], []
for i in range(6, len(train)):
x_train.append(scaled_data[i - 6:i, 0])
y_train.append(scaled_data[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
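# Editor's note (illustrative, not part of the original script): the loop above builds a
# supervised dataset with a sliding look-back window of 6 time steps, e.g.
#   x_train[0] = scaled_data[0:6, 0]   # observations t = 0..5
#   y_train[0] = scaled_data[6, 0]     # observation  t = 6
#   x_train[1] = scaled_data[1:7, 0]   # observations t = 1..6
#   y_train[1] = scaled_data[7, 0]     # observation  t = 7
# and the reshape above gives x_train the (samples, timesteps, features) shape
# expected by Keras LSTM layers, here (len(train) - 6, 6, 1).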
def get_val():
X1, y1 = [], []
print(train_size + 6)
print(len(df))
for i in range(train_size + 6, len(df)):
X1.append(scaled_data[i - 6:i, 0])
y1.append(scaled_data[i, 0])
X1, y1 = np.array(X1), np.array(y1)
print(X1)
print(len(X1))
X1 = np.reshape(X1, (X1.shape[0], X1.shape[1], 1))
return X1, y1
valX, valY = get_val()
# create and fit the LSTM network
from pandas import DataFrame
train1 = DataFrame()
val1 = DataFrame()
# for i in range(5):
model = Sequential()
model.add(LSTM(units=300, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(units=25))
model.add(Dropout(0.15))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
history_callback = model.fit(x_train, y_train, epochs=80, batch_size=12, validation_data=(valX, valY), verbose=1)
loss_history = history_callback.history["loss"]
train1['0'] = pd.Series(history_callback.history['loss'])  # single run: 'i' came from the commented-out multi-run loop above
val1['0'] = pd.Series(history_callback.history['val_loss'])
# plot train and validation loss across multiple runs
plt.plot(train1, color='blue', label='train')
plt.plot(val1, color='orange', label='validation')
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
# predict the validation-period values, using a sliding window of the past 6 observations
# (the slice below starts 6 points before the validation set so the first window is complete)
inputs = data[len(data) - len(valid) - 6:].values
inputs = inputs.reshape(-1, 1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(6, inputs.shape[0]):
X_test.append(inputs[i - 6:i, 0])
X_test = np.array(X_test)
print("-----------------")
print(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
closing_price = model.predict(X_test)
print("----------------+++==-")
print(len(closing_price))
closing_price = scaler.inverse_transform(closing_price)
rms = np.sqrt(np.mean(np.power((valid - closing_price), 2)))
print(rms)
train = data[:train_size]
valid = data[train_size:]
valid['Predictions'] = closing_price
# last_date['Predictions']=closing_price
plt.plot(train[listOfColumnNames[y]])
plt.plot(valid[[listOfColumnNames[y], 'Predictions']])
plt.show()
# save the model to disk
import pickle
dateTimeObj = datetime.now()
date_time = dateTimeObj.strftime("%m-%d-%Y_%H-%M-%S")
filename = "Query_"+str(id)+"_ p_value_"+str(y)+"_"+date_time
pickle.dump(model, open(filename, 'wb'))
print(valid[[listOfColumnNames[y], 'Predictions']])
mySql_insert_query = "INSERT INTO prediction_model (sql_id, p_value_"+str(y)+") VALUES (%s, %s) ON DUPLICATE KEY UPDATE p_value_"+str(y)+"=%s;"
cursor.execute(mySql_insert_query, (id, filename, filename))  # values are passed as parameters; only the column name is interpolated
connection.commit()
###############
# train = df
# print(train)
# from keras.preprocessing.sequence import TimeseriesGenerator
# scaler.fit(train)
# train = scaler.transform(train)
# n_input = 6
# n_features = 1
# generator = TimeseriesGenerator(train, train, length=n_input, batch_size=12)
# model.fit_generator(generator,epochs=30)
# pred_list = []
# batch = train[-n_input:].reshape((1, n_input, n_features))
# for i in range(n_input):
# pred_list.append(model.predict(batch)[0])
# batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
#
#
# from pandas.tseries.offsets import DateOffset
# add_dates = [df.index[-1] + DateOffset(months=x) for x in range(0,7) ]
# future_dates = pd.DataFrame(index=add_dates[1:],columns=df.columns)
#
# df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
| # print(df_proj)
#
# plt.figure(figsize=(20, 5))
# plt.plot(df_proj.index, df_proj['data'])
# plt.plot(df_proj.index, df_proj['Prediction'], color='r')
# plt.legend(loc='best', fontsize='xx-large')
# plt.xticks(fontsize=18)
# plt.yticks(fontsize=16)
# plt.show()
# #
# # scaler = MinMaxScaler(feature_range=(0, 1))
# # train = scaler.fit_transform(dataset)
# # scaler.fit(train)
# # train = scaler.transform(train)
# # n_input = 6
# # n_features = 1
# # from keras.preprocessing.sequence import TimeseriesGenerator
# #
# # pred_list = []
# #
# # batch = train[-n_input:].reshape((1, n_input, n_features))
# #
# # for i in range(n_input):
# # pred_list.append(model.predict(batch)[0])
# # batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
# #
# # df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),index=df[-n_input:].index, columns=['Prediction'])
# # df_test = pd.concat([df,df_predict], axis=1)
# #
# #
# # generator = TimeseriesGenerator(train, train, length=n_input, batch_size=6)
# # model.fit_generator(generator,epochs=25)
# # pred_list = []
# # batch = train[-n_input:].reshape((1, n_input, n_features))
# # for i in range(n_input):
# # pred_list.append(model.predict(batch)[0])
# # batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
# #
# # from pandas.tseries.offsets import DateOffset
# # add_dates = [df.index[-1] + DateOffset(months=x) for x in range(0,7) ]
# # future_dates = pd.DataFrame(index=add_dates[1:],columns=df.columns)
# #
# #
# # df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
# # index=future_dates[-n_input:].index, columns=['Prediction'])
# #
# # valid = pd.concat([df,df_predict], axis=1)
# #
# # print(valid)
#
# plt.plot(df['data'])
# plt.plot(valid['Prediction'])
# plt.show()
# return training data
def get_train():
X1, y1 = list(), list()
for i in range(6, len(train)):
X1.append(scaled_data[i - 6:i, 0])
y1.append(scaled_data[i, 0])
X1, y1 = np.array(X1), np.array(y1)
print(X1)
print(len(X1))
X1 = np.reshape(X1, (X1.shape[0], X1.shape[1], 1))
return X1, y1
# return validation data
def get_val():
X1, y1 = [], []
print(train_size + 6)
print(len(df))
for i in range(train_size + 6, len(df)):
X1.append(scaled_data[i - 6:i, 0])
y1.append(scaled_data[i, 0])
X1, y1 = np.array(X1), np.array(y1)
print(X1)
print(len(X1))
X1 = np.reshape(X1, (X1.shape[0], X1.shape[1], 1))
return X1, y1
X2, y2 = get_train()
print(X2)
print(y2)
valX, valY = get_val()
print(valX)
print(valY) | # index=future_dates[-n_input:].index, columns=['Prediction'])
#
# df_proj = pd.concat([df,df_predict], axis=1)
#
| random_line_split |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
// TODO: this should be evaluated programmatically, but "no matching in consts allowed" error prevented this
const MAX_COLOR_VAL_STR_LEN: usize = 3;
impl Canvas {
// Create a canvas initialized to all black
pub fn new(width: usize, height: usize) -> Canvas {
Canvas {
width,
height,
data: vec![vec![color!(0, 0, 0); width]; height],
}
}
pub fn write_pixel(&mut self, x: usize, y: usize, color: Color) {
if x < self.width && y < self.height {
self.data[y][x] = color;
} else {
// return fail result
}
}
pub fn pixel_at(&self, x: usize, y: usize) -> Color {
self.data[y][x]
}
// scale/clamp color values from 0-1 to 0-255
fn scale_color(&self, rgb: f32) -> u8 {
(rgb * MAX_COLOR_VAL as f32)
.min(MAX_COLOR_VAL as f32)
.max(0.0) as u8
}
// If current line has no more room for more RGB values, add it to the PPM string and clear it;
// otherwise, add a space separator in preparation for the next RGB value
fn write_rgb_separator(&self, line: &mut String, ppm: &mut String) {
if line.len() < MAX_PPM_LINE_LENGTH - MAX_COLOR_VAL_STR_LEN {
(*line).push(' ');
} else {
ppm.push_str(&line);
ppm.push('\n');
line.clear();
}
}
// Return string containing PPM (portable pixel map) data representing current canvas
pub fn to_ppm(&self) -> String {
let mut ppm = String::new();
// write header
ppm.push_str("P3\n");
ppm.push_str(&(format!("{} {}\n", self.width, self.height)));
ppm.push_str(&(format!("{}\n", MAX_COLOR_VAL)));
// Write pixel data. Each pixel RGB value is written with a separating space or newline;
// new rows are written on new lines for human reading convenience, but lines longer than
// MAX_PPM_LINE_LENGTH must also be split.
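// Illustrative example (editor's addition, not part of the original source):
// a 2x1 canvas holding one red and one white pixel serializes to
//
//   P3
//   2 1
//   255
//   255 0 0 255 255 255
//
// which matches the header written above and the per-row pixel loop below.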
let mut current_line = String::new();
for row in 0..self.height {
current_line.clear();
for (i, column) in (0..self.width).enumerate() {
let color = self.pixel_at(column, row);
let r = self.scale_color(color.r);
let g = self.scale_color(color.g);
let b = self.scale_color(color.b);
current_line.push_str(&r.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&g.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&b.to_string());
// if not at end of row yet, write a space or newline if the next point will be on this line
if i != self.width - 1 {
self.write_rgb_separator(&mut current_line, &mut ppm);
}
}
if !current_line.is_empty() {
ppm.push_str(&current_line);
ppm.push('\n');
}
}
ppm
}
}
// TODO: proper parsing errors should also contain the line and column number
#[derive(Debug)]
pub enum ParseError {
IoError(io::Error),
IncorrectFormat(String),
ParseIntError(std::num::ParseIntError),
MalformedDimensionHeader(String),
}
impl From<io::Error> for ParseError {
fn from(err: io::Error) -> ParseError {
ParseError::IoError(err)
}
}
impl From<std::num::ParseIntError> for ParseError {
fn from(err: std::num::ParseIntError) -> ParseError {
ParseError::ParseIntError(err)
}
}
type RgbElement = u32;
pub fn canvas_from_ppm<T: Read>(reader: T) -> Result<Canvas, ParseError> {
let buf_reader = BufReader::new(reader);
let mut line_iter = buf_reader.lines().enumerate().filter_map(clean_line);
// TODO: these unwrap()'s are not great; should really fail properly if the file doesn't
// contain this many lines
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
if line != "P3" {
return Err(ParseError::IncorrectFormat(format!(
"Incorrect magic number at line 1: expected P3, found {}",
line
)));
}
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let elements: Vec<&str> = line.split_whitespace().collect();
if elements.len() != 2 {
return Err(ParseError::MalformedDimensionHeader(format!(
"Expected width and height at line 2; found {}",
line
)));
}
let width = elements[0].parse::<usize>()?;
let height = elements[1].parse::<usize>()?;
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let scale = line.parse::<RgbElement>()? as f32;
let mut canvas = Canvas::new(width, height);
let mut raw_rgb: VecDeque<RgbElement> = VecDeque::new();
let mut x = 0;
let mut y = 0;
for (_, (_index, line)) in line_iter.enumerate() {
let line = line?;
let line = line.trim();
let line_rgb = line
.split_whitespace()
.map(|s| s.parse::<RgbElement>())
.collect::<Result<Vec<RgbElement>, std::num::ParseIntError>>()?;
raw_rgb.extend(line_rgb);
while raw_rgb.len() >= 3 {
let r = raw_rgb.pop_front().unwrap() as f32 / scale;
let g = raw_rgb.pop_front().unwrap() as f32 / scale;
let b = raw_rgb.pop_front().unwrap() as f32 / scale;
canvas.write_pixel(x, y, color!(r, g, b));
// move to next canvas pixel
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
}
Ok(canvas)
}
fn clean_line(
(index, line): (usize, Result<String, std::io::Error>),
) -> Option<(usize, Result<String, std::io::Error>)> {
match line {
Ok(s) => {
let s = s.trim();
if s.starts_with("#") || s.is_empty() {
None
} else {
Some((index, Ok(s.to_string())))
}
}
Err(_) => Some((index, line)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_height_and_width() {
let c = Canvas::new(15, 10);
assert_eq!(c.width, 15);
assert_eq!(c.height, 10);
}
#[test]
fn test_write_and_read_pixels() {
let mut canvas = Canvas::new(10, 5);
let color = color!(0.1, 0.2, 0.3);
canvas.write_pixel(7, 4, color);
assert_eq!(canvas.pixel_at(7, 4), color);
}
#[test]
fn test_ppm_header() {
let c = Canvas::new(20, 5);
let ppm = c.to_ppm();
let mut lines = ppm.lines();
assert_eq!(lines.next().unwrap(), "P3");
assert_eq!(lines.next().unwrap(), "20 5");
assert_eq!(lines.next().unwrap(), "255");
}
#[test]
fn test_ppm_pixel_data() {
let mut c = Canvas::new(5, 3);
c.write_pixel(0, 0, color!(1.5, 0, 0));
c.write_pixel(2, 1, color!(0, 0.5, 0));
c.write_pixel(4, 2, color!(-0.5, 0, 1));
let ppm = c.to_ppm();
let mut lines = ppm.lines();
// ignore header
lines.next();
lines.next();
lines.next();
assert_eq!(lines.next().unwrap(), "255 0 0 0 0 0 0 0 0 0 0 0 0 0 0");
// book says 128, but I'll trust Rust's rounding for now
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 127 0 0 0 0 0 0 0");
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 0 0 0 0 0 0 0 255");
}
#[test]
fn test_splitting_long_ppm_lines() |
#[test]
fn reading_file_with_wrong_magic_number() {
let ppm = "P32
1 1
255
0 0 0";
let result = canvas_from_ppm(ppm.as_bytes());
match result {
Err(ParseError::IncorrectFormat(msg)) => {
assert!(msg.contains("Incorrect magic number"))
}
_ => assert!(false, "Should return IncorrectFormat error"),
}
}
#[test]
fn reading_ppm_returns_canvas_with_correct_size() {
let ppm = "P3
10 2
255
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.width, 10);
assert_eq!(canvas.height, 2);
}
#[test]
fn reading_pixel_data_from_ppm_file() {
let ppm = "P3
4 3
255
255 127 0 0 127 255 127 255 0 255 255 255
0 0 0 255 0 0 0 255 0 0 0 255
255 255 0 0 255 255 255 0 255 127 127 127";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
let test_data = vec![
("1", 0, 0, color!(1, 0.49803922, 0)),
("2", 1, 0, color!(0, 0.49803922, 1)),
("3", 2, 0, color!(0.49803922, 1, 0)),
("4", 3, 0, color!(1, 1, 1)),
("5", 0, 1, color!(0, 0, 0)),
("6", 1, 1, color!(1, 0, 0)),
("7", 2, 1, color!(0, 1, 0)),
("8", 3, 1, color!(0, 0, 1)),
("9", 0, 2, color!(1, 1, 0)),
("10", 1, 2, color!(0, 1, 1)),
("11", 2, 2, color!(1, 0, 1)),
("12", 3, 2, color!(0.49803922, 0.49803922, 0.49803922)),
];
for (name, x, y, expected_color) in test_data {
println!("Case {}", name);
assert_abs_diff_eq!(canvas.pixel_at(x, y), expected_color);
}
}
#[test]
fn ppm_parsing_ignores_comment_lines() {
let ppm = "P3
# this is a comment
2 1
# this, too
255
# another comment
255 255 255
# oh, no, comments in the pixel data!
255 0 255
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(1, 1, 1));
assert_eq!(canvas.pixel_at(1, 0), color!(1, 0, 1));
}
#[test]
fn ppm_parsing_allows_rgb_triplet_to_span_lines() {
let ppm = "P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_skips_empty_lines() {
let ppm = "
P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_respects_scale_setting() {
let ppm = "P3
2 2
100
100 100 100 50 50 50
75 50 25 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 1), color!(0.75, 0.5, 0.25));
}
}
| {
let mut canvas = Canvas::new(10, 2);
let color = color!(1, 0.8, 0.6);
// TODO: maybe turn this into a function on canvas?
for row in 0..canvas.height {
for column in 0..canvas.width {
canvas.write_pixel(column, row, color);
}
}
let ppm = canvas.to_ppm();
let mut lines = ppm.lines();
// skip header
lines.next();
lines.next();
lines.next();
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
} | identifier_body |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
// TODO: this should be evaluated programmatically, but "no matching in consts allowed" error prevented this
const MAX_COLOR_VAL_STR_LEN: usize = 3;
impl Canvas {
// Create a canvas initialized to all black
pub fn new(width: usize, height: usize) -> Canvas {
Canvas {
width,
height,
data: vec![vec![color!(0, 0, 0); width]; height],
}
}
pub fn write_pixel(&mut self, x: usize, y: usize, color: Color) {
if x < self.width && y < self.height {
self.data[y][x] = color;
} else {
// return fail result
}
}
pub fn pixel_at(&self, x: usize, y: usize) -> Color {
self.data[y][x]
}
// scale/clamp color values from 0-1 to 0-255
fn scale_color(&self, rgb: f32) -> u8 {
(rgb * MAX_COLOR_VAL as f32)
.min(MAX_COLOR_VAL as f32)
.max(0.0) as u8
}
// If current line has no more room for more RGB values, add it to the PPM string and clear it;
// otherwise, add a space separator in preparation for the next RGB value
fn | (&self, line: &mut String, ppm: &mut String) {
if line.len() < MAX_PPM_LINE_LENGTH - MAX_COLOR_VAL_STR_LEN {
(*line).push(' ');
} else {
ppm.push_str(&line);
ppm.push('\n');
line.clear();
}
}
// Return string containing PPM (portable pixel map) data representing current canvas
pub fn to_ppm(&self) -> String {
let mut ppm = String::new();
// write header
ppm.push_str("P3\n");
ppm.push_str(&(format!("{} {}\n", self.width, self.height)));
ppm.push_str(&(format!("{}\n", MAX_COLOR_VAL)));
// Write pixel data. Each pixel RGB value is written with a separating space or newline;
// new rows are written on new lines for human reading convenience, but lines longer than
// MAX_PPM_LINE_LENGTH must also be split.
let mut current_line = String::new();
for row in 0..self.height {
current_line.clear();
for (i, column) in (0..self.width).enumerate() {
let color = self.pixel_at(column, row);
let r = self.scale_color(color.r);
let g = self.scale_color(color.g);
let b = self.scale_color(color.b);
current_line.push_str(&r.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&g.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&b.to_string());
// if not at end of row yet, write a space or newline if the next point will be on this line
if i != self.width - 1 {
self.write_rgb_separator(&mut current_line, &mut ppm);
}
}
if !current_line.is_empty() {
ppm.push_str(&current_line);
ppm.push('\n');
}
}
ppm
}
}
// TODO: proper parsing errors should also contain the line and column number
#[derive(Debug)]
pub enum ParseError {
IoError(io::Error),
IncorrectFormat(String),
ParseIntError(std::num::ParseIntError),
MalformedDimensionHeader(String),
}
impl From<io::Error> for ParseError {
fn from(err: io::Error) -> ParseError {
ParseError::IoError(err)
}
}
impl From<std::num::ParseIntError> for ParseError {
fn from(err: std::num::ParseIntError) -> ParseError {
ParseError::ParseIntError(err)
}
}
type RgbElement = u32;
pub fn canvas_from_ppm<T: Read>(reader: T) -> Result<Canvas, ParseError> {
let buf_reader = BufReader::new(reader);
let mut line_iter = buf_reader.lines().enumerate().filter_map(clean_line);
// TODO: these unwrap()'s are not great; should really fail properly if the file doesn't
// contain this many lines
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
if line != "P3" {
return Err(ParseError::IncorrectFormat(format!(
"Incorrect magic number at line 1: expected P3, found {}",
line
)));
}
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let elements: Vec<&str> = line.split_whitespace().collect();
if elements.len() != 2 {
return Err(ParseError::MalformedDimensionHeader(format!(
"Expected width and height at line 2; found {}",
line
)));
}
let width = elements[0].parse::<usize>()?;
let height = elements[1].parse::<usize>()?;
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let scale = line.parse::<RgbElement>()? as f32;
let mut canvas = Canvas::new(width, height);
let mut raw_rgb: VecDeque<RgbElement> = VecDeque::new();
let mut x = 0;
let mut y = 0;
for (_, (_index, line)) in line_iter.enumerate() {
let line = line?;
let line = line.trim();
let line_rgb = line
.split_whitespace()
.map(|s| s.parse::<RgbElement>())
.collect::<Result<Vec<RgbElement>, std::num::ParseIntError>>()?;
raw_rgb.extend(line_rgb);
while raw_rgb.len() >= 3 {
let r = raw_rgb.pop_front().unwrap() as f32 / scale;
let g = raw_rgb.pop_front().unwrap() as f32 / scale;
let b = raw_rgb.pop_front().unwrap() as f32 / scale;
canvas.write_pixel(x, y, color!(r, g, b));
// move to next canvas pixel
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
}
Ok(canvas)
}
fn clean_line(
(index, line): (usize, Result<String, std::io::Error>),
) -> Option<(usize, Result<String, std::io::Error>)> {
match line {
Ok(s) => {
let s = s.trim();
if s.starts_with("#") || s.is_empty() {
None
} else {
Some((index, Ok(s.to_string())))
}
}
Err(_) => Some((index, line)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_height_and_width() {
let c = Canvas::new(15, 10);
assert_eq!(c.width, 15);
assert_eq!(c.height, 10);
}
#[test]
fn test_write_and_read_pixels() {
let mut canvas = Canvas::new(10, 5);
let color = color!(0.1, 0.2, 0.3);
canvas.write_pixel(7, 4, color);
assert_eq!(canvas.pixel_at(7, 4), color);
}
#[test]
fn test_ppm_header() {
let c = Canvas::new(20, 5);
let ppm = c.to_ppm();
let mut lines = ppm.lines();
assert_eq!(lines.next().unwrap(), "P3");
assert_eq!(lines.next().unwrap(), "20 5");
assert_eq!(lines.next().unwrap(), "255");
}
#[test]
fn test_ppm_pixel_data() {
let mut c = Canvas::new(5, 3);
c.write_pixel(0, 0, color!(1.5, 0, 0));
c.write_pixel(2, 1, color!(0, 0.5, 0));
c.write_pixel(4, 2, color!(-0.5, 0, 1));
let ppm = c.to_ppm();
let mut lines = ppm.lines();
// ignore header
lines.next();
lines.next();
lines.next();
assert_eq!(lines.next().unwrap(), "255 0 0 0 0 0 0 0 0 0 0 0 0 0 0");
// book says 128, but I'll trust Rust's rounding for now
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 127 0 0 0 0 0 0 0");
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 0 0 0 0 0 0 0 255");
}
#[test]
fn test_splitting_long_ppm_lines() {
let mut canvas = Canvas::new(10, 2);
let color = color!(1, 0.8, 0.6);
// TODO: maybe turn this into a function on canvas?
for row in 0..canvas.height {
for column in 0..canvas.width {
canvas.write_pixel(column, row, color);
}
}
let ppm = canvas.to_ppm();
let mut lines = ppm.lines();
// skip header
lines.next();
lines.next();
lines.next();
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
}
#[test]
fn reading_file_with_wrong_magic_number() {
let ppm = "P32
1 1
255
0 0 0";
let result = canvas_from_ppm(ppm.as_bytes());
match result {
Err(ParseError::IncorrectFormat(msg)) => {
assert!(msg.contains("Incorrect magic number"))
}
_ => assert!(false, "Should return IncorrectFormat error"),
}
}
#[test]
fn reading_ppm_returns_canvas_with_correct_size() {
let ppm = "P3
10 2
255
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.width, 10);
assert_eq!(canvas.height, 2);
}
#[test]
fn reading_pixel_data_from_ppm_file() {
let ppm = "P3
4 3
255
255 127 0 0 127 255 127 255 0 255 255 255
0 0 0 255 0 0 0 255 0 0 0 255
255 255 0 0 255 255 255 0 255 127 127 127";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
let test_data = vec![
("1", 0, 0, color!(1, 0.49803922, 0)),
("2", 1, 0, color!(0, 0.49803922, 1)),
("3", 2, 0, color!(0.49803922, 1, 0)),
("4", 3, 0, color!(1, 1, 1)),
("5", 0, 1, color!(0, 0, 0)),
("6", 1, 1, color!(1, 0, 0)),
("7", 2, 1, color!(0, 1, 0)),
("8", 3, 1, color!(0, 0, 1)),
("9", 0, 2, color!(1, 1, 0)),
("10", 1, 2, color!(0, 1, 1)),
("11", 2, 2, color!(1, 0, 1)),
("12", 3, 2, color!(0.49803922, 0.49803922, 0.49803922)),
];
for (name, x, y, expected_color) in test_data {
println!("Case {}", name);
assert_abs_diff_eq!(canvas.pixel_at(x, y), expected_color);
}
}
#[test]
fn ppm_parsing_ignores_comment_lines() {
let ppm = "P3
# this is a comment
2 1
# this, too
255
# another comment
255 255 255
# oh, no, comments in the pixel data!
255 0 255
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(1, 1, 1));
assert_eq!(canvas.pixel_at(1, 0), color!(1, 0, 1));
}
#[test]
fn ppm_parsing_allows_rgb_triplet_to_span_lines() {
let ppm = "P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_skips_empty_lines() {
let ppm = "
P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_respects_scale_setting() {
let ppm = "P3
2 2
100
100 100 100 50 50 50
75 50 25 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 1), color!(0.75, 0.5, 0.25));
}
}
| write_rgb_separator | identifier_name |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
// TODO: this should be evaluated programmatically, but "no matching in consts allowed" error prevented this
const MAX_COLOR_VAL_STR_LEN: usize = 3;
impl Canvas {
// Create a canvas initialized to all black
pub fn new(width: usize, height: usize) -> Canvas {
Canvas {
width,
height,
data: vec![vec![color!(0, 0, 0); width]; height],
}
}
pub fn write_pixel(&mut self, x: usize, y: usize, color: Color) {
if x < self.width && y < self.height {
self.data[y][x] = color;
} else {
// return fail result
}
}
pub fn pixel_at(&self, x: usize, y: usize) -> Color {
self.data[y][x]
}
// scale/clamp color values from 0-1 to 0-255
fn scale_color(&self, rgb: f32) -> u8 {
(rgb * MAX_COLOR_VAL as f32)
.min(MAX_COLOR_VAL as f32)
.max(0.0) as u8
}
// If current line has no more room for more RGB values, add it to the PPM string and clear it;
// otherwise, add a space separator in preparation for the next RGB value
fn write_rgb_separator(&self, line: &mut String, ppm: &mut String) {
if line.len() < MAX_PPM_LINE_LENGTH - MAX_COLOR_VAL_STR_LEN {
(*line).push(' ');
} else {
ppm.push_str(&line);
ppm.push('\n');
line.clear();
}
}
// Return string containing PPM (portable pixel map) data representing current canvas
pub fn to_ppm(&self) -> String {
let mut ppm = String::new();
// write header
ppm.push_str("P3\n");
ppm.push_str(&(format!("{} {}\n", self.width, self.height)));
ppm.push_str(&(format!("{}\n", MAX_COLOR_VAL)));
// Write pixel data. Each pixel RGB value is written with a separating space or newline;
// new rows are written on new lines for human reading convenience, but lines longer than
// MAX_PPM_LINE_LENGTH must also be split.
let mut current_line = String::new();
for row in 0..self.height {
current_line.clear();
for (i, column) in (0..self.width).enumerate() {
let color = self.pixel_at(column, row);
let r = self.scale_color(color.r);
let g = self.scale_color(color.g);
let b = self.scale_color(color.b);
current_line.push_str(&r.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&g.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
| if i != self.width - 1 {
self.write_rgb_separator(&mut current_line, &mut ppm);
}
}
if !current_line.is_empty() {
ppm.push_str(&current_line);
ppm.push('\n');
}
}
ppm
}
}
// TODO: proper parsing errors should also contain the line and column number
#[derive(Debug)]
pub enum ParseError {
IoError(io::Error),
IncorrectFormat(String),
ParseIntError(std::num::ParseIntError),
MalformedDimensionHeader(String),
}
impl From<io::Error> for ParseError {
fn from(err: io::Error) -> ParseError {
ParseError::IoError(err)
}
}
impl From<std::num::ParseIntError> for ParseError {
fn from(err: std::num::ParseIntError) -> ParseError {
ParseError::ParseIntError(err)
}
}
type RgbElement = u32;
pub fn canvas_from_ppm<T: Read>(reader: T) -> Result<Canvas, ParseError> {
let buf_reader = BufReader::new(reader);
let mut line_iter = buf_reader.lines().enumerate().filter_map(clean_line);
// TODO: these unwrap()'s are not great; should really fail properly if the file doesn't
// contain this many lines
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
if line != "P3" {
return Err(ParseError::IncorrectFormat(format!(
"Incorrect magic number at line 1: expected P3, found {}",
line
)));
}
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let elements: Vec<&str> = line.split_whitespace().collect();
if elements.len() != 2 {
return Err(ParseError::MalformedDimensionHeader(format!(
"Expected width and height at line 2; found {}",
line
)));
}
let width = elements[0].parse::<usize>()?;
let height = elements[1].parse::<usize>()?;
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let scale = line.parse::<RgbElement>()? as f32;
let mut canvas = Canvas::new(width, height);
let mut raw_rgb: VecDeque<RgbElement> = VecDeque::new();
let mut x = 0;
let mut y = 0;
for (_, (_index, line)) in line_iter.enumerate() {
let line = line?;
let line = line.trim();
let line_rgb = line
.split_whitespace()
.map(|s| s.parse::<RgbElement>())
.collect::<Result<Vec<RgbElement>, std::num::ParseIntError>>()?;
raw_rgb.extend(line_rgb);
while raw_rgb.len() >= 3 {
let r = raw_rgb.pop_front().unwrap() as f32 / scale;
let g = raw_rgb.pop_front().unwrap() as f32 / scale;
let b = raw_rgb.pop_front().unwrap() as f32 / scale;
canvas.write_pixel(x, y, color!(r, g, b));
// move to next canvas pixel
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
}
Ok(canvas)
}
fn clean_line(
(index, line): (usize, Result<String, std::io::Error>),
) -> Option<(usize, Result<String, std::io::Error>)> {
match line {
Ok(s) => {
let s = s.trim();
if s.starts_with("#") || s.is_empty() {
None
} else {
Some((index, Ok(s.to_string())))
}
}
Err(_) => Some((index, line)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_height_and_width() {
let c = Canvas::new(15, 10);
assert_eq!(c.width, 15);
assert_eq!(c.height, 10);
}
#[test]
fn test_write_and_read_pixels() {
let mut canvas = Canvas::new(10, 5);
let color = color!(0.1, 0.2, 0.3);
canvas.write_pixel(7, 4, color);
assert_eq!(canvas.pixel_at(7, 4), color);
}
#[test]
fn test_ppm_header() {
let c = Canvas::new(20, 5);
let ppm = c.to_ppm();
let mut lines = ppm.lines();
assert_eq!(lines.next().unwrap(), "P3");
assert_eq!(lines.next().unwrap(), "20 5");
assert_eq!(lines.next().unwrap(), "255");
}
#[test]
fn test_ppm_pixel_data() {
let mut c = Canvas::new(5, 3);
c.write_pixel(0, 0, color!(1.5, 0, 0));
c.write_pixel(2, 1, color!(0, 0.5, 0));
c.write_pixel(4, 2, color!(-0.5, 0, 1));
let ppm = c.to_ppm();
let mut lines = ppm.lines();
// ignore header
lines.next();
lines.next();
lines.next();
assert_eq!(lines.next().unwrap(), "255 0 0 0 0 0 0 0 0 0 0 0 0 0 0");
// book says 128, but I'll trust Rust's rounding for now
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 127 0 0 0 0 0 0 0");
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 0 0 0 0 0 0 0 255");
}
#[test]
fn test_splitting_long_ppm_lines() {
let mut canvas = Canvas::new(10, 2);
let color = color!(1, 0.8, 0.6);
// TODO: maybe turn this into a function on canvas?
for row in 0..canvas.height {
for column in 0..canvas.width {
canvas.write_pixel(column, row, color);
}
}
let ppm = canvas.to_ppm();
let mut lines = ppm.lines();
// skip header
lines.next();
lines.next();
lines.next();
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
}
#[test]
fn reading_file_with_wrong_magic_number() {
let ppm = "P32
1 1
255
0 0 0";
let result = canvas_from_ppm(ppm.as_bytes());
match result {
Err(ParseError::IncorrectFormat(msg)) => {
assert!(msg.contains("Incorrect magic number"))
}
_ => assert!(false, "Should return IncorrectFormat error"),
}
}
#[test]
fn reading_ppm_returns_canvas_with_correct_size() {
let ppm = "P3
10 2
255
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.width, 10);
assert_eq!(canvas.height, 2);
}
#[test]
fn reading_pixel_data_from_ppm_file() {
let ppm = "P3
4 3
255
255 127 0 0 127 255 127 255 0 255 255 255
0 0 0 255 0 0 0 255 0 0 0 255
255 255 0 0 255 255 255 0 255 127 127 127";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
let test_data = vec![
("1", 0, 0, color!(1, 0.49803922, 0)),
("2", 1, 0, color!(0, 0.49803922, 1)),
("3", 2, 0, color!(0.49803922, 1, 0)),
("4", 3, 0, color!(1, 1, 1)),
("5", 0, 1, color!(0, 0, 0)),
("6", 1, 1, color!(1, 0, 0)),
("7", 2, 1, color!(0, 1, 0)),
("8", 3, 1, color!(0, 0, 1)),
("9", 0, 2, color!(1, 1, 0)),
("10", 1, 2, color!(0, 1, 1)),
("11", 2, 2, color!(1, 0, 1)),
("12", 3, 2, color!(0.49803922, 0.49803922, 0.49803922)),
];
for (name, x, y, expected_color) in test_data {
println!("Case {}", name);
assert_abs_diff_eq!(canvas.pixel_at(x, y), expected_color);
}
}
#[test]
fn ppm_parsing_ignores_comment_lines() {
let ppm = "P3
# this is a comment
2 1
# this, too
255
# another comment
255 255 255
# oh, no, comments in the pixel data!
255 0 255
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(1, 1, 1));
assert_eq!(canvas.pixel_at(1, 0), color!(1, 0, 1));
}
#[test]
fn ppm_parsing_allows_rgb_triplet_to_span_lines() {
let ppm = "P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_skips_empty_lines() {
let ppm = "
P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_respects_scale_setting() {
let ppm = "P3
2 2
100
100 100 100 50 50 50
75 50 25 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 1), color!(0.75, 0.5, 0.25));
}
} | current_line.push_str(&b.to_string());
// if not at end of row yet, write a space or newline if the next point will be on this line | random_line_split |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
// TODO: this should be evaluated programmatically, but "no matching in consts allowed" error prevented this
const MAX_COLOR_VAL_STR_LEN: usize = 3;
impl Canvas {
// Create a canvas initialized to all black
pub fn new(width: usize, height: usize) -> Canvas {
Canvas {
width,
height,
data: vec![vec![color!(0, 0, 0); width]; height],
}
}
pub fn write_pixel(&mut self, x: usize, y: usize, color: Color) {
if x < self.width && y < self.height {
self.data[y][x] = color;
} else {
// return fail result
}
}
pub fn pixel_at(&self, x: usize, y: usize) -> Color {
self.data[y][x]
}
// scale/clamp color values from 0-1 to 0-255
fn scale_color(&self, rgb: f32) -> u8 {
(rgb * MAX_COLOR_VAL as f32)
.min(MAX_COLOR_VAL as f32)
.max(0.0) as u8
}
// If current line has no more room for more RGB values, add it to the PPM string and clear it;
// otherwise, add a space separator in preparation for the next RGB value
fn write_rgb_separator(&self, line: &mut String, ppm: &mut String) {
if line.len() < MAX_PPM_LINE_LENGTH - MAX_COLOR_VAL_STR_LEN {
(*line).push(' ');
} else {
ppm.push_str(&line);
ppm.push('\n');
line.clear();
}
}
// Return string containing PPM (portable pixel map) data representing current canvas
pub fn to_ppm(&self) -> String {
let mut ppm = String::new();
// write header
ppm.push_str("P3\n");
ppm.push_str(&(format!("{} {}\n", self.width, self.height)));
ppm.push_str(&(format!("{}\n", MAX_COLOR_VAL)));
// Write pixel data. Each pixel RGB value is written with a separating space or newline;
// new rows are written on new lines for human reading convenience, but lines longer than
// MAX_PPM_LINE_LENGTH must also be split.
let mut current_line = String::new();
for row in 0..self.height {
current_line.clear();
for (i, column) in (0..self.width).enumerate() {
let color = self.pixel_at(column, row);
let r = self.scale_color(color.r);
let g = self.scale_color(color.g);
let b = self.scale_color(color.b);
current_line.push_str(&r.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&g.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&b.to_string());
// if not at end of row yet, write a space or newline if the next point will be on this line
if i != self.width - 1 {
self.write_rgb_separator(&mut current_line, &mut ppm);
}
}
if !current_line.is_empty() {
ppm.push_str(&current_line);
ppm.push('\n');
}
}
ppm
}
}
// TODO: proper parsing errors should also contain the line and column number
#[derive(Debug)]
pub enum ParseError {
IoError(io::Error),
IncorrectFormat(String),
ParseIntError(std::num::ParseIntError),
MalformedDimensionHeader(String),
}
impl From<io::Error> for ParseError {
fn from(err: io::Error) -> ParseError {
ParseError::IoError(err)
}
}
impl From<std::num::ParseIntError> for ParseError {
fn from(err: std::num::ParseIntError) -> ParseError {
ParseError::ParseIntError(err)
}
}
type RgbElement = u32;
pub fn canvas_from_ppm<T: Read>(reader: T) -> Result<Canvas, ParseError> {
let buf_reader = BufReader::new(reader);
let mut line_iter = buf_reader.lines().enumerate().filter_map(clean_line);
// TODO: these unwrap()'s are not great; should really fail properly if the file doesn't
// contain this many lines
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
if line != "P3" {
return Err(ParseError::IncorrectFormat(format!(
"Incorrect magic number at line 1: expected P3, found {}",
line
)));
}
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let elements: Vec<&str> = line.split_whitespace().collect();
if elements.len() != 2 {
return Err(ParseError::MalformedDimensionHeader(format!(
"Expected width and height at line 2; found {}",
line
)));
}
let width = elements[0].parse::<usize>()?;
let height = elements[1].parse::<usize>()?;
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let scale = line.parse::<RgbElement>()? as f32;
let mut canvas = Canvas::new(width, height);
let mut raw_rgb: VecDeque<RgbElement> = VecDeque::new();
let mut x = 0;
let mut y = 0;
for (_, (_index, line)) in line_iter.enumerate() {
let line = line?;
let line = line.trim();
let line_rgb = line
.split_whitespace()
.map(|s| s.parse::<RgbElement>())
.collect::<Result<Vec<RgbElement>, std::num::ParseIntError>>()?;
raw_rgb.extend(line_rgb);
while raw_rgb.len() >= 3 {
let r = raw_rgb.pop_front().unwrap() as f32 / scale;
let g = raw_rgb.pop_front().unwrap() as f32 / scale;
let b = raw_rgb.pop_front().unwrap() as f32 / scale;
canvas.write_pixel(x, y, color!(r, g, b));
// move to next canvas pixel
x += 1;
if x >= width |
}
}
Ok(canvas)
}
fn clean_line(
(index, line): (usize, Result<String, std::io::Error>),
) -> Option<(usize, Result<String, std::io::Error>)> {
match line {
Ok(s) => {
let s = s.trim();
if s.starts_with("#") || s.is_empty() {
None
} else {
Some((index, Ok(s.to_string())))
}
}
Err(_) => Some((index, line)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_height_and_width() {
let c = Canvas::new(15, 10);
assert_eq!(c.width, 15);
assert_eq!(c.height, 10);
}
#[test]
fn test_write_and_read_pixels() {
let mut canvas = Canvas::new(10, 5);
let color = color!(0.1, 0.2, 0.3);
canvas.write_pixel(7, 4, color);
assert_eq!(canvas.pixel_at(7, 4), color);
}
#[test]
fn test_ppm_header() {
let c = Canvas::new(20, 5);
let ppm = c.to_ppm();
let mut lines = ppm.lines();
assert_eq!(lines.next().unwrap(), "P3");
assert_eq!(lines.next().unwrap(), "20 5");
assert_eq!(lines.next().unwrap(), "255");
}
#[test]
fn test_ppm_pixel_data() {
let mut c = Canvas::new(5, 3);
c.write_pixel(0, 0, color!(1.5, 0, 0));
c.write_pixel(2, 1, color!(0, 0.5, 0));
c.write_pixel(4, 2, color!(-0.5, 0, 1));
let ppm = c.to_ppm();
let mut lines = ppm.lines();
// ignore header
lines.next();
lines.next();
lines.next();
assert_eq!(lines.next().unwrap(), "255 0 0 0 0 0 0 0 0 0 0 0 0 0 0");
// book says 128, but I'll trust Rust's rounding for now
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 127 0 0 0 0 0 0 0");
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 0 0 0 0 0 0 0 255");
}
#[test]
fn test_splitting_long_ppm_lines() {
let mut canvas = Canvas::new(10, 2);
let color = color!(1, 0.8, 0.6);
// TODO: maybe turn this into a function on canvas?
for row in 0..canvas.height {
for column in 0..canvas.width {
canvas.write_pixel(column, row, color);
}
}
let ppm = canvas.to_ppm();
let mut lines = ppm.lines();
// skip header
lines.next();
lines.next();
lines.next();
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
}
#[test]
fn reading_file_with_wrong_magic_number() {
let ppm = "P32
1 1
255
0 0 0";
let result = canvas_from_ppm(ppm.as_bytes());
match result {
Err(ParseError::IncorrectFormat(msg)) => {
assert!(msg.contains("Incorrect magic number"))
}
_ => assert!(false, "Should return IncorrectFormat error"),
}
}
#[test]
fn reading_ppm_returns_canvas_with_correct_size() {
let ppm = "P3
10 2
255
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.width, 10);
assert_eq!(canvas.height, 2);
}
#[test]
fn reading_pixel_data_from_ppm_file() {
let ppm = "P3
4 3
255
255 127 0 0 127 255 127 255 0 255 255 255
0 0 0 255 0 0 0 255 0 0 0 255
255 255 0 0 255 255 255 0 255 127 127 127";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
let test_data = vec![
("1", 0, 0, color!(1, 0.49803922, 0)),
("2", 1, 0, color!(0, 0.49803922, 1)),
("3", 2, 0, color!(0.49803922, 1, 0)),
("4", 3, 0, color!(1, 1, 1)),
("5", 0, 1, color!(0, 0, 0)),
("6", 1, 1, color!(1, 0, 0)),
("7", 2, 1, color!(0, 1, 0)),
("8", 3, 1, color!(0, 0, 1)),
("9", 0, 2, color!(1, 1, 0)),
("10", 1, 2, color!(0, 1, 1)),
("11", 2, 2, color!(1, 0, 1)),
("12", 3, 2, color!(0.49803922, 0.49803922, 0.49803922)),
];
for (name, x, y, expected_color) in test_data {
println!("Case {}", name);
assert_abs_diff_eq!(canvas.pixel_at(x, y), expected_color);
}
}
#[test]
fn ppm_parsing_ignores_comment_lines() {
let ppm = "P3
# this is a comment
2 1
# this, too
255
# another comment
255 255 255
# oh, no, comments in the pixel data!
255 0 255
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(1, 1, 1));
assert_eq!(canvas.pixel_at(1, 0), color!(1, 0, 1));
}
#[test]
fn ppm_parsing_allows_rgb_triplet_to_span_lines() {
let ppm = "P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_skips_empty_lines() {
let ppm = "
P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_respects_scale_setting() {
let ppm = "P3
2 2
100
100 100 100 50 50 50
75 50 25 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 1), color!(0.75, 0.5, 0.25));
}
}
| {
x = 0;
y += 1;
} | conditional_block |
caching.py | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
"""
Internally, the CliMetLab cache is managed by the module `climetlab.core.cache`,
which relies on a sqlite database. The :py:func:`cache_file` function provides
a unique path for a given couple (`owner`, `args`).
The calling code is responsible for checking whether the file exists and
deciding to read it or create it.
"""
import datetime
import hashlib
import json
import logging
import os
import shutil
import sqlite3
import threading
import time
from functools import wraps
import psutil
from filelock import FileLock
from climetlab.core.settings import SETTINGS
from climetlab.utils import humanize
from climetlab.utils.html import css
VERSION = 2
CACHE_DB = f"cache-{VERSION}.db"
LOG = logging.getLogger(__name__)
CONNECTION = None
CACHE = None
def in_executor(func):
@wraps(func)
def wrapped(*args, **kwargs):
global CACHE
s = CACHE.enqueue(func, *args, **kwargs)
return s.result()
return wrapped
def in_executor_forget(func):
@wraps(func)
def wrapped(*args, **kwargs):
global CACHE
CACHE.enqueue(func, *args, **kwargs)
return None
return wrapped
class Future:
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
self._condition = threading.Condition()
self._ready = False
self._result = None
def execute(self):
try:
self._result = self.func(*self.args, **self.kwargs)
except Exception as e:
LOG.error(e)
self._result = e
with self._condition:
self._ready = True
self._condition.notify_all()
def result(self):
with self._condition:
while not self._ready:
self._condition.wait()
if isinstance(self._result, Exception):
raise self._result
return self._result
class Cache(threading.Thread):
def __init__(self):
super().__init__(daemon=True)
self._connection = None
self._queue = []
self._condition = threading.Condition()
def run(self):
while True:
with self._condition:
while len(self._queue) == 0:
self._condition.wait()
s = self._queue.pop(0)
self._condition.notify_all()
s.execute()
@property
def connection(self):
if self._connection is None:
cache_dir = SETTINGS.get("cache-directory")
if not os.path.exists(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
cache_db = os.path.join(cache_dir, CACHE_DB)
LOG.debug("Cache database is %s", cache_db)
self._connection = sqlite3.connect(cache_db)
# So we can use rows as dictionaries
self._connection.row_factory = sqlite3.Row
# If you change the schema, change VERSION above
self._connection.execute(
"""
CREATE TABLE IF NOT EXISTS cache (
path TEXT PRIMARY KEY,
owner TEXT NOT NULL,
args TEXT NOT NULL,
creation_date TEXT NOT NULL,
flags INTEGER DEFAULT 0,
owner_data TEXT,
last_access TEXT NOT NULL,
type TEXT,
parent TEXT,
replaced TEXT,
extra TEXT,
expires INTEGER,
accesses INTEGER,
size INTEGER);"""
)
return self._connection
def enqueue(self, func, *args, **kwargs):
with self._condition:
s = Future(func, args, kwargs)
self._queue.append(s)
self._condition.notify_all()
return s
def _file_in_cache_directory(self, path):
cache_directory = SETTINGS.get("cache-directory")
return path.startswith(cache_directory)
def _ensure_in_cache(self, path):
assert self._file_in_cache_directory(path), f"File not in cache {path}"
def _settings_changed(self):
LOG.debug("Settings changed")
self._connection = None # The user may have changed the cache directory
self._check_cache_size()
def _latest_date(self):
"""Returns the latest date to be used when purging the cache.
So we do not purge files being downloaded."""
with self.connection as db:
latest = db.execute(
"SELECT MIN(creation_date) FROM cache WHERE size IS NULL"
).fetchone()[0]
if latest is None:
latest = db.execute(
"SELECT MAX(creation_date) FROM cache WHERE size IS NOT NULL"
).fetchone()[0]
if latest is None:
latest = datetime.datetime.utcnow()
return latest
def _purge_cache(self, owner=None, age=None, size=None):
if owner is None and age is None and size is None:
self._decache(self._cache_size())
return
with self.connection as db:
db.execute("DELETE FROM cache WHERE owner=?", (owner,))
def _cache_entries(self):
result = []
with self.connection as db:
for n in db.execute("SELECT * FROM cache").fetchall():
n = dict(n)
n["args"] = json.loads(n["args"])
try:
n["owner_data"] = json.loads(n["owner_data"])
except Exception:
pass
if os.path.exists(n["path"]):
result.append(n)
return result
def _update_entry(self, path, owner_data=None):
self._ensure_in_cache(path)
if os.path.isdir(path):
kind = "directory"
size = 0
for root, _, files in os.walk(path):
for f in files:
size += os.path.getsize(os.path.join(root, f))
else:
kind = "file"
size = os.path.getsize(path)
with self.connection as db:
db.execute(
"UPDATE cache SET size=?, type=?, owner_data=? WHERE path=?",
(
size,
kind,
json.dumps(owner_data),
path,
),
)
def _update_cache(self, clean=False):
"""Update cache size and size of each file in the database ."""
with self.connection as db:
update = []
commit = False
for n in db.execute("SELECT path FROM cache WHERE size IS NULL"):
try:
path = n[0]
if os.path.isdir(path):
kind = "directory"
size = 0
for root, _, files in os.walk(path):
for f in files:
size += os.path.getsize(os.path.join(root, f))
else:
kind = "file"
size = os.path.getsize(path)
update.append((size, kind, path))
except Exception:
if clean:
db.execute("DELETE from cache WHERE path=?", (path,))
commit = True
if update:
db.executemany("UPDATE cache SET size=?, type=? WHERE path=?", update)
if update or commit:
db.commit()
def _housekeeping(self):
top = SETTINGS.get("cache-directory")
with self.connection as db:
for name in os.listdir(top):
if name == CACHE_DB:
continue
full = os.path.join(top, name)
count = db.execute(
"SELECT count(*) FROM cache WHERE path=?", (full,)
).fetchone()[0]
if count > 0:
continue
parent = None
start = full.split(".")[0] + "%"
for n in db.execute(
"SELECT path FROM cache WHERE parent IS NULL and path LIKE ?",
(start,),
).fetchall():
if full.startswith(n["path"]):
parent = n["path"]
break
try:
s = os.stat(full)
if time.time() - s.st_mtime < 120: # Two minutes
continue
except OSError:
pass
if parent is None:
LOG.warning(f"CliMetLab cache: orphan found: {full}")
else:
LOG.debug(
f"CliMetLab cache: orphan found: {full} with parent {parent}"
)
self._register_cache_file(
full,
"orphans",
None,
parent,
)
self._update_cache()
def _delete_file(self, path):
self._ensure_in_cache(path)
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.unlink(path)
except Exception:
LOG.exception("Deleting %s", path)
def _entry_to_dict(self, entry):
n = dict(entry)
for k in ("args", "owner_data"):
if k in n and isinstance(n[k], str):
n[k] = json.loads(n[k])
return n
def _delete_entry(self, entry):
if isinstance(entry, str):
entry = dict(
path=entry,
size=None,
owner=None,
args=None,
)
path, size, owner, args = entry, None, None, None
try:
entry["size"] = os.path.getsize(entry["path"])
except OSError:
pass
path, size, owner, args = (
entry["path"],
entry["size"],
entry["owner"],
entry["args"],
)
LOG.warning(
"Deleting entry %s", json.dumps(self._entry_to_dict(entry), indent=4)
)
total = 0
# First, delete child files, e.g. unzipped data
with self.connection as db:
for child in db.execute("SELECT * FROM cache WHERE parent = ?", (path,)):
total += self._delete_entry(child)
if not os.path.exists(path):
LOG.warning(f"cache file lost: {path}")
with self.connection as db:
db.execute("DELETE FROM cache WHERE path=?", (path,))
return total
LOG.warning(f"CliMetLab cache: deleting {path} ({humanize.bytes(size)})")
LOG.warning(f"CliMetLab cache: {owner} {args}")
self._delete_file(path)
with self.connection as db:
db.execute("DELETE FROM cache WHERE path=?", (path,))
return total + size
def _decache(self, bytes):
# _find_orphans()
# _update_cache(clean=True)
if bytes <= 0:
return 0
LOG.warning("CliMetLab cache: trying to free %s", humanize.bytes(bytes))
total = 0 | latest = self._latest_date()
for stmt in (
"SELECT * FROM cache WHERE size IS NOT NULL AND owner='orphans' AND creation_date < ?",
"SELECT * FROM cache WHERE size IS NOT NULL AND creation_date < ? ORDER BY last_access ASC",
):
for entry in db.execute(stmt, (latest,)):
total += self._delete_entry(entry)
if total >= bytes:
LOG.warning(
"CliMetLab cache: freed %s from cache",
humanize.bytes(bytes),
)
return total
LOG.warning("CliMetLab cache: could not free %s", humanize.bytes(bytes))
def _register_cache_file(self, path, owner, args, parent=None):
"""Register a file in the cache
Parameters
----------
path : str
Cache file to register
owner : str
Owner of the cache file (generally a source or a dataset)
args : dict
Dictionary to save with the file in the database, as json string.
Returns
-------
changes :
None or False if database does not need to be updated. TODO: clarify.
"""
self._ensure_in_cache(path)
with self.connection as db:
now = datetime.datetime.utcnow()
args = json.dumps(args)
db.execute(
"""
UPDATE cache
SET accesses = accesses + 1,
last_access = ?
WHERE path=?""",
(now, path),
)
changes = db.execute("SELECT changes()").fetchone()[0]
if not changes:
db.execute(
"""
INSERT INTO cache(
path,
owner,
args,
creation_date,
last_access,
accesses,
parent)
VALUES(?,?,?,?,?,?,?)""",
(path, owner, args, now, now, 1, parent),
)
return dict(
db.execute("SELECT * FROM cache WHERE path=?", (path,)).fetchone()
)
def _cache_size(self):
with self.connection as db:
size = db.execute("SELECT SUM(size) FROM cache").fetchone()[0]
if size is None:
size = 0
return size
def _decache_file(self, path):
self._delete_entry(path)
def _check_cache_size(self):
# Check absolute limit
size = self._cache_size()
maximum = SETTINGS.get("maximum-cache-size")
if maximum is not None and size > maximum:
self._housekeeping()
self._decache(size - maximum)
# Check relative limit
size = self._cache_size()
usage = SETTINGS.get("maximum-cache-disk-usage")
cache_directory = SETTINGS.get("cache-directory")
df = psutil.disk_usage(cache_directory)
if df.percent > usage:
LOG.debug("Cache disk usage %s, limit %s", df.percent, usage)
self._housekeeping()
delta = (df.percent - usage) * df.total * 0.01
self._decache(delta)
def _repr_html_(self):
"""Return a html representation of the cache .
Returns
-------
str
HTML status of the cache.
"""
html = [css("table")]
with self.connection as db:
for n in db.execute("SELECT * FROM cache"):
html.append("<table class='climetlab'>")
html.append("<td><td colspan='2'>%s</td></tr>" % (n["path"],))
for k in [x for x in n.keys() if x not in ("path", "owner_data")]:
v = humanize.bytes(n[k]) if k == "size" else n[k]
html.append("<td><td>%s</td><td>%s</td></tr>" % (k, v))
html.append("</table>")
html.append("<br>")
return "".join(html)
def _dump_cache_database(self):
result = []
with self.connection as db:
for d in db.execute("SELECT * FROM cache"):
n = dict(d)
for k in ("args", "owner_data"):
if n[k] is not None:
n[k] = json.loads(n[k])
result.append(n)
return result
CACHE = Cache()
CACHE.start()
dump_cache_database = in_executor(CACHE._dump_cache_database)
register_cache_file = in_executor(CACHE._register_cache_file)
update_entry = in_executor(CACHE._update_entry)
check_cache_size = in_executor_forget(CACHE._check_cache_size)
cache_size = in_executor(CACHE._cache_size)
cache_entries = in_executor(CACHE._cache_entries)
purge_cache = in_executor(CACHE._purge_cache)
housekeeping = in_executor(CACHE._housekeeping)
decache_file = in_executor(CACHE._decache_file)
file_in_cache_directory = in_executor(CACHE._file_in_cache_directory)
settings_changed = in_executor(CACHE._settings_changed)
def cache_file(
owner: str,
create,
args,
hash_extra=None,
extension: str = ".cache",
force=None,
replace=None,
):
"""Creates a cache file in the climetlab cache-directory (defined in the :py:class:`Settings`).
Uses :py:func:`_register_cache_file()`
Parameters
----------
owner : str
The owner of the cache file is generally the name of the source that generated the cache.
extension : str, optional
Extension filename (such as ".nc" for NetCDF, etc.), by default ".cache"
Returns
-------
path : str
Full path to the cache file.
"""
m = hashlib.sha256()
m.update(owner.encode("utf-8"))
m.update(json.dumps(args, sort_keys=True).encode("utf-8"))
m.update(json.dumps(hash_extra, sort_keys=True).encode("utf-8"))
m.update(json.dumps(extension, sort_keys=True).encode("utf-8"))
if replace is not None:
# Don't replace files that are not in the cache
if not file_in_cache_directory(replace):
replace = None
path = os.path.join(
SETTINGS.get("cache-directory"),
"{}-{}{}".format(
owner.lower(),
m.hexdigest(),
extension,
),
)
record = register_cache_file(path, owner, args)
if os.path.exists(path):
if callable(force):
owner_data = record["owner_data"]
if owner_data is not None:
owner_data = json.loads(owner_data)
force = force(args, path, owner_data)
if force:
decache_file(path)
if not os.path.exists(path):
lock = path + ".lock"
with FileLock(lock):
if not os.path.exists(
path
): # Check again, another thread/process may have created the file
owner_data = create(path + ".tmp", args)
os.rename(path + ".tmp", path)
update_entry(path, owner_data)
check_cache_size()
try:
os.unlink(lock)
except OSError:
pass
return path
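# Hedged usage sketch (added for illustration; `build_file` and "example-source"
# are hypothetical names, not part of CliMetLab):
#
#   def build_file(target, args):
#       with open(target, "w") as f:
#           f.write(json.dumps(args))
#
#   path = cache_file("example-source", build_file, {"param": 1})
#
# The callback only runs when the file is absent; cache_file() writes to
# `path + ".tmp"` and renames it into place, so repeated calls with the same
# (owner, args) pair return the same, already-built file.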
def auxiliary_cache_file(
owner,
path,
index=0,
content=None,
extension=".cache",
):
# Create an auxiliary cache file
# to be used for example to cache an index
# It is invalidated if `path` is changed
stat = os.stat(path)
def create(target, args):
# Simply touch the file
with open(target, "w") as f:
if content:
f.write(content)
return cache_file(
owner,
create,
(
path,
stat.st_ctime,
stat.st_mtime,
stat.st_size,
index,
),
extension=extension,
)
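# Hedged usage sketch (hypothetical owner name and path, for illustration only):
#
#   index_path = auxiliary_cache_file("grib-index", "/data/sample.grib", content="{}")
#
# Because the stat() fields of `path` (ctime, mtime, size) are part of the cache
# key, the auxiliary file is naturally invalidated when the underlying file changes.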
# housekeeping()
SETTINGS.on_change(settings_changed) |
with self.connection as db:
| random_line_split |
caching.py | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
"""
Internally, the CliMetLab cache is managed by the module `climetlab.core.cache`,
which relies on a SQLite database. The :py:func:`cache_file` function provides
a unique path for a given pair (`owner`, `args`).
The calling code is responsible for checking whether the file exists and
for deciding to read it or to create it.
"""
import datetime
import hashlib
import json
import logging
import os
import shutil
import sqlite3
import threading
import time
from functools import wraps
import psutil
from filelock import FileLock
from climetlab.core.settings import SETTINGS
from climetlab.utils import humanize
from climetlab.utils.html import css
VERSION = 2
CACHE_DB = f"cache-{VERSION}.db"
LOG = logging.getLogger(__name__)
CONNECTION = None
CACHE = None
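# The two decorators below route calls onto the single background cache thread:
# in_executor() enqueues the wrapped Cache method and blocks until its result is
# available, while in_executor_forget() enqueues it and returns immediately.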
def in_executor(func):
@wraps(func)
def wrapped(*args, **kwargs):
global CACHE
s = CACHE.enqueue(func, *args, **kwargs)
return s.result()
return wrapped
def in_executor_forget(func):
@wraps(func)
def wrapped(*args, **kwargs):
global CACHE
CACHE.enqueue(func, *args, **kwargs)
return None
return wrapped
class Future:
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
self._condition = threading.Condition()
self._ready = False
self._result = None
def execute(self):
try:
self._result = self.func(*self.args, **self.kwargs)
except Exception as e:
LOG.error(e)
self._result = e
with self._condition:
self._ready = True
self._condition.notify_all()
def result(self):
with self._condition:
while not self._ready:
self._condition.wait()
if isinstance(self._result, Exception):
raise self._result
return self._result
class Cache(threading.Thread):
def __init__(self):
super().__init__(daemon=True)
self._connection = None
self._queue = []
self._condition = threading.Condition()
def run(self):
while True:
with self._condition:
while len(self._queue) == 0:
self._condition.wait()
s = self._queue.pop(0)
self._condition.notify_all()
s.execute()
@property
def | (self):
if self._connection is None:
cache_dir = SETTINGS.get("cache-directory")
if not os.path.exists(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
cache_db = os.path.join(cache_dir, CACHE_DB)
LOG.debug("Cache database is %s", cache_db)
self._connection = sqlite3.connect(cache_db)
# So we can use rows as dictionaries
self._connection.row_factory = sqlite3.Row
# If you change the schema, change VERSION above
self._connection.execute(
"""
CREATE TABLE IF NOT EXISTS cache (
path TEXT PRIMARY KEY,
owner TEXT NOT NULL,
args TEXT NOT NULL,
creation_date TEXT NOT NULL,
flags INTEGER DEFAULT 0,
owner_data TEXT,
last_access TEXT NOT NULL,
type TEXT,
parent TEXT,
replaced TEXT,
extra TEXT,
expires INTEGER,
accesses INTEGER,
size INTEGER);"""
)
return self._connection
def enqueue(self, func, *args, **kwargs):
with self._condition:
s = Future(func, args, kwargs)
self._queue.append(s)
self._condition.notify_all()
return s
def _file_in_cache_directory(self, path):
cache_directory = SETTINGS.get("cache-directory")
return path.startswith(cache_directory)
def _ensure_in_cache(self, path):
assert self._file_in_cache_directory(path), f"File not in cache {path}"
def _settings_changed(self):
LOG.debug("Settings changed")
self._connection = None # The user may have changed the cache directory
self._check_cache_size()
def _latest_date(self):
"""Returns the latest date to be used when purging the cache.
So we do not purge files being downloaded."""
with self.connection as db:
latest = db.execute(
"SELECT MIN(creation_date) FROM cache WHERE size IS NULL"
).fetchone()[0]
if latest is None:
latest = db.execute(
"SELECT MAX(creation_date) FROM cache WHERE size IS NOT NULL"
).fetchone()[0]
if latest is None:
latest = datetime.datetime.utcnow()
return latest
def _purge_cache(self, owner=None, age=None, size=None):
if owner is None and age is None and size is None:
self._decache(self._cache_size())
return
with self.connection as db:
db.execute("DELETE FROM cache WHERE owner=?", (owner,))
def _cache_entries(self):
result = []
with self.connection as db:
for n in db.execute("SELECT * FROM cache").fetchall():
n = dict(n)
n["args"] = json.loads(n["args"])
try:
n["owner_data"] = json.loads(n["owner_data"])
except Exception:
pass
if os.path.exists(n["path"]):
result.append(n)
return result
def _update_entry(self, path, owner_data=None):
self._ensure_in_cache(path)
if os.path.isdir(path):
kind = "directory"
size = 0
for root, _, files in os.walk(path):
for f in files:
size += os.path.getsize(os.path.join(root, f))
else:
kind = "file"
size = os.path.getsize(path)
with self.connection as db:
db.execute(
"UPDATE cache SET size=?, type=?, owner_data=? WHERE path=?",
(
size,
kind,
json.dumps(owner_data),
path,
),
)
def _update_cache(self, clean=False):
"""Update cache size and size of each file in the database ."""
with self.connection as db:
update = []
commit = False
for n in db.execute("SELECT path FROM cache WHERE size IS NULL"):
try:
path = n[0]
if os.path.isdir(path):
kind = "directory"
size = 0
for root, _, files in os.walk(path):
for f in files:
size += os.path.getsize(os.path.join(root, f))
else:
kind = "file"
size = os.path.getsize(path)
update.append((size, kind, path))
except Exception:
if clean:
db.execute("DELETE from cache WHERE path=?", (path,))
commit = True
if update:
db.executemany("UPDATE cache SET size=?, type=? WHERE path=?", update)
if update or commit:
db.commit()
def _housekeeping(self):
top = SETTINGS.get("cache-directory")
with self.connection as db:
for name in os.listdir(top):
if name == CACHE_DB:
continue
full = os.path.join(top, name)
count = db.execute(
"SELECT count(*) FROM cache WHERE path=?", (full,)
).fetchone()[0]
if count > 0:
continue
parent = None
start = full.split(".")[0] + "%"
for n in db.execute(
"SELECT path FROM cache WHERE parent IS NULL and path LIKE ?",
(start,),
).fetchall():
if full.startswith(n["path"]):
parent = n["path"]
break
try:
s = os.stat(full)
if time.time() - s.st_mtime < 120: # Two minutes
continue
except OSError:
pass
if parent is None:
LOG.warning(f"CliMetLab cache: orphan found: {full}")
else:
LOG.debug(
f"CliMetLab cache: orphan found: {full} with parent {parent}"
)
self._register_cache_file(
full,
"orphans",
None,
parent,
)
self._update_cache()
def _delete_file(self, path):
self._ensure_in_cache(path)
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.unlink(path)
except Exception:
LOG.exception("Deleting %s", path)
def _entry_to_dict(self, entry):
n = dict(entry)
for k in ("args", "owner_data"):
if k in n and isinstance(n[k], str):
n[k] = json.loads(n[k])
return n
def _delete_entry(self, entry):
if isinstance(entry, str):
entry = dict(
path=entry,
size=None,
owner=None,
args=None,
)
path, size, owner, args = entry, None, None, None
try:
entry["size"] = os.path.getsize(entry["path"])
except OSError:
pass
path, size, owner, args = (
entry["path"],
entry["size"],
entry["owner"],
entry["args"],
)
LOG.warning(
"Deleting entry %s", json.dumps(self._entry_to_dict(entry), indent=4)
)
total = 0
# First, delete child files, e.g. unzipped data
with self.connection as db:
for child in db.execute("SELECT * FROM cache WHERE parent = ?", (path,)):
total += self._delete_entry(child)
if not os.path.exists(path):
LOG.warning(f"cache file lost: {path}")
with self.connection as db:
db.execute("DELETE FROM cache WHERE path=?", (path,))
return total
LOG.warning(f"CliMetLab cache: deleting {path} ({humanize.bytes(size)})")
LOG.warning(f"CliMetLab cache: {owner} {args}")
self._delete_file(path)
with self.connection as db:
db.execute("DELETE FROM cache WHERE path=?", (path,))
return total + size
def _decache(self, bytes):
# _find_orphans()
# _update_cache(clean=True)
if bytes <= 0:
return 0
LOG.warning("CliMetLab cache: trying to free %s", humanize.bytes(bytes))
total = 0
with self.connection as db:
latest = self._latest_date()
for stmt in (
"SELECT * FROM cache WHERE size IS NOT NULL AND owner='orphans' AND creation_date < ?",
"SELECT * FROM cache WHERE size IS NOT NULL AND creation_date < ? ORDER BY last_access ASC",
):
for entry in db.execute(stmt, (latest,)):
total += self._delete_entry(entry)
if total >= bytes:
LOG.warning(
"CliMetLab cache: freed %s from cache",
humanize.bytes(bytes),
)
return total
LOG.warning("CliMetLab cache: could not free %s", humanize.bytes(bytes))
def _register_cache_file(self, path, owner, args, parent=None):
"""Register a file in the cache
Parameters
----------
path : str
Cache file to register
owner : str
Owner of the cache file (generally a source or a dataset)
args : dict
Dictionary to save with the file in the database, as json string.
Returns
-------
changes :
None or False if database does not need to be updated. TODO: clarify.
"""
self._ensure_in_cache(path)
with self.connection as db:
now = datetime.datetime.utcnow()
args = json.dumps(args)
db.execute(
"""
UPDATE cache
SET accesses = accesses + 1,
last_access = ?
WHERE path=?""",
(now, path),
)
changes = db.execute("SELECT changes()").fetchone()[0]
if not changes:
db.execute(
"""
INSERT INTO cache(
path,
owner,
args,
creation_date,
last_access,
accesses,
parent)
VALUES(?,?,?,?,?,?,?)""",
(path, owner, args, now, now, 1, parent),
)
return dict(
db.execute("SELECT * FROM cache WHERE path=?", (path,)).fetchone()
)
def _cache_size(self):
with self.connection as db:
size = db.execute("SELECT SUM(size) FROM cache").fetchone()[0]
if size is None:
size = 0
return size
def _decache_file(self, path):
self._delete_entry(path)
def _check_cache_size(self):
# Check absolute limit
size = self._cache_size()
maximum = SETTINGS.get("maximum-cache-size")
if maximum is not None and size > maximum:
self._housekeeping()
self._decache(size - maximum)
# Check relative limit
size = self._cache_size()
usage = SETTINGS.get("maximum-cache-disk-usage")
cache_directory = SETTINGS.get("cache-directory")
df = psutil.disk_usage(cache_directory)
if df.percent > usage:
LOG.debug("Cache disk usage %s, limit %s", df.percent, usage)
self._housekeeping()
delta = (df.percent - usage) * df.total * 0.01
self._decache(delta)
def _repr_html_(self):
"""Return a html representation of the cache .
Returns
-------
str
HTML status of the cache.
"""
html = [css("table")]
with self.connection as db:
for n in db.execute("SELECT * FROM cache"):
html.append("<table class='climetlab'>")
html.append("<td><td colspan='2'>%s</td></tr>" % (n["path"],))
for k in [x for x in n.keys() if x not in ("path", "owner_data")]:
v = humanize.bytes(n[k]) if k == "size" else n[k]
html.append("<td><td>%s</td><td>%s</td></tr>" % (k, v))
html.append("</table>")
html.append("<br>")
return "".join(html)
def _dump_cache_database(self):
result = []
with self.connection as db:
for d in db.execute("SELECT * FROM cache"):
n = dict(d)
for k in ("args", "owner_data"):
if n[k] is not None:
n[k] = json.loads(n[k])
result.append(n)
return result
CACHE = Cache()
CACHE.start()
dump_cache_database = in_executor(CACHE._dump_cache_database)
register_cache_file = in_executor(CACHE._register_cache_file)
update_entry = in_executor(CACHE._update_entry)
check_cache_size = in_executor_forget(CACHE._check_cache_size)
cache_size = in_executor(CACHE._cache_size)
cache_entries = in_executor(CACHE._cache_entries)
purge_cache = in_executor(CACHE._purge_cache)
housekeeping = in_executor(CACHE._housekeeping)
decache_file = in_executor(CACHE._decache_file)
file_in_cache_directory = in_executor(CACHE._file_in_cache_directory)
settings_changed = in_executor(CACHE._settings_changed)
def cache_file(
owner: str,
create,
args,
hash_extra=None,
extension: str = ".cache",
force=None,
replace=None,
):
"""Creates a cache file in the climetlab cache-directory (defined in the :py:class:`Settings`).
Uses :py:func:`_register_cache_file()`
Parameters
----------
owner : str
The owner of the cache file is generally the name of the source that generated the cache.
extension : str, optional
Extension filename (such as ".nc" for NetCDF, etc.), by default ".cache"
Returns
-------
path : str
Full path to the cache file.
"""
m = hashlib.sha256()
m.update(owner.encode("utf-8"))
m.update(json.dumps(args, sort_keys=True).encode("utf-8"))
m.update(json.dumps(hash_extra, sort_keys=True).encode("utf-8"))
m.update(json.dumps(extension, sort_keys=True).encode("utf-8"))
if replace is not None:
# Don't replace files that are not in the cache
if not file_in_cache_directory(replace):
replace = None
path = os.path.join(
SETTINGS.get("cache-directory"),
"{}-{}{}".format(
owner.lower(),
m.hexdigest(),
extension,
),
)
record = register_cache_file(path, owner, args)
if os.path.exists(path):
if callable(force):
owner_data = record["owner_data"]
if owner_data is not None:
owner_data = json.loads(owner_data)
force = force(args, path, owner_data)
if force:
decache_file(path)
if not os.path.exists(path):
lock = path + ".lock"
with FileLock(lock):
if not os.path.exists(
path
): # Check again, another thread/process may have created the file
owner_data = create(path + ".tmp", args)
os.rename(path + ".tmp", path)
update_entry(path, owner_data)
check_cache_size()
try:
os.unlink(lock)
except OSError:
pass
return path
def auxiliary_cache_file(
owner,
path,
index=0,
content=None,
extension=".cache",
):
# Create an auxiliary cache file
# to be used for example to cache an index
# It is invalidated if `path` is changed
stat = os.stat(path)
def create(target, args):
# Simply touch the file
with open(target, "w") as f:
if content:
f.write(content)
return cache_file(
owner,
create,
(
path,
stat.st_ctime,
stat.st_mtime,
stat.st_size,
index,
),
extension=extension,
)
# housekeeping()
SETTINGS.on_change(settings_changed)
| connection | identifier_name |
caching.py | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
"""
Internally, the CliMetLab cache is managed by the module `climetlab.core.cache`,
which relies on a SQLite database. The :py:func:`cache_file` function provides
a unique path for a given pair (`owner`, `args`).
The calling code is responsible for checking whether the file exists and
for deciding to read it or to create it.
"""
import datetime
import hashlib
import json
import logging
import os
import shutil
import sqlite3
import threading
import time
from functools import wraps
import psutil
from filelock import FileLock
from climetlab.core.settings import SETTINGS
from climetlab.utils import humanize
from climetlab.utils.html import css
VERSION = 2
CACHE_DB = f"cache-{VERSION}.db"
LOG = logging.getLogger(__name__)
CONNECTION = None
CACHE = None
def in_executor(func):
@wraps(func)
def wrapped(*args, **kwargs):
global CACHE
s = CACHE.enqueue(func, *args, **kwargs)
return s.result()
return wrapped
def in_executor_forget(func):
@wraps(func)
def wrapped(*args, **kwargs):
global CACHE
CACHE.enqueue(func, *args, **kwargs)
return None
return wrapped
class Future:
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
self._condition = threading.Condition()
self._ready = False
self._result = None
def execute(self):
try:
self._result = self.func(*self.args, **self.kwargs)
except Exception as e:
LOG.error(e)
self._result = e
with self._condition:
self._ready = True
self._condition.notify_all()
def result(self):
with self._condition:
while not self._ready:
self._condition.wait()
if isinstance(self._result, Exception):
raise self._result
return self._result
class Cache(threading.Thread):
def __init__(self):
super().__init__(daemon=True)
self._connection = None
self._queue = []
self._condition = threading.Condition()
def run(self):
while True:
with self._condition:
while len(self._queue) == 0:
self._condition.wait()
s = self._queue.pop(0)
self._condition.notify_all()
s.execute()
@property
def connection(self):
if self._connection is None:
cache_dir = SETTINGS.get("cache-directory")
if not os.path.exists(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
cache_db = os.path.join(cache_dir, CACHE_DB)
LOG.debug("Cache database is %s", cache_db)
self._connection = sqlite3.connect(cache_db)
# So we can use rows as dictionaries
self._connection.row_factory = sqlite3.Row
# If you change the schema, change VERSION above
self._connection.execute(
"""
CREATE TABLE IF NOT EXISTS cache (
path TEXT PRIMARY KEY,
owner TEXT NOT NULL,
args TEXT NOT NULL,
creation_date TEXT NOT NULL,
flags INTEGER DEFAULT 0,
owner_data TEXT,
last_access TEXT NOT NULL,
type TEXT,
parent TEXT,
replaced TEXT,
extra TEXT,
expires INTEGER,
accesses INTEGER,
size INTEGER);"""
)
return self._connection
def enqueue(self, func, *args, **kwargs):
with self._condition:
s = Future(func, args, kwargs)
self._queue.append(s)
self._condition.notify_all()
return s
def _file_in_cache_directory(self, path):
cache_directory = SETTINGS.get("cache-directory")
return path.startswith(cache_directory)
def _ensure_in_cache(self, path):
assert self._file_in_cache_directory(path), f"File not in cache {path}"
def _settings_changed(self):
LOG.debug("Settings changed")
self._connection = None # The user may have changed the cache directory
self._check_cache_size()
def _latest_date(self):
"""Returns the latest date to be used when purging the cache.
So we do not purge files being downloaded."""
with self.connection as db:
latest = db.execute(
"SELECT MIN(creation_date) FROM cache WHERE size IS NULL"
).fetchone()[0]
if latest is None:
latest = db.execute(
"SELECT MAX(creation_date) FROM cache WHERE size IS NOT NULL"
).fetchone()[0]
if latest is None:
latest = datetime.datetime.utcnow()
return latest
def _purge_cache(self, owner=None, age=None, size=None):
if owner is None and age is None and size is None:
self._decache(self._cache_size())
return
with self.connection as db:
db.execute("DELETE FROM cache WHERE owner=?", (owner,))
def _cache_entries(self):
result = []
with self.connection as db:
for n in db.execute("SELECT * FROM cache").fetchall():
n = dict(n)
n["args"] = json.loads(n["args"])
try:
n["owner_data"] = json.loads(n["owner_data"])
except Exception:
pass
if os.path.exists(n["path"]):
result.append(n)
return result
def _update_entry(self, path, owner_data=None):
self._ensure_in_cache(path)
if os.path.isdir(path):
kind = "directory"
size = 0
for root, _, files in os.walk(path):
for f in files:
size += os.path.getsize(os.path.join(root, f))
else:
kind = "file"
size = os.path.getsize(path)
with self.connection as db:
db.execute(
"UPDATE cache SET size=?, type=?, owner_data=? WHERE path=?",
(
size,
kind,
json.dumps(owner_data),
path,
),
)
def _update_cache(self, clean=False):
"""Update cache size and size of each file in the database ."""
with self.connection as db:
update = []
commit = False
for n in db.execute("SELECT path FROM cache WHERE size IS NULL"):
try:
path = n[0]
if os.path.isdir(path):
kind = "directory"
size = 0
for root, _, files in os.walk(path):
for f in files:
size += os.path.getsize(os.path.join(root, f))
else:
kind = "file"
size = os.path.getsize(path)
update.append((size, kind, path))
except Exception:
if clean:
db.execute("DELETE from cache WHERE path=?", (path,))
commit = True
if update:
db.executemany("UPDATE cache SET size=?, type=? WHERE path=?", update)
if update or commit:
db.commit()
def _housekeeping(self):
top = SETTINGS.get("cache-directory")
with self.connection as db:
for name in os.listdir(top):
if name == CACHE_DB:
continue
full = os.path.join(top, name)
count = db.execute(
"SELECT count(*) FROM cache WHERE path=?", (full,)
).fetchone()[0]
if count > 0:
continue
parent = None
start = full.split(".")[0] + "%"
for n in db.execute(
"SELECT path FROM cache WHERE parent IS NULL and path LIKE ?",
(start,),
).fetchall():
if full.startswith(n["path"]):
parent = n["path"]
break
try:
s = os.stat(full)
if time.time() - s.st_mtime < 120: # Two minutes
continue
except OSError:
pass
if parent is None:
LOG.warning(f"CliMetLab cache: orphan found: {full}")
else:
|
self._register_cache_file(
full,
"orphans",
None,
parent,
)
self._update_cache()
def _delete_file(self, path):
self._ensure_in_cache(path)
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.unlink(path)
except Exception:
LOG.exception("Deleting %s", path)
def _entry_to_dict(self, entry):
n = dict(entry)
for k in ("args", "owner_data"):
if k in n and isinstance(n[k], str):
n[k] = json.loads(n[k])
return n
def _delete_entry(self, entry):
if isinstance(entry, str):
entry = dict(
path=entry,
size=None,
owner=None,
args=None,
)
path, size, owner, args = entry, None, None, None
try:
entry["size"] = os.path.getsize(entry["path"])
except OSError:
pass
path, size, owner, args = (
entry["path"],
entry["size"],
entry["owner"],
entry["args"],
)
LOG.warning(
"Deleting entry %s", json.dumps(self._entry_to_dict(entry), indent=4)
)
total = 0
# First, delete child files, e.g. unzipped data
with self.connection as db:
for child in db.execute("SELECT * FROM cache WHERE parent = ?", (path,)):
total += self._delete_entry(child)
if not os.path.exists(path):
LOG.warning(f"cache file lost: {path}")
with self.connection as db:
db.execute("DELETE FROM cache WHERE path=?", (path,))
return total
LOG.warning(f"CliMetLab cache: deleting {path} ({humanize.bytes(size)})")
LOG.warning(f"CliMetLab cache: {owner} {args}")
self._delete_file(path)
with self.connection as db:
db.execute("DELETE FROM cache WHERE path=?", (path,))
return total + size
def _decache(self, bytes):
# _find_orphans()
# _update_cache(clean=True)
if bytes <= 0:
return 0
LOG.warning("CliMetLab cache: trying to free %s", humanize.bytes(bytes))
total = 0
with self.connection as db:
latest = self._latest_date()
for stmt in (
"SELECT * FROM cache WHERE size IS NOT NULL AND owner='orphans' AND creation_date < ?",
"SELECT * FROM cache WHERE size IS NOT NULL AND creation_date < ? ORDER BY last_access ASC",
):
for entry in db.execute(stmt, (latest,)):
total += self._delete_entry(entry)
if total >= bytes:
LOG.warning(
"CliMetLab cache: freed %s from cache",
humanize.bytes(bytes),
)
return total
LOG.warning("CliMetLab cache: could not free %s", humanize.bytes(bytes))
def _register_cache_file(self, path, owner, args, parent=None):
"""Register a file in the cache
Parameters
----------
path : str
Cache file to register
owner : str
Owner of the cache file (generally a source or a dataset)
args : dict
Dictionary to save with the file in the database, as json string.
Returns
-------
changes :
None or False if database does not need to be updated. TODO: clarify.
"""
self._ensure_in_cache(path)
with self.connection as db:
now = datetime.datetime.utcnow()
args = json.dumps(args)
db.execute(
"""
UPDATE cache
SET accesses = accesses + 1,
last_access = ?
WHERE path=?""",
(now, path),
)
changes = db.execute("SELECT changes()").fetchone()[0]
if not changes:
db.execute(
"""
INSERT INTO cache(
path,
owner,
args,
creation_date,
last_access,
accesses,
parent)
VALUES(?,?,?,?,?,?,?)""",
(path, owner, args, now, now, 1, parent),
)
return dict(
db.execute("SELECT * FROM cache WHERE path=?", (path,)).fetchone()
)
def _cache_size(self):
with self.connection as db:
size = db.execute("SELECT SUM(size) FROM cache").fetchone()[0]
if size is None:
size = 0
return size
def _decache_file(self, path):
self._delete_entry(path)
def _check_cache_size(self):
# Check absolute limit
size = self._cache_size()
maximum = SETTINGS.get("maximum-cache-size")
if maximum is not None and size > maximum:
self._housekeeping()
self._decache(size - maximum)
# Check relative limit
size = self._cache_size()
usage = SETTINGS.get("maximum-cache-disk-usage")
cache_directory = SETTINGS.get("cache-directory")
df = psutil.disk_usage(cache_directory)
if df.percent > usage:
LOG.debug("Cache disk usage %s, limit %s", df.percent, usage)
self._housekeeping()
delta = (df.percent - usage) * df.total * 0.01
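# (df.percent - usage) is the excess disk usage in percentage points; multiplying
# by df.total * 0.01 converts that excess into the number of bytes to free.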
self._decache(delta)
def _repr_html_(self):
"""Return a html representation of the cache .
Returns
-------
str
HTML status of the cache.
"""
html = [css("table")]
with self.connection as db:
for n in db.execute("SELECT * FROM cache"):
html.append("<table class='climetlab'>")
html.append("<td><td colspan='2'>%s</td></tr>" % (n["path"],))
for k in [x for x in n.keys() if x not in ("path", "owner_data")]:
v = humanize.bytes(n[k]) if k == "size" else n[k]
html.append("<td><td>%s</td><td>%s</td></tr>" % (k, v))
html.append("</table>")
html.append("<br>")
return "".join(html)
def _dump_cache_database(self):
result = []
with self.connection as db:
for d in db.execute("SELECT * FROM cache"):
n = dict(d)
for k in ("args", "owner_data"):
if n[k] is not None:
n[k] = json.loads(n[k])
result.append(n)
return result
CACHE = Cache()
CACHE.start()
dump_cache_database = in_executor(CACHE._dump_cache_database)
register_cache_file = in_executor(CACHE._register_cache_file)
update_entry = in_executor(CACHE._update_entry)
check_cache_size = in_executor_forget(CACHE._check_cache_size)
cache_size = in_executor(CACHE._cache_size)
cache_entries = in_executor(CACHE._cache_entries)
purge_cache = in_executor(CACHE._purge_cache)
housekeeping = in_executor(CACHE._housekeeping)
decache_file = in_executor(CACHE._decache_file)
file_in_cache_directory = in_executor(CACHE._file_in_cache_directory)
settings_changed = in_executor(CACHE._settings_changed)
def cache_file(
owner: str,
create,
args,
hash_extra=None,
extension: str = ".cache",
force=None,
replace=None,
):
"""Creates a cache file in the climetlab cache-directory (defined in the :py:class:`Settings`).
Uses :py:func:`_register_cache_file()`
Parameters
----------
owner : str
The owner of the cache file is generally the name of the source that generated the cache.
extension : str, optional
Extension filename (such as ".nc" for NetCDF, etc.), by default ".cache"
Returns
-------
path : str
Full path to the cache file.
"""
m = hashlib.sha256()
m.update(owner.encode("utf-8"))
m.update(json.dumps(args, sort_keys=True).encode("utf-8"))
m.update(json.dumps(hash_extra, sort_keys=True).encode("utf-8"))
m.update(json.dumps(extension, sort_keys=True).encode("utf-8"))
if replace is not None:
# Don't replace files that are not in the cache
if not file_in_cache_directory(replace):
replace = None
path = os.path.join(
SETTINGS.get("cache-directory"),
"{}-{}{}".format(
owner.lower(),
m.hexdigest(),
extension,
),
)
record = register_cache_file(path, owner, args)
if os.path.exists(path):
if callable(force):
owner_data = record["owner_data"]
if owner_data is not None:
owner_data = json.loads(owner_data)
force = force(args, path, owner_data)
if force:
decache_file(path)
if not os.path.exists(path):
lock = path + ".lock"
with FileLock(lock):
if not os.path.exists(
path
): # Check again, another thread/process may have created the file
owner_data = create(path + ".tmp", args)
os.rename(path + ".tmp", path)
update_entry(path, owner_data)
check_cache_size()
try:
os.unlink(lock)
except OSError:
pass
return path
def auxiliary_cache_file(
owner,
path,
index=0,
content=None,
extension=".cache",
):
# Create an auxiliary cache file
# to be used for example to cache an index
# It is invalidated if `path` is changed
stat = os.stat(path)
def create(target, args):
# Simply touch the file
with open(target, "w") as f:
if content:
f.write(content)
return cache_file(
owner,
create,
(
path,
stat.st_ctime,
stat.st_mtime,
stat.st_size,
index,
),
extension=extension,
)
# housekeeping()
SETTINGS.on_change(settings_changed)
| LOG.debug(
f"CliMetLab cache: orphan found: {full} with parent {parent}"
) | conditional_block |
caching.py | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
"""
Internally, the CliMetLab cache is managed by the module `climetlab.core.cache`,
which relies on a SQLite database. The :py:func:`cache_file` function provides
a unique path for a given pair (`owner`, `args`).
The calling code is responsible for checking whether the file exists and
for deciding to read it or to create it.
"""
import datetime
import hashlib
import json
import logging
import os
import shutil
import sqlite3
import threading
import time
from functools import wraps
import psutil
from filelock import FileLock
from climetlab.core.settings import SETTINGS
from climetlab.utils import humanize
from climetlab.utils.html import css
VERSION = 2
CACHE_DB = f"cache-{VERSION}.db"
LOG = logging.getLogger(__name__)
CONNECTION = None
CACHE = None
def in_executor(func):
@wraps(func)
def wrapped(*args, **kwargs):
global CACHE
s = CACHE.enqueue(func, *args, **kwargs)
return s.result()
return wrapped
def in_executor_forget(func):
@wraps(func)
def wrapped(*args, **kwargs):
global CACHE
CACHE.enqueue(func, *args, **kwargs)
return None
return wrapped
class Future:
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
self._condition = threading.Condition()
self._ready = False
self._result = None
def execute(self):
try:
self._result = self.func(*self.args, **self.kwargs)
except Exception as e:
LOG.error(e)
self._result = e
with self._condition:
self._ready = True
self._condition.notify_all()
def result(self):
with self._condition:
while not self._ready:
self._condition.wait()
if isinstance(self._result, Exception):
raise self._result
return self._result
class Cache(threading.Thread):
def __init__(self):
super().__init__(daemon=True)
self._connection = None
self._queue = []
self._condition = threading.Condition()
def run(self):
while True:
with self._condition:
while len(self._queue) == 0:
self._condition.wait()
s = self._queue.pop(0)
self._condition.notify_all()
s.execute()
@property
def connection(self):
if self._connection is None:
cache_dir = SETTINGS.get("cache-directory")
if not os.path.exists(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
cache_db = os.path.join(cache_dir, CACHE_DB)
LOG.debug("Cache database is %s", cache_db)
self._connection = sqlite3.connect(cache_db)
# So we can use rows as dictionaries
self._connection.row_factory = sqlite3.Row
# If you change the schema, change VERSION above
self._connection.execute(
"""
CREATE TABLE IF NOT EXISTS cache (
path TEXT PRIMARY KEY,
owner TEXT NOT NULL,
args TEXT NOT NULL,
creation_date TEXT NOT NULL,
flags INTEGER DEFAULT 0,
owner_data TEXT,
last_access TEXT NOT NULL,
type TEXT,
parent TEXT,
replaced TEXT,
extra TEXT,
expires INTEGER,
accesses INTEGER,
size INTEGER);"""
)
return self._connection
def enqueue(self, func, *args, **kwargs):
with self._condition:
s = Future(func, args, kwargs)
self._queue.append(s)
self._condition.notify_all()
return s
def _file_in_cache_directory(self, path):
cache_directory = SETTINGS.get("cache-directory")
return path.startswith(cache_directory)
def _ensure_in_cache(self, path):
assert self._file_in_cache_directory(path), f"File not in cache {path}"
def _settings_changed(self):
LOG.debug("Settings changed")
self._connection = None # The user may have changed the cache directory
self._check_cache_size()
def _latest_date(self):
"""Returns the latest date to be used when purging the cache.
So we do not purge files being downloaded."""
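# Entries whose size is still NULL are downloads in progress; their earliest
# creation_date (or, failing that, the newest completed entry) bounds what
# _decache() may delete.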
with self.connection as db:
latest = db.execute(
"SELECT MIN(creation_date) FROM cache WHERE size IS NULL"
).fetchone()[0]
if latest is None:
latest = db.execute(
"SELECT MAX(creation_date) FROM cache WHERE size IS NOT NULL"
).fetchone()[0]
if latest is None:
latest = datetime.datetime.utcnow()
return latest
def _purge_cache(self, owner=None, age=None, size=None):
if owner is None and age is None and size is None:
self._decache(self._cache_size())
return
with self.connection as db:
db.execute("DELETE FROM cache WHERE owner=?", (owner,))
def _cache_entries(self):
result = []
with self.connection as db:
for n in db.execute("SELECT * FROM cache").fetchall():
n = dict(n)
n["args"] = json.loads(n["args"])
try:
n["owner_data"] = json.loads(n["owner_data"])
except Exception:
pass
if os.path.exists(n["path"]):
result.append(n)
return result
def _update_entry(self, path, owner_data=None):
self._ensure_in_cache(path)
if os.path.isdir(path):
kind = "directory"
size = 0
for root, _, files in os.walk(path):
for f in files:
size += os.path.getsize(os.path.join(root, f))
else:
kind = "file"
size = os.path.getsize(path)
with self.connection as db:
db.execute(
"UPDATE cache SET size=?, type=?, owner_data=? WHERE path=?",
(
size,
kind,
json.dumps(owner_data),
path,
),
)
def _update_cache(self, clean=False):
|
def _housekeeping(self):
top = SETTINGS.get("cache-directory")
with self.connection as db:
for name in os.listdir(top):
if name == CACHE_DB:
continue
full = os.path.join(top, name)
count = db.execute(
"SELECT count(*) FROM cache WHERE path=?", (full,)
).fetchone()[0]
if count > 0:
continue
parent = None
start = full.split(".")[0] + "%"
for n in db.execute(
"SELECT path FROM cache WHERE parent IS NULL and path LIKE ?",
(start,),
).fetchall():
if full.startswith(n["path"]):
parent = n["path"]
break
try:
s = os.stat(full)
if time.time() - s.st_mtime < 120: # Two minutes
continue
except OSError:
pass
if parent is None:
LOG.warning(f"CliMetLab cache: orphan found: {full}")
else:
LOG.debug(
f"CliMetLab cache: orphan found: {full} with parent {parent}"
)
self._register_cache_file(
full,
"orphans",
None,
parent,
)
self._update_cache()
def _delete_file(self, path):
self._ensure_in_cache(path)
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.unlink(path)
except Exception:
LOG.exception("Deleting %s", path)
def _entry_to_dict(self, entry):
n = dict(entry)
for k in ("args", "owner_data"):
if k in n and isinstance(n[k], str):
n[k] = json.loads(n[k])
return n
def _delete_entry(self, entry):
if isinstance(entry, str):
entry = dict(
path=entry,
size=None,
owner=None,
args=None,
)
path, size, owner, args = entry, None, None, None
try:
entry["size"] = os.path.getsize(entry["path"])
except OSError:
pass
path, size, owner, args = (
entry["path"],
entry["size"],
entry["owner"],
entry["args"],
)
LOG.warning(
"Deleting entry %s", json.dumps(self._entry_to_dict(entry), indent=4)
)
total = 0
# First, delete child files, e.g. unzipped data
with self.connection as db:
for child in db.execute("SELECT * FROM cache WHERE parent = ?", (path,)):
total += self._delete_entry(child)
if not os.path.exists(path):
LOG.warning(f"cache file lost: {path}")
with self.connection as db:
db.execute("DELETE FROM cache WHERE path=?", (path,))
return total
LOG.warning(f"CliMetLab cache: deleting {path} ({humanize.bytes(size)})")
LOG.warning(f"CliMetLab cache: {owner} {args}")
self._delete_file(path)
with self.connection as db:
db.execute("DELETE FROM cache WHERE path=?", (path,))
return total + size
def _decache(self, bytes):
# _find_orphans()
# _update_cache(clean=True)
if bytes <= 0:
return 0
LOG.warning("CliMetLab cache: trying to free %s", humanize.bytes(bytes))
total = 0
with self.connection as db:
latest = self._latest_date()
for stmt in (
"SELECT * FROM cache WHERE size IS NOT NULL AND owner='orphans' AND creation_date < ?",
"SELECT * FROM cache WHERE size IS NOT NULL AND creation_date < ? ORDER BY last_access ASC",
):
for entry in db.execute(stmt, (latest,)):
total += self._delete_entry(entry)
if total >= bytes:
LOG.warning(
"CliMetLab cache: freed %s from cache",
humanize.bytes(bytes),
)
return total
LOG.warning("CliMetLab cache: could not free %s", humanize.bytes(bytes))
def _register_cache_file(self, path, owner, args, parent=None):
"""Register a file in the cache
Parameters
----------
path : str
Cache file to register
owner : str
Owner of the cache file (generally a source or a dataset)
args : dict
Dictionary to save with the file in the database, as json string.
Returns
-------
changes :
None or False if database does not need to be updated. TODO: clarify.
"""
self._ensure_in_cache(path)
with self.connection as db:
now = datetime.datetime.utcnow()
args = json.dumps(args)
db.execute(
"""
UPDATE cache
SET accesses = accesses + 1,
last_access = ?
WHERE path=?""",
(now, path),
)
changes = db.execute("SELECT changes()").fetchone()[0]
if not changes:
db.execute(
"""
INSERT INTO cache(
path,
owner,
args,
creation_date,
last_access,
accesses,
parent)
VALUES(?,?,?,?,?,?,?)""",
(path, owner, args, now, now, 1, parent),
)
return dict(
db.execute("SELECT * FROM cache WHERE path=?", (path,)).fetchone()
)
def _cache_size(self):
with self.connection as db:
size = db.execute("SELECT SUM(size) FROM cache").fetchone()[0]
if size is None:
size = 0
return size
def _decache_file(self, path):
self._delete_entry(path)
def _check_cache_size(self):
# Check absolute limit
size = self._cache_size()
maximum = SETTINGS.get("maximum-cache-size")
if maximum is not None and size > maximum:
self._housekeeping()
self._decache(size - maximum)
# Check relative limit
size = self._cache_size()
usage = SETTINGS.get("maximum-cache-disk-usage")
cache_directory = SETTINGS.get("cache-directory")
df = psutil.disk_usage(cache_directory)
if df.percent > usage:
LOG.debug("Cache disk usage %s, limit %s", df.percent, usage)
self._housekeeping()
delta = (df.percent - usage) * df.total * 0.01
self._decache(delta)
def _repr_html_(self):
"""Return a html representation of the cache .
Returns
-------
str
HTML status of the cache.
"""
html = [css("table")]
with self.connection as db:
for n in db.execute("SELECT * FROM cache"):
html.append("<table class='climetlab'>")
html.append("<td><td colspan='2'>%s</td></tr>" % (n["path"],))
for k in [x for x in n.keys() if x not in ("path", "owner_data")]:
v = humanize.bytes(n[k]) if k == "size" else n[k]
html.append("<td><td>%s</td><td>%s</td></tr>" % (k, v))
html.append("</table>")
html.append("<br>")
return "".join(html)
def _dump_cache_database(self):
result = []
with self.connection as db:
for d in db.execute("SELECT * FROM cache"):
n = dict(d)
for k in ("args", "owner_data"):
if n[k] is not None:
n[k] = json.loads(n[k])
result.append(n)
return result
CACHE = Cache()
CACHE.start()
dump_cache_database = in_executor(CACHE._dump_cache_database)
register_cache_file = in_executor(CACHE._register_cache_file)
update_entry = in_executor(CACHE._update_entry)
check_cache_size = in_executor_forget(CACHE._check_cache_size)
cache_size = in_executor(CACHE._cache_size)
cache_entries = in_executor(CACHE._cache_entries)
purge_cache = in_executor(CACHE._purge_cache)
housekeeping = in_executor(CACHE._housekeeping)
decache_file = in_executor(CACHE._decache_file)
file_in_cache_directory = in_executor(CACHE._file_in_cache_directory)
settings_changed = in_executor(CACHE._settings_changed)
def cache_file(
owner: str,
create,
args,
hash_extra=None,
extension: str = ".cache",
force=None,
replace=None,
):
"""Creates a cache file in the climetlab cache-directory (defined in the :py:class:`Settings`).
Uses :py:func:`_register_cache_file()`
Parameters
----------
owner : str
The owner of the cache file is generally the name of the source that generated the cache.
extension : str, optional
Extension filename (such as ".nc" for NetCDF, etc.), by default ".cache"
Returns
-------
path : str
Full path to the cache file.
"""
m = hashlib.sha256()
m.update(owner.encode("utf-8"))
m.update(json.dumps(args, sort_keys=True).encode("utf-8"))
m.update(json.dumps(hash_extra, sort_keys=True).encode("utf-8"))
m.update(json.dumps(extension, sort_keys=True).encode("utf-8"))
if replace is not None:
# Don't replace files that are not in the cache
if not file_in_cache_directory(replace):
replace = None
path = os.path.join(
SETTINGS.get("cache-directory"),
"{}-{}{}".format(
owner.lower(),
m.hexdigest(),
extension,
),
)
record = register_cache_file(path, owner, args)
if os.path.exists(path):
if callable(force):
owner_data = record["owner_data"]
if owner_data is not None:
owner_data = json.loads(owner_data)
force = force(args, path, owner_data)
if force:
decache_file(path)
if not os.path.exists(path):
lock = path + ".lock"
with FileLock(lock):
if not os.path.exists(
path
): # Check again, another thread/process may have created the file
owner_data = create(path + ".tmp", args)
os.rename(path + ".tmp", path)
update_entry(path, owner_data)
check_cache_size()
try:
os.unlink(lock)
except OSError:
pass
return path
def auxiliary_cache_file(
owner,
path,
index=0,
content=None,
extension=".cache",
):
# Create an auxiliary cache file
# to be used for example to cache an index
# It is invalidated if `path` is changed
stat = os.stat(path)
def create(target, args):
# Simply touch the file
with open(target, "w") as f:
if content:
f.write(content)
return cache_file(
owner,
create,
(
path,
stat.st_ctime,
stat.st_mtime,
stat.st_size,
index,
),
extension=extension,
)
# housekeeping()
SETTINGS.on_change(settings_changed)
| """Update cache size and size of each file in the database ."""
with self.connection as db:
update = []
commit = False
for n in db.execute("SELECT path FROM cache WHERE size IS NULL"):
try:
path = n[0]
if os.path.isdir(path):
kind = "directory"
size = 0
for root, _, files in os.walk(path):
for f in files:
size += os.path.getsize(os.path.join(root, f))
else:
kind = "file"
size = os.path.getsize(path)
update.append((size, kind, path))
except Exception:
if clean:
db.execute("DELETE from cache WHERE path=?", (path,))
commit = True
if update:
db.executemany("UPDATE cache SET size=?, type=? WHERE path=?", update)
if update or commit:
db.commit() | identifier_body |
resumeBuilder.js | /*
This is empty on purpose! Your code to build the resume will go here.
*/
// var name = "Peter";
// var awesomeToughts = "I am " + name + " and I am awesome";
// // .replace([old],[new]);
// var funToughts = awesomeToughts.replace("awesome","fun")
// console.log(funToughts);
// console.log(awesomeToughts);
// $("#main").append(funToughts);
// s = s.slice(1);
// s = s.replace("u","U");
//
////////////////////////////////////////// Working Code //////////////////////////////////////////////////////////
// var bio = {
// "name" : "Peter Chen",
// "role" : "Web Developer",
// "contact_info" : ["9088017841",
// "[email protected]",
// "twitter.com/peter_butter1",
// "github.com/chendddong",
// "dcyou.tech.blog",
// "Princeton, NJ, 08540"],
// "pic_url" : "images/me.jpg",
// "welcome_msg" : "Stay Hungry, Stay foolish",
// "skills" : ["Java" , "Web development", "Algorithms", "Machine Learning", "Data Analysis", "Anomaly Detection"]
// }
// var HTMLheaderName = '<h1 id="name">%data%</h1>';
// var HTMLheaderRole = '<span>%data%</span><hr>';
// var HTMLcontactGeneric = '<li class="flex-item"><span class="orange-text">%contact%</span><span class="white-text">%data%</span></li>';
// var HTMLmobile = '<li class="flex-item"><span class="orange-text">mobile</span><span class="white-text">%data%</span></li>';
// var HTMLemail = '<li class="flex-item"><span class="orange-text">email</span><span class="white-text">%data%</span></li>';
// var HTMLtwitter = '<li class="flex-item"><span class="orange-text">twitter</span><span class="white-text">%data%</span></li>';
// var HTMLgithub = '<li class="flex-item"><span class="orange-text">github</span><span class="white-text">%data%</span></li>';
// var HTMLblog = '<li class="flex-item"><span class="orange-text">blog</span><span class="white-text">%data%</span></li>';
// var HTMLlocation = '<li class="flex-item"><span class="orange-text">location</span><span class="white-text">%data%</span></li>';
// var HTMLbioPic = '<img src="%data%" class="biopic">';
// var HTMLwelcomeMsg = '<span class="welcome-message">%data%</span>';
// var HTMLskillsStart = '<h3 id="skills-h3">Skills at a Glance:</h3><ul id="skills" class="flex-column"></ul>';
// var HTMLskills = '<li class="flex-item"><span class="white-text">%data%</span></li>';
// var name = bio.name;
// var role = bio.role;
// var contact_mobile = bio.contact_info[0];
// var contact_email = bio.contact_info[1];
// var contact_twitter = bio.contact_info[2];
// var contact_github = bio.contact_info[3];
// var contact_blog = bio.contact_info[4];
// var contact_location = bio.contact_info[5];
// var picture = bio.pic_url;
// var welcome_msg = bio.welcome_msg;
// var skill_start = HTMLskillsStart;
// var skills = bio.skills;
// var formattedName = HTMLheaderName.replace("%data%",
// name);
// var formattedRole = HTMLheaderRole.replace("%data%",
// role);
// var formattedMobile = HTMLmobile.replace("%data%", contact_mobile);
// var formattedEmail = HTMLemail.replace("%data%", contact_email);
// var formattedTwitter = HTMLtwitter.replace("%data%", contact_twitter);
// var formattedGithub = HTMLgithub.replace("%data%", contact_github);
// var formattedBlog = HTMLblog.replace("%data%", contact_blog);
// var formattedLocation = HTMLlocation.replace("%data%", contact_location);
// var formattedPic = HTMLbioPic.replace("%data%",picture);
// var formattedWelcom_msg = HTMLwelcomeMsg.replace("%data%",welcome_msg);
// var formattedSkill_start = HTMLskillsStart;
// var formattedSkills = HTMLskills.replace("%data%",skills);
// $("#header").prepend(formattedRole);
// $("#header").prepend(formattedName);
// $("#topContacts").append(formattedMobile);
// $("#topContacts").append(formattedEmail);
// $("#topContacts").append(formattedTwitter);
// $("#topContacts").append(formattedGithub);
// $("#topContacts").append(formattedBlog);
// $("#topContacts").append(formattedLocation);
// $("#header").append(formattedPic);
// $("#header").append(formattedWelcom_msg);
// $("#header").append(formattedSkill_start);
// $("#skills-h3").append(formattedSkills);
////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////
// Four Objects: bio, work, projects, education //
///////////////////////////////////////////////
////////////////
// bio Object //
////////////////
var bio = {
"name" : "Peter Chen",
"role" : "Web Developer",
"welcome_msg" : "Stay Hungry, Stay foolish",
"pic_url" : "images/me.jpg",
"contacts" : {
"mobile" : "9088017841",
"email" : "[email protected]",
"twitter" : "twitter.com/peter_butter1",
"github" : "github.com/chendddong",
"blog" : "dcyou.tech.blog",
"location" : "Princeton, NJ, 08540"
},
"skills" : ["Java", "C", "Python", "Web development", "Data Structures & Algorithms", "Machine Learning", "Data Analysis", "Anomaly Detection"]
}
/////////////////
// Work Object //
/////////////////
var work = {
"work":
[
{
"employer" : "SUNY Albany",
"title" : "Teaching Assistant",
"dates" : "2016 Fall",
"location" : "Albany",
"description" : "\u2022 Taught students algorithms concepts;<br />\u2022 Designed questions and labs."
},
{
"employer" : "Diwinet Technologies",
"title" : "Developer (Intern)",
"dates" : "2015 Summer",
"location" : "Beijing",
"description" : "\u2022 Assisted the software team by testing the device’s (Mr.Water) WIFI connection and improved code using VB to count link failures;<br /> \u2022 Participated the design and development of the Mr.Water’s monitoring website."
},
{
"employer" : "China Central Television Business Channel",
"title" : "Reporter And Editor",
"dates" : "June 2012 - June 2015",
"location" : "Beijing",
"description" : "\u2022 Mainly responsible for the interviews, editing, and broadcast of technological news;<br /> \u2022 Wrote and edited many press releases with depth related to political, economic and cultural;<br /> \u2022 Participated in producing the documentary films."
}
]
}
///////////////////////
// Education Object! //
///////////////////////
var education = {
"schools": [{
"school_name": "SUNY-Albany",
"degree": "Master of Science",
"dates": "2017-05-17",
"location": "Albany, NY, 12222",
"url" : "http://albany.edu",
"major": ["CS", "Statistics"],
"online-course":
[
{
"title" : "Machine Learning Specialization",
"school" : "University of Washington on Coursera",
"dates" : "2016 Fall - 2017 Winter",
"url" : "https://www.coursera.org/account/accomplishments/specialization/certificate/3LHXHNAQC4QK"
},
{
"title" : "Full Stack Web Developer Nanodegree",
"school" : "Udacity",
"dates" : "2017 Spring till now",
"url" : "https://github.com/chendddong/Udacity"
},
{
"title" : "Front End Development Certification",
"school" : "freeCodeCamp",
"dates" : "2017 Spring till now",
"url" : "https://www.freecodecamp.com/chendddong"
},
{
"title" : "A Self-taught Education",
"school" : "OSSU",
"dates" : "2017 Spring till now",
"url" : "https://github.com/chendddong/OSSU"
}
]
},
{
"school_name": "CUC",
"degree": "BA",
"dates": "2012-06-17",
"location": "Beijing, China",
"major": ["Journalism"],
"online-course":
[
{
"title" : "Java Developer",
"school" : "Beida Jade Bird",
"dates" : "2015 Summer",
"url" : "http://www.beidaqingniao.org/index.html"
}
]
}
]
}
////////////////////
// Project Object //
////////////////////
var projects = {
"projects":
[
{
"title": "Page Rank",
"dates": "2017 Winter",
"description": "Implemented page rank algorithm by using the data set from wiki, which is utilized by Google Search",
"images": ["images/197x148.gif", "images/197x148.gif"]
},
{
"title": "AI.DJ",
"dates": "2016 Spring",
"description": "Recommend songs to users based on Users thumbs ups and downs",
"images": ["images/197x148.gif", "images/197x148.gif"]
},
{
"title": "DocHub",
"dates": "2015 Winter",
"description": "Designed, built, deployed an online web application called DocHub which is aimed to help patients and doctors schedule appointments 7x24.",
"images": ["images/197x148.gif", "images/197x148.gif"]
}
]
}
///////////////////////////////////////////////////////
/////////////////
// Bio Section //
/////////////////
///////////////////////
// Education Section //
///////////////////////
//////////////////////
// Work Section //
//////////////////////
function displayWork() {
| playWork();
//////////////////////
// Projects section //
//////////////////////
projects.display = function() {
$("#projects").append(HTMLprojectStart);
for (key in projects.projects){
if (projects.projects.hasOwnProperty(key)) {
var titleHTML = HTMLprojectTitle.replace("%data%", projects.projects[key].title);
$(".project-entry:last").append(titleHTML);
var datesHTML = HTMLprojectDates.replace("%data%", projects.projects[key].dates);
$(".project-entry:last").append(datesHTML);
var descriptionHTML = HTMLprojectDescription.replace("%data%", projects.projects[key].description);
$(".project-entry:last").append(descriptionHTML);
if (projects.projects[key].images.length > 0) {
for (image in projects.projects[key].images) {
var imagesHTML = HTMLprojectImage.replace("%data%",projects.projects
[key].images[image]);
$(".project-entry:last").append(imagesHTML);
}
}
}
}
}
projects.display();
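// Note: each display function follows the same pattern: take one of the HTML
// template strings (HTMLworkStart, HTMLprojectTitle, etc., assumed to be defined
// in the accompanying helper script) and replace its "%data%" placeholder before
// appending the result to the DOM.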
/////////////////
// Map Section //
/////////////////
$("#mapDiv").append(googleMap);
// $(document).load(initializeMap());
| $("#workExperience").append(HTMLworkStart);
for (key in work.work){
if (work.work.hasOwnProperty(key)) {
var employerHTML = HTMLworkEmployer.replace("%data%", work.work[key].employer);
$(".work-entry:last").append(employerHTML);
var titleHTML = HTMLworkTitle.replace("%data%", work.work[key].title);
$("a:last").append(titleHTML);
var datesHTML = HTMLworkDates.replace("%data%", work.work[key].dates);
$(".work-entry:last").append(datesHTML);
var locationHTML = HTMLworkLocation.replace("%data%", work.work
[key].location);
$(".work-entry:last").append(locationHTML);
var descriptionHTML = HTMLworkDescription.replace("%data%", work.work
[key].description);
$(".work-entry:last").append(descriptionHTML);
}
}
}
dis | identifier_body |
resumeBuilder.js | /*
This is empty on purpose! Your code to build the resume will go here.
*/
// var name = "Peter";
// var awesomeToughts = "I am " + name + " and I am awesome";
// // .replace([old],[new]);
// var funToughts = awesomeToughts.replace("awesome","fun")
// console.log(funToughts);
// console.log(awesomeToughts);
// $("#main").append(funToughts);
// s = s.slice(1);
// s = s.replace("u","U");
//
////////////////////////////////////////// Working Code //////////////////////////////////////////////////////////
// var bio = {
// "name" : "Peter Chen",
// "role" : "Web Developer",
// "contact_info" : ["9088017841",
// "[email protected]",
// "twitter.com/peter_butter1",
// "github.com/chendddong",
// "dcyou.tech.blog",
// "Princeton, NJ, 08540"],
// "pic_url" : "images/me.jpg",
// "welcome_msg" : "Stay Hungry, Stay foolish",
// "skills" : ["Java" , "Web development", "Algorithms", "Machine Learning", "Data Analysis", "Anomaly Detection"]
// }
// var HTMLheaderName = '<h1 id="name">%data%</h1>';
// var HTMLheaderRole = '<span>%data%</span><hr>';
// var HTMLcontactGeneric = '<li class="flex-item"><span class="orange-text">%contact%</span><span class="white-text">%data%</span></li>';
// var HTMLmobile = '<li class="flex-item"><span class="orange-text">mobile</span><span class="white-text">%data%</span></li>';
// var HTMLemail = '<li class="flex-item"><span class="orange-text">email</span><span class="white-text">%data%</span></li>';
// var HTMLtwitter = '<li class="flex-item"><span class="orange-text">twitter</span><span class="white-text">%data%</span></li>';
// var HTMLgithub = '<li class="flex-item"><span class="orange-text">github</span><span class="white-text">%data%</span></li>';
// var HTMLblog = '<li class="flex-item"><span class="orange-text">blog</span><span class="white-text">%data%</span></li>';
// var HTMLlocation = '<li class="flex-item"><span class="orange-text">location</span><span class="white-text">%data%</span></li>';
// var HTMLbioPic = '<img src="%data%" class="biopic">';
// var HTMLwelcomeMsg = '<span class="welcome-message">%data%</span>';
// var HTMLskillsStart = '<h3 id="skills-h3">Skills at a Glance:</h3><ul id="skills" class="flex-column"></ul>';
// var HTMLskills = '<li class="flex-item"><span class="white-text">%data%</span></li>';
// var name = bio.name;
// var role = bio.role;
// var contact_mobile = bio.contact_info[0];
// var contact_email = bio.contact_info[1];
// var contact_twitter = bio.contact_info[2];
// var contact_github = bio.contact_info[3];
// var contact_blog = bio.contact_info[4];
// var contact_location = bio.contact_info[5];
// var picture = bio.pic_url;
// var welcome_msg = bio.welcome_msg;
// var skill_start = HTMLskillsStart;
// var skills = bio.skills;
// var formattedName = HTMLheaderName.replace("%data%",
// name);
// var formattedRole = HTMLheaderRole.replace("%data%",
// role);
// var formattedMobile = HTMLmobile.replace("%data%", contact_mobile);
// var formattedEmail = HTMLemail.replace("%data%", contact_email);
// var formattedTwitter = HTMLtwitter.replace("%data%", contact_twitter);
// var formattedGithub = HTMLgithub.replace("%data%", contact_github);
// var formattedBlog = HTMLblog.replace("%data%", contact_blog);
// var formattedLocation = HTMLlocation.replace("%data%", contact_location);
// var formattedPic = HTMLbioPic.replace("%data%",picture);
// var formattedWelcom_msg = HTMLwelcomeMsg.replace("%data%",welcome_msg);
// var formattedSkill_start = HTMLskillsStart;
// var formattedSkills = HTMLskills.replace("%data%",skills);
// $("#header").prepend(formattedRole);
// $("#header").prepend(formattedName);
// $("#topContacts").append(formattedMobile);
// $("#topContacts").append(formattedEmail);
// $("#topContacts").append(formattedTwitter);
// $("#topContacts").append(formattedGithub);
// $("#topContacts").append(formattedBlog);
// $("#topContacts").append(formattedLocation);
// $("#header").append(formattedPic);
// $("#header").append(formattedWelcom_msg);
// $("#header").append(formattedSkill_start);
// $("#skills-h3").append(formattedSkills);
////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////
// Four 4 Objects bio work project education //
///////////////////////////////////////////////
////////////////
// bio Object //
////////////////
var bio = {
"name" : "Peter Chen",
"role" : "Web Developer",
"welcome_msg" : "Stay Hungry, Stay foolish",
"pic_url" : "images/me.jpg",
"contacts" : {
"mobile" : "9088017841",
"email" : "[email protected]",
"twitter" : "twitter.com/peter_butter1",
"github" : "github.com/chendddong",
"blog" : "dcyou.tech.blog",
"location" : "Princeton, NJ, 08540"
},
"skills" : ["Java", "C", "Python", "Web development", "Data Structures & Algorithms", "Machine Learning", "Data Analysis", "Anomaly Detection"]
}
/////////////////
// Work Object //
/////////////////
var work = {
"work":
[
{
"employer" : "SUNY Albany",
"title" : "Teaching Assistant",
"dates" : "2016 Fall",
"location" : "Albany",
"description" : "\u2022 Taught students algorithms concepts;<br />\u2022 Designed questions and labs."
},
{
"employer" : "Diwinet Technologies",
"title" : "Developer (Intern)",
"dates" : "2015 Summer",
"location" : "Beijing",
"description" : "\u2022 Assisted the software team by testing the device’s (Mr.Water) WIFI connection and improved code using VB to count link failures;<br /> \u2022 Participated the design and development of the Mr.Water’s monitoring website."
},
{
"employer" : "China Central Television Business Channel",
"title" : "Reporter And Editor",
"dates" : "June 2012 - June 2015",
"location" : "Beijing",
"description" : "\u2022 Mainly responsible for the interviews, editing, and broadcast of technological news;<br /> \u2022 Wrote and edited many press releases with depth related to political, economic and cultural;<br /> \u2022 Participated in producing the documentary films."
}
]
}
///////////////////////
// Education Object! //
///////////////////////
var education = {
"schools": [{
"school_name": "SUNY-Albany",
"degree": "Master of Science",
"dates": "2017-05-17",
"location": "Albany, NY, 12222",
"url" : "http://albany.edu",
"major": ["CS", "Statistics"],
"online-course":
[
{
"title" : "Machine Learning Specialization",
"school" : "University of Washington on Coursera",
"dates" : "2016 Fall - 2017 Winter",
"url" : "https://www.coursera.org/account/accomplishments/specialization/certificate/3LHXHNAQC4QK"
},
{
"title" : "Full Stack Web Developer Nanodegree",
"school" : "Udacity",
"dates" : "2017 Spring till now",
"url" : "https://github.com/chendddong/Udacity"
},
{
"title" : "Front End Development Certification",
"school" : "freeCodeCamp",
"dates" : "2017 Spring till now",
"url" : "https://www.freecodecamp.com/chendddong"
},
{
"title" : "A Self-taught Education",
"school" : "OSSU",
"dates" : "2017 Spring till now",
"url" : "https://github.com/chendddong/OSSU"
}
]
},
{
"school_name": "CUC",
"degree": "BA",
"dates": "2012-06-17",
"location": "Beijing, China",
"major": ["Journalism"],
"online-course":
[
{
"title" : "Java Developer",
"school" : "Beida Jade Bird",
"dates" : "2015 Summer",
"url" : "http://www.beidaqingniao.org/index.html"
}
]
}
]
}
////////////////////
// Project Object //
////////////////////
var projects = {
"projects":
[
{
"title": "Page Rank",
"dates": "2017 Winter",
"description": "Implemented page rank algorithm by using the data set from wiki, which is utilized by Google Search",
"images": ["images/197x148.gif", "images/197x148.gif"]
},
{
"title": "AI.DJ",
"dates": "2016 Spring",
"description": "Recommend songs to users based on Users thumbs ups and downs",
"images": ["images/197x148.gif", "images/197x148.gif"]
},
{
"title": "DocHub",
"dates": "2015 Winter",
"description": "Designed, built, deployed an online web application called DocHub which is aimed to help patients and doctors schedule appointments 7x24.",
"images": ["images/197x148.gif", "images/197x148.gif"]
}
]
}
///////////////////////////////////////////////////////
/////////////////
// Bio Session //
/////////////////
///////////////////////
// Education Section //
///////////////////////
//////////////////////
// Working Session //
//////////////////////
function displayWork() {
$("#workExperience").append(HTMLworkStart);
for (key in work.work){
if (work.work.hasOwnProperty(key)) {
var employerHTML = HTMLworkEmployer.replace("%data%", work.work[key].employer);
$(".work-entry:last").append(employerHTML);
var titleHTML = HTMLworkTitle.replace("%data%", work.work[key].title);
$("a:last").append(titleHTML);
var datesHTML = HTMLworkDates.replace("%data%", work.work[key].dates);
$(".work-entry:last").append(datesHTML);
var locationHTML = HTMLworkLocation.replace("%data%", work.work
[key].location);
$(".work-entry:last").append(locationHTML);
var descriptionHTML = HTMLworkDescription.replace("%data%", work.work
[key].description);
$(".work-entry:last").append(descriptionHTML);
}
}
}
displayWork();
//////////////////////
// Projects section //
//////////////////////
projects.display = function() { | if (projects.projects.hasOwnProperty(key)) {
var titleHTML = HTMLprojectTitle.replace("%data%", projects.projects[key].title);
$(".project-entry:last").append(titleHTML);
var datesHTML = HTMLprojectDates.replace("%data%", projects.projects[key].dates);
$(".project-entry:last").append(datesHTML);
var descriptionHTML = HTMLprojectDescription.replace("%data%", projects.projects[key].description);
$(".project-entry:last").append(descriptionHTML);
if (projects.projects[key].images.length > 0) {
for (image in projects.projects[key].images) {
var imagesHTML = HTMLprojectImage.replace("%data%",projects.projects
[key].images[image]);
$(".project-entry:last").append(imagesHTML);
}
}
}
}
}
projects.display();
/////////////////
// Map Section //
/////////////////
$("#mapDiv").append(googleMap);
// $(document).load(initializeMap()); | $("#projects").append(HTMLprojectStart);
for (key in projects.projects){ | random_line_split |
resumeBuilder.js | /*
This is empty on purpose! Your code to build the resume will go here.
*/
// var name = "Peter";
// var awesomeToughts = "I am " + name + " and I am awesome";
// // .replace([old],[new]);
// var funToughts = awesomeToughts.replace("awesome","fun")
// console.log(funToughts);
// console.log(awesomeToughts);
// $("#main").append(funToughts);
// s = s.slice(1);
// s = s.replace("u","U");
//
////////////////////////////////////////// Working Code //////////////////////////////////////////////////////////
// var bio = {
// "name" : "Peter Chen",
// "role" : "Web Developer",
// "contact_info" : ["9088017841",
// "[email protected]",
// "twitter.com/peter_butter1",
// "github.com/chendddong",
// "dcyou.tech.blog",
// "Princeton, NJ, 08540"],
// "pic_url" : "images/me.jpg",
// "welcome_msg" : "Stay Hungry, Stay foolish",
// "skills" : ["Java" , "Web development", "Algorithms", "Machine Learning", "Data Analysis", "Anomaly Detection"]
// }
// var HTMLheaderName = '<h1 id="name">%data%</h1>';
// var HTMLheaderRole = '<span>%data%</span><hr>';
// var HTMLcontactGeneric = '<li class="flex-item"><span class="orange-text">%contact%</span><span class="white-text">%data%</span></li>';
// var HTMLmobile = '<li class="flex-item"><span class="orange-text">mobile</span><span class="white-text">%data%</span></li>';
// var HTMLemail = '<li class="flex-item"><span class="orange-text">email</span><span class="white-text">%data%</span></li>';
// var HTMLtwitter = '<li class="flex-item"><span class="orange-text">twitter</span><span class="white-text">%data%</span></li>';
// var HTMLgithub = '<li class="flex-item"><span class="orange-text">github</span><span class="white-text">%data%</span></li>';
// var HTMLblog = '<li class="flex-item"><span class="orange-text">blog</span><span class="white-text">%data%</span></li>';
// var HTMLlocation = '<li class="flex-item"><span class="orange-text">location</span><span class="white-text">%data%</span></li>';
// var HTMLbioPic = '<img src="%data%" class="biopic">';
// var HTMLwelcomeMsg = '<span class="welcome-message">%data%</span>';
// var HTMLskillsStart = '<h3 id="skills-h3">Skills at a Glance:</h3><ul id="skills" class="flex-column"></ul>';
// var HTMLskills = '<li class="flex-item"><span class="white-text">%data%</span></li>';
// var name = bio.name;
// var role = bio.role;
// var contact_mobile = bio.contact_info[0];
// var contact_email = bio.contact_info[1];
// var contact_twitter = bio.contact_info[2];
// var contact_github = bio.contact_info[3];
// var contact_blog = bio.contact_info[4];
// var contact_location = bio.contact_info[5];
// var picture = bio.pic_url;
// var welcome_msg = bio.welcome_msg;
// var skill_start = HTMLskillsStart;
// var skills = bio.skills;
// var formattedName = HTMLheaderName.replace("%data%",
// name);
// var formattedRole = HTMLheaderRole.replace("%data%",
// role);
// var formattedMobile = HTMLmobile.replace("%data%", contact_mobile);
// var formattedEmail = HTMLemail.replace("%data%", contact_email);
// var formattedTwitter = HTMLtwitter.replace("%data%", contact_twitter);
// var formattedGithub = HTMLgithub.replace("%data%", contact_github);
// var formattedBlog = HTMLblog.replace("%data%", contact_blog);
// var formattedLocation = HTMLlocation.replace("%data%", contact_location);
// var formattedPic = HTMLbioPic.replace("%data%",picture);
// var formattedWelcom_msg = HTMLwelcomeMsg.replace("%data%",welcome_msg);
// var formattedSkill_start = HTMLskillsStart;
// var formattedSkills = HTMLskills.replace("%data%",skills);
// $("#header").prepend(formattedRole);
// $("#header").prepend(formattedName);
// $("#topContacts").append(formattedMobile);
// $("#topContacts").append(formattedEmail);
// $("#topContacts").append(formattedTwitter);
// $("#topContacts").append(formattedGithub);
// $("#topContacts").append(formattedBlog);
// $("#topContacts").append(formattedLocation);
// $("#header").append(formattedPic);
// $("#header").append(formattedWelcom_msg);
// $("#header").append(formattedSkill_start);
// $("#skills-h3").append(formattedSkills);
////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////
// Four 4 Objects bio work project education //
///////////////////////////////////////////////
////////////////
// bio Object //
////////////////
var bio = {
"name" : "Peter Chen",
"role" : "Web Developer",
"welcome_msg" : "Stay Hungry, Stay foolish",
"pic_url" : "images/me.jpg",
"contacts" : {
"mobile" : "9088017841",
"email" : "[email protected]",
"twitter" : "twitter.com/peter_butter1",
"github" : "github.com/chendddong",
"blog" : "dcyou.tech.blog",
"location" : "Princeton, NJ, 08540"
},
"skills" : ["Java", "C", "Python", "Web development", "Data Structures & Algorithms", "Machine Learning", "Data Analysis", "Anomaly Detection"]
}
/////////////////
// Work Object //
/////////////////
var work = {
"work":
[
{
"employer" : "SUNY Albany",
"title" : "Teaching Assistant",
"dates" : "2016 Fall",
"location" : "Albany",
"description" : "\u2022 Taught students algorithms concepts;<br />\u2022 Designed questions and labs."
},
{
"employer" : "Diwinet Technologies",
"title" : "Developer (Intern)",
"dates" : "2015 Summer",
"location" : "Beijing",
"description" : "\u2022 Assisted the software team by testing the device’s (Mr.Water) WIFI connection and improved code using VB to count link failures;<br /> \u2022 Participated the design and development of the Mr.Water’s monitoring website."
},
{
"employer" : "China Central Television Business Channel",
"title" : "Reporter And Editor",
"dates" : "June 2012 - June 2015",
"location" : "Beijing",
"description" : "\u2022 Mainly responsible for the interviews, editing, and broadcast of technological news;<br /> \u2022 Wrote and edited many press releases with depth related to political, economic and cultural;<br /> \u2022 Participated in producing the documentary films."
}
]
}
///////////////////////
// Education Object! //
///////////////////////
var education = {
"schools": [{
"school_name": "SUNY-Albany",
"degree": "Master of Science",
"dates": "2017-05-17",
"location": "Albany, NY, 12222",
"url" : "http://albany.edu",
"major": ["CS", "Statistics"],
"online-course":
[
{
"title" : "Machine Learning Specialization",
"school" : "University of Washington on Coursera",
"dates" : "2016 Fall - 2017 Winter",
"url" : "https://www.coursera.org/account/accomplishments/specialization/certificate/3LHXHNAQC4QK"
},
{
"title" : "Full Stack Web Developer Nanodegree",
"school" : "Udacity",
"dates" : "2017 Spring till now",
"url" : "https://github.com/chendddong/Udacity"
},
{
"title" : "Front End Development Certification",
"school" : "freeCodeCamp",
"dates" : "2017 Spring till now",
"url" : "https://www.freecodecamp.com/chendddong"
},
{
"title" : "A Self-taught Education",
"school" : "OSSU",
"dates" : "2017 Spring till now",
"url" : "https://github.com/chendddong/OSSU"
}
]
},
{
"school_name": "CUC",
"degree": "BA",
"dates": "2012-06-17",
"location": "Beijing, China",
"major": ["Journalism"],
"online-course":
[
{
"title" : "Java Developer",
"school" : "Beida Jade Bird",
"dates" : "2015 Summer",
"url" : "http://www.beidaqingniao.org/index.html"
}
]
}
]
}
////////////////////
// Project Object //
////////////////////
var projects = {
"projects":
[
{
"title": "Page Rank",
"dates": "2017 Winter",
"description": "Implemented page rank algorithm by using the data set from wiki, which is utilized by Google Search",
"images": ["images/197x148.gif", "images/197x148.gif"]
},
{
"title": "AI.DJ",
"dates": "2016 Spring",
"description": "Recommend songs to users based on Users thumbs ups and downs",
"images": ["images/197x148.gif", "images/197x148.gif"]
},
{
"title": "DocHub",
"dates": "2015 Winter",
"description": "Designed, built, deployed an online web application called DocHub which is aimed to help patients and doctors schedule appointments 7x24.",
"images": ["images/197x148.gif", "images/197x148.gif"]
}
]
}
///////////////////////////////////////////////////////
/////////////////
// Bio Session //
/////////////////
///////////////////////
// Education Section //
///////////////////////
//////////////////////
// Working Session //
//////////////////////
function displayWork() {
$("#workExperience").append(HTMLworkStart);
for (key in work.work){
if (work.work.hasOwnProperty(key)) {
var employerHTML = HTMLworkEmployer.replace("%data%", work.work[key].employer);
$(".work-entry:last").append(employerHTML);
var titleHTML = HTMLworkTitle.replace("%data%", work.work[key].title);
$("a:last").append(titleHTML);
var datesHTML = HTMLworkDates.replace("%data%", work.work[key].dates);
$(".work-entry:last").append(datesHTML);
var locationHTML = HTMLworkLocation.replace("%data%", work.work
[key].location);
$(".work-entry:last").append(locationHTML);
var descriptionHTML = HTMLworkDescription.replace("%data%", work.work
[key].description);
$(".work-entry:last").append(descriptionHTML);
}
}
}
displayWork();
//////////////////////
// Projects section //
//////////////////////
projects.display = function() {
$("#projects").append(HTMLprojectStart);
for (key in projects.projects){
if (projects.projects.hasOwnProperty(key)) {
var titleHTML = HTMLprojectTitle.replace("%data%", projects.projects[key].title);
$(".project-entry:last").append(titleHTML);
var datesHTML = HTMLprojectDates.replace("%data%", projects.projects[key].dates);
$(".project-entry:last").append(datesHTML);
var descriptionHTML = HTMLprojectDescription.replace("%data%", projects.projects[key].description);
$(".project-entry:last").append(descriptionHTML);
if (projects.projects[key].images.length > 0) {
| }
}
}
projects.display();
/////////////////
// Map Section //
/////////////////
$("#mapDiv").append(googleMap);
// $(document).load(initializeMap());
| for (image in projects.projects[key].images) {
var imagesHTML = HTMLprojectImage.replace("%data%",projects.projects
[key].images[image]);
$(".project-entry:last").append(imagesHTML);
}
}
| conditional_block |
resumeBuilder.js | /*
This is empty on purpose! Your code to build the resume will go here.
*/
// var name = "Peter";
// var awesomeToughts = "I am " + name + " and I am awesome";
// // .replace([old],[new]);
// var funToughts = awesomeToughts.replace("awesome","fun")
// console.log(funToughts);
// console.log(awesomeToughts);
// $("#main").append(funToughts);
// s = s.slice(1);
// s = s.replace("u","U");
//
////////////////////////////////////////// Working Code //////////////////////////////////////////////////////////
// var bio = {
// "name" : "Peter Chen",
// "role" : "Web Developer",
// "contact_info" : ["9088017841",
// "[email protected]",
// "twitter.com/peter_butter1",
// "github.com/chendddong",
// "dcyou.tech.blog",
// "Princeton, NJ, 08540"],
// "pic_url" : "images/me.jpg",
// "welcome_msg" : "Stay Hungry, Stay foolish",
// "skills" : ["Java" , "Web development", "Algorithms", "Machine Learning", "Data Analysis", "Anomaly Detection"]
// }
// var HTMLheaderName = '<h1 id="name">%data%</h1>';
// var HTMLheaderRole = '<span>%data%</span><hr>';
// var HTMLcontactGeneric = '<li class="flex-item"><span class="orange-text">%contact%</span><span class="white-text">%data%</span></li>';
// var HTMLmobile = '<li class="flex-item"><span class="orange-text">mobile</span><span class="white-text">%data%</span></li>';
// var HTMLemail = '<li class="flex-item"><span class="orange-text">email</span><span class="white-text">%data%</span></li>';
// var HTMLtwitter = '<li class="flex-item"><span class="orange-text">twitter</span><span class="white-text">%data%</span></li>';
// var HTMLgithub = '<li class="flex-item"><span class="orange-text">github</span><span class="white-text">%data%</span></li>';
// var HTMLblog = '<li class="flex-item"><span class="orange-text">blog</span><span class="white-text">%data%</span></li>';
// var HTMLlocation = '<li class="flex-item"><span class="orange-text">location</span><span class="white-text">%data%</span></li>';
// var HTMLbioPic = '<img src="%data%" class="biopic">';
// var HTMLwelcomeMsg = '<span class="welcome-message">%data%</span>';
// var HTMLskillsStart = '<h3 id="skills-h3">Skills at a Glance:</h3><ul id="skills" class="flex-column"></ul>';
// var HTMLskills = '<li class="flex-item"><span class="white-text">%data%</span></li>';
// var name = bio.name;
// var role = bio.role;
// var contact_mobile = bio.contact_info[0];
// var contact_email = bio.contact_info[1];
// var contact_twitter = bio.contact_info[2];
// var contact_github = bio.contact_info[3];
// var contact_blog = bio.contact_info[4];
// var contact_location = bio.contact_info[5];
// var picture = bio.pic_url;
// var welcome_msg = bio.welcome_msg;
// var skill_start = HTMLskillsStart;
// var skills = bio.skills;
// var formattedName = HTMLheaderName.replace("%data%",
// name);
// var formattedRole = HTMLheaderRole.replace("%data%",
// role);
// var formattedMobile = HTMLmobile.replace("%data%", contact_mobile);
// var formattedEmail = HTMLemail.replace("%data%", contact_email);
// var formattedTwitter = HTMLtwitter.replace("%data%", contact_twitter);
// var formattedGithub = HTMLgithub.replace("%data%", contact_github);
// var formattedBlog = HTMLblog.replace("%data%", contact_blog);
// var formattedLocation = HTMLlocation.replace("%data%", contact_location);
// var formattedPic = HTMLbioPic.replace("%data%",picture);
// var formattedWelcom_msg = HTMLwelcomeMsg.replace("%data%",welcome_msg);
// var formattedSkill_start = HTMLskillsStart;
// var formattedSkills = HTMLskills.replace("%data%",skills);
// $("#header").prepend(formattedRole);
// $("#header").prepend(formattedName);
// $("#topContacts").append(formattedMobile);
// $("#topContacts").append(formattedEmail);
// $("#topContacts").append(formattedTwitter);
// $("#topContacts").append(formattedGithub);
// $("#topContacts").append(formattedBlog);
// $("#topContacts").append(formattedLocation);
// $("#header").append(formattedPic);
// $("#header").append(formattedWelcom_msg);
// $("#header").append(formattedSkill_start);
// $("#skills-h3").append(formattedSkills);
////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////
// Four 4 Objects bio work project education //
///////////////////////////////////////////////
////////////////
// bio Object //
////////////////
var bio = {
"name" : "Peter Chen",
"role" : "Web Developer",
"welcome_msg" : "Stay Hungry, Stay foolish",
"pic_url" : "images/me.jpg",
"contacts" : {
"mobile" : "9088017841",
"email" : "[email protected]",
"twitter" : "twitter.com/peter_butter1",
"github" : "github.com/chendddong",
"blog" : "dcyou.tech.blog",
"location" : "Princeton, NJ, 08540"
},
"skills" : ["Java", "C", "Python", "Web development", "Data Structures & Algorithms", "Machine Learning", "Data Analysis", "Anomaly Detection"]
}
/////////////////
// Work Object //
/////////////////
var work = {
"work":
[
{
"employer" : "SUNY Albany",
"title" : "Teaching Assistant",
"dates" : "2016 Fall",
"location" : "Albany",
"description" : "\u2022 Taught students algorithms concepts;<br />\u2022 Designed questions and labs."
},
{
"employer" : "Diwinet Technologies",
"title" : "Developer (Intern)",
"dates" : "2015 Summer",
"location" : "Beijing",
"description" : "\u2022 Assisted the software team by testing the device’s (Mr.Water) WIFI connection and improved code using VB to count link failures;<br /> \u2022 Participated the design and development of the Mr.Water’s monitoring website."
},
{
"employer" : "China Central Television Business Channel",
"title" : "Reporter And Editor",
"dates" : "June 2012 - June 2015",
"location" : "Beijing",
"description" : "\u2022 Mainly responsible for the interviews, editing, and broadcast of technological news;<br /> \u2022 Wrote and edited many press releases with depth related to political, economic and cultural;<br /> \u2022 Participated in producing the documentary films."
}
]
}
///////////////////////
// Education Object! //
///////////////////////
var education = {
"schools": [{
"school_name": "SUNY-Albany",
"degree": "Master of Science",
"dates": "2017-05-17",
"location": "Albany, NY, 12222",
"url" : "http://albany.edu",
"major": ["CS", "Statistics"],
"online-course":
[
{
"title" : "Machine Learning Specialization",
"school" : "University of Washington on Coursera",
"dates" : "2016 Fall - 2017 Winter",
"url" : "https://www.coursera.org/account/accomplishments/specialization/certificate/3LHXHNAQC4QK"
},
{
"title" : "Full Stack Web Developer Nanodegree",
"school" : "Udacity",
"dates" : "2017 Spring till now",
"url" : "https://github.com/chendddong/Udacity"
},
{
"title" : "Front End Development Certification",
"school" : "freeCodeCamp",
"dates" : "2017 Spring till now",
"url" : "https://www.freecodecamp.com/chendddong"
},
{
"title" : "A Self-taught Education",
"school" : "OSSU",
"dates" : "2017 Spring till now",
"url" : "https://github.com/chendddong/OSSU"
}
]
},
{
"school_name": "CUC",
"degree": "BA",
"dates": "2012-06-17",
"location": "Beijing, China",
"major": ["Journalism"],
"online-course":
[
{
"title" : "Java Developer",
"school" : "Beida Jade Bird",
"dates" : "2015 Summer",
"url" : "http://www.beidaqingniao.org/index.html"
}
]
}
]
}
////////////////////
// Project Object //
////////////////////
var projects = {
"projects":
[
{
"title": "Page Rank",
"dates": "2017 Winter",
"description": "Implemented page rank algorithm by using the data set from wiki, which is utilized by Google Search",
"images": ["images/197x148.gif", "images/197x148.gif"]
},
{
"title": "AI.DJ",
"dates": "2016 Spring",
"description": "Recommend songs to users based on Users thumbs ups and downs",
"images": ["images/197x148.gif", "images/197x148.gif"]
},
{
"title": "DocHub",
"dates": "2015 Winter",
"description": "Designed, built, deployed an online web application called DocHub which is aimed to help patients and doctors schedule appointments 7x24.",
"images": ["images/197x148.gif", "images/197x148.gif"]
}
]
}
///////////////////////////////////////////////////////
/////////////////
// Bio Session //
/////////////////
///////////////////////
// Education Section //
///////////////////////
//////////////////////
// Working Session //
//////////////////////
function disp |
$("#workExperience").append(HTMLworkStart);
for (key in work.work){
if (work.work.hasOwnProperty(key)) {
var employerHTML = HTMLworkEmployer.replace("%data%", work.work[key].employer);
$(".work-entry:last").append(employerHTML);
var titleHTML = HTMLworkTitle.replace("%data%", work.work[key].title);
$("a:last").append(titleHTML);
var datesHTML = HTMLworkDates.replace("%data%", work.work[key].dates);
$(".work-entry:last").append(datesHTML);
var locationHTML = HTMLworkLocation.replace("%data%", work.work
[key].location);
$(".work-entry:last").append(locationHTML);
var descriptionHTML = HTMLworkDescription.replace("%data%", work.work
[key].description);
$(".work-entry:last").append(descriptionHTML);
}
}
}
displayWork();
//////////////////////
// Projects section //
//////////////////////
projects.display = function() {
$("#projects").append(HTMLprojectStart);
for (key in projects.projects){
if (projects.projects.hasOwnProperty(key)) {
var titleHTML = HTMLprojectTitle.replace("%data%", projects.projects[key].title);
$(".project-entry:last").append(titleHTML);
var datesHTML = HTMLprojectDates.replace("%data%", projects.projects[key].dates);
$(".project-entry:last").append(datesHTML);
var descriptionHTML = HTMLprojectDescription.replace("%data%", projects.projects[key].description);
$(".project-entry:last").append(descriptionHTML);
if (projects.projects[key].images.length > 0) {
for (image in projects.projects[key].images) {
var imagesHTML = HTMLprojectImage.replace("%data%",projects.projects
[key].images[image]);
$(".project-entry:last").append(imagesHTML);
}
}
}
}
}
projects.display();
/////////////////
// Map Section //
/////////////////
$("#mapDiv").append(googleMap);
// $(document).load(initializeMap());
| layWork() { | identifier_name |
task.go | package userdataprocess
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/pastelnetwork/gonode/common/errgroup"
"github.com/pastelnetwork/gonode/common/errors"
"github.com/pastelnetwork/gonode/common/log"
"github.com/pastelnetwork/gonode/common/service/task"
"github.com/pastelnetwork/gonode/common/service/userdata"
"github.com/pastelnetwork/gonode/walletnode/services/userdataprocess/node"
)
// Task is the task of userdata processing.
type Task struct {
task.Task
*Service
// information about the nodes used to set userdata
nodes node.List
resultChan chan *userdata.ProcessResult
err error
request *userdata.ProcessRequest
// information about the user pastelid used to retrieve userdata
userpastelid string // user pastelid
resultChanGet chan *userdata.ProcessRequest
}
// Run starts the task
func (task *Task) Run(ctx context.Context) error {
ctx = log.ContextWithPrefix(ctx, fmt.Sprintf("%s-%s", logPrefix, task.ID()))
log.WithContext(ctx).Debugf("Start task")
defer log.WithContext(ctx).Debugf("End task")
defer close(task.resultChan)
defer close(task.resultChanGet)
if err := task.run(ctx); err != nil {
task.err = err
task.UpdateStatus(StatusTaskFailure)
log.WithContext(ctx).WithError(err).Warnf("Task failed")
return nil
}
task.UpdateStatus(StatusTaskCompleted)
return nil
}
func (task *Task) run(ctx context.Context) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
maxNode := task.config.NumberSuperNodes
if task.request == nil {
// This is process to retrieve userdata
maxNode = 1 // Get data from 1 supernode only, currently we choose the 1st ranked supernode, but this may change later
}
// TODO: Make this init and connect to super nodes to generic reusable function to avoid code duplication (1)
// Retrieve supernodes with highest ranks.
topNodes, err := task.pastelTopNodes(ctx, maxNode)
if err != nil {
return err
}
if len(topNodes) < maxNode {
task.UpdateStatus(StatusErrorNotEnoughMasterNode)
return errors.New("unable to find enough Supernodes to send userdata to")
}
// Try to create a mesh of supernodes, connecting to all supernodes in different sequences.
var nodes node.List
var errs error
nodes, err = task.meshNodes(ctx, topNodes, 0) // Connect a mesh node with primary is 1st ranked SN
if err != nil {
if errors.IsContextCanceled(err) {
return err
}
errs = errors.Append(errs, err)
log.WithContext(ctx).WithError(err).Warnf("Could not create a mesh of the nodes")
}
if len(nodes) < maxNode {
return errors.Errorf("Could not create a mesh of %d nodes: %w", task.config.NumberSuperNodes, errs)
}
// Activate supernodes that are in the mesh.
nodes.Activate()
// Disconnect supernodes that are not involved in the process.
topNodes.DisconnectInactive()
// Cancel context when any connection is broken.
groupConnClose, _ := errgroup.WithContext(ctx)
groupConnClose.Go(func() error {
defer cancel()
return nodes.WaitConnClose(ctx)
})
task.UpdateStatus(StatusConnected)
task.nodes = nodes
if task.request == nil {
// PROCESS TO RETRIEVE USERDATA FROM METADATA LAYER
if err := nodes.ReceiveUserdata(ctx, task.userpastelid); err != nil {
return errors.Errorf("failed to receive userdata: %w", err)
}
// Post on result channel
node := nodes[0]
if node.ResultGet != nil {
task.resultChanGet <- node.ResultGet
} else {
return errors.Errorf("failed to receive userdata")
}
log.WithContext(ctx).Debug("Finished retrieve userdata")
} else {
// PROCESS TO SET/UPDATE USERDATA TO METADATA LAYER
// Get the previous block hash
// Get block num
blockHash := ""
blockNum, err := task.pastelClient.GetBlockCount(ctx)
if err != nil {
log.WithContext(ctx).Debug("failed to get block num: %w", err)
} else {
// Get block hash string
blockInfo, err := task.pastelClient.GetBlockVerbose1(ctx, blockNum)
if err != nil {
log.WithContext(ctx).Debug("failed to get block info with error: %w", err)
} else {
blockHash = blockInfo.Hash
}
}
task.request.PreviousBlockHash = blockHash
// Get the value of task.request.ArtistPastelIDPassphrase for sign data, then empty it in the request to make sure it not sent to supernodes
passphrase := task.request.ArtistPastelIDPassphrase
task.request.ArtistPastelIDPassphrase = ""
// Marshal task.request to byte array for signing
js, err := json.Marshal(task.request)
if err != nil {
return errors.Errorf("failed to encode request %w", err)
}
// Hash the request
hashvalue, err := userdata.Sha3256hash(js)
if err != nil {
return errors.Errorf("failed to hash request %w", err)
}
// Sign request with Wallet Node's pastelID and passphrase
signature, err := task.pastelClient.Sign(ctx, hashvalue, task.request.ArtistPastelID, passphrase, "ed448")
if err != nil {
return errors.Errorf("failed to sign ticket %w", err)
}
userdata := &userdata.ProcessRequestSigned{
Userdata: task.request,
UserdataHash: hex.EncodeToString(hashvalue),
Signature: hex.EncodeToString(signature),
}
// Send userdata to supernodes for storing in MDL's rqlite db.
if err := nodes.SendUserdata(ctx, userdata); err != nil {
return err
}
res, err := task.AggregateResult(ctx, nodes)
// Post on result channel
task.resultChan <- &res
if err != nil {
return err
}
log.WithContext(ctx).WithField("userdata_result", res).Debug("Posted userdata result")
}
// close the connections
for i := range nodes {
if err := nodes[i].Connection.Close(); err != nil {
log.WithContext(ctx).WithError(err).Debugf("failed to close connection to node %s", task.nodes[i].PastelID())
}
}
return nil
}
// AggregateResult aggregates the results returned by all supernodes and decides whether they are valid.
func (task *Task) AggregateResult(_ context.Context, nodes node.List) (userdata.ProcessResult, error) {
// There are the following common scenarios when supernodes respond:
// 1. Both the secondary node and the primary node respond with a userdata validation error
// 2. The secondary node responds with a successful userdata validation and the primary node provides the further processing result
// 3. Some nodes fail to respond, or the responses do not match the two cases above, so we need to aggregate the results and decide what happened
// This part covers case 1 or 2 above; we trust the primary node, so we use its response
for _, node := range nodes {
node := node
if node.IsPrimary() {
result := node.Result
if result == nil {
return userdata.ProcessResult{}, errors.Errorf("Primary node have empty result")
}
return *result, nil
}
}
return userdata.ProcessResult{}, errors.Errorf("failed to Aggregate Result")
}
// meshNodes establishes communication between supernodes.
func (task *Task) meshNodes(ctx context.Context, nodes node.List, primaryIndex int) (node.List, error) {
var meshNodes node.List
primary := nodes[primaryIndex]
if err := primary.Connect(ctx, task.config.connectTimeout); err != nil {
return nil, err
}
if err := primary.Session(ctx, true); err != nil {
return nil, err
}
primary.SetPrimary(true)
if len(nodes) == 1 {
// If the list contains only 1 node, we use this primary node and return directly
meshNodes.Add(primary)
return meshNodes, nil
}
nextConnCtx, nextConnCancel := context.WithCancel(ctx)
defer nextConnCancel()
// FIXME: ugly hack here. Need to make the Node and List to be safer
secondariesMtx := &sync.Mutex{}
var secondaries node.List
go func() {
for i, node := range nodes {
node := node
if i == primaryIndex {
continue
}
select {
case <-nextConnCtx.Done():
return
case <-time.After(task.config.connectToNextNodeDelay):
go func() {
defer errors.Recover(log.Fatal)
if err := node.Connect(ctx, task.config.connectTimeout); err != nil {
return
}
if err := node.Session(ctx, false); err != nil {
return
}
go func() {
secondariesMtx.Lock()
defer secondariesMtx.Unlock()
secondaries.Add(node)
}()
if err := node.ConnectTo(ctx, primary.PastelID(), primary.SessID()); err != nil {
return
}
log.WithContext(ctx).Debugf("Seconary %q connected to primary", node)
}()
}
}
}()
acceptCtx, acceptCancel := context.WithTimeout(ctx, task.config.acceptNodesTimeout)
defer acceptCancel()
accepted, err := primary.AcceptedNodes(acceptCtx)
if err != nil {
return nil, err
}
meshNodes.Add(primary)
secondariesMtx.Lock()
defer secondariesMtx.Unlock()
for _, pastelID := range accepted {
log.WithContext(ctx).Debugf("Primary accepted %q secondary node", pastelID)
node := secondaries.FindByPastelID(pastelID)
if node == nil {
return nil, errors.New("not found accepted node")
}
meshNodes.Add(node)
}
return meshNodes, nil
}
// pastelTopNodes retrieves the top supernodes we want to send userdata to, limited by maxNode
func (task *Task) pastelTopNodes(ctx context.Context, maxNode int) (node.List, error) { | var nodes node.List
mns, err := task.pastelClient.MasterNodesTop(ctx)
if err != nil {
return nil, err
}
count := 0
for _, mn := range mns {
count++
if count <= maxNode {
nodes = append(nodes, node.NewNode(task.Service.nodeClient, mn.ExtAddress, mn.ExtKey))
} else {
break
}
}
return nodes, nil
}
// Error returns task err
func (task *Task) Error() error {
return task.err
}
// SubscribeProcessResult returns the result state of userdata process
func (task *Task) SubscribeProcessResult() <-chan *userdata.ProcessResult {
return task.resultChan
}
// SubscribeProcessResultGet returns the result state of userdata process
func (task *Task) SubscribeProcessResultGet() <-chan *userdata.ProcessRequest {
return task.resultChanGet
}
// NewTask returns a new Task instance.
func NewTask(service *Service, request *userdata.ProcessRequest, userpastelid string) *Task {
return &Task{
Task: task.New(StatusTaskStarted),
Service: service,
request: request,
userpastelid: userpastelid,
resultChan: make(chan *userdata.ProcessResult),
resultChanGet: make(chan *userdata.ProcessRequest),
}
} | random_line_split |
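// Illustrative usage sketch: how a caller might create this task and consume its
// result channels. The *Service value "svc", the ctx wiring, and the request
// fields below are assumptions for the example only; the real wiring lives in the
// walletnode service/worker layer.
//
//	req := &userdata.ProcessRequest{ArtistPastelID: "jX...", ArtistPastelIDPassphrase: "passphrase"}
//	t := NewTask(svc, req, "")
//	go func() { _ = t.Run(ctx) }()
//	select {
//	case res := <-t.SubscribeProcessResult():
//	    _ = res // aggregated response from the primary supernode
//	case <-ctx.Done():
//	}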
|
task.go | package userdataprocess
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/pastelnetwork/gonode/common/errgroup"
"github.com/pastelnetwork/gonode/common/errors"
"github.com/pastelnetwork/gonode/common/log"
"github.com/pastelnetwork/gonode/common/service/task"
"github.com/pastelnetwork/gonode/common/service/userdata"
"github.com/pastelnetwork/gonode/walletnode/services/userdataprocess/node"
)
// Task is the task of userdata processing.
type Task struct {
task.Task
*Service
// information about the nodes used to set userdata
nodes node.List
resultChan chan *userdata.ProcessResult
err error
request *userdata.ProcessRequest
// information about the user pastelid used to retrieve userdata
userpastelid string // user pastelid
resultChanGet chan *userdata.ProcessRequest
}
// Run starts the task
func (task *Task) Run(ctx context.Context) error {
ctx = log.ContextWithPrefix(ctx, fmt.Sprintf("%s-%s", logPrefix, task.ID()))
log.WithContext(ctx).Debugf("Start task")
defer log.WithContext(ctx).Debugf("End task")
defer close(task.resultChan)
defer close(task.resultChanGet)
if err := task.run(ctx); err != nil {
task.err = err
task.UpdateStatus(StatusTaskFailure)
log.WithContext(ctx).WithError(err).Warnf("Task failed")
return nil
}
task.UpdateStatus(StatusTaskCompleted)
return nil
}
func (task *Task) run(ctx context.Context) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
maxNode := task.config.NumberSuperNodes
if task.request == nil {
// This is process to retrieve userdata
maxNode = 1 // Get data from 1 supernode only, currently we choose the 1st ranked supernode, but this may change later
}
// TODO: Make this init and connect to super nodes to generic reusable function to avoid code duplication (1)
// Retrieve supernodes with highest ranks.
topNodes, err := task.pastelTopNodes(ctx, maxNode)
if err != nil {
return err
}
if len(topNodes) < maxNode {
task.UpdateStatus(StatusErrorNotEnoughMasterNode)
return errors.New("unable to find enough Supernodes to send userdata to")
}
// Try to create a mesh of supernodes, connecting to all supernodes in different sequences.
var nodes node.List
var errs error
nodes, err = task.meshNodes(ctx, topNodes, 0) // Connect a mesh node with primary is 1st ranked SN
if err != nil {
if errors.IsContextCanceled(err) {
return err
}
errs = errors.Append(errs, err)
log.WithContext(ctx).WithError(err).Warnf("Could not create a mesh of the nodes")
}
if len(nodes) < maxNode {
return errors.Errorf("Could not create a mesh of %d nodes: %w", task.config.NumberSuperNodes, errs)
}
// Activate supernodes that are in the mesh.
nodes.Activate()
// Disconnect supernodes that are not involved in the process.
topNodes.DisconnectInactive()
// Cancel context when any connection is broken.
groupConnClose, _ := errgroup.WithContext(ctx)
groupConnClose.Go(func() error {
defer cancel()
return nodes.WaitConnClose(ctx)
})
task.UpdateStatus(StatusConnected)
task.nodes = nodes
if task.request == nil {
// PROCESS TO RETRIEVE USERDATA FROM METADATA LAYER
if err := nodes.ReceiveUserdata(ctx, task.userpastelid); err != nil {
return errors.Errorf("failed to receive userdata: %w", err)
}
// Post on result channel
node := nodes[0]
if node.ResultGet != nil {
task.resultChanGet <- node.ResultGet
} else {
return errors.Errorf("failed to receive userdata")
}
log.WithContext(ctx).Debug("Finished retrieve userdata")
} else {
// PROCESS TO SET/UPDATE USERDATA TO METADATA LAYER
// Get the previous block hash
// Get block num
blockHash := ""
blockNum, err := task.pastelClient.GetBlockCount(ctx)
if err != nil {
log.WithContext(ctx).Debug("failed to get block num: %w", err)
} else {
// Get block hash string
blockInfo, err := task.pastelClient.GetBlockVerbose1(ctx, blockNum)
if err != nil {
log.WithContext(ctx).Debug("failed to get block info with error: %w", err)
} else {
blockHash = blockInfo.Hash
}
}
task.request.PreviousBlockHash = blockHash
// Get the value of task.request.ArtistPastelIDPassphrase for sign data, then empty it in the request to make sure it not sent to supernodes
passphrase := task.request.ArtistPastelIDPassphrase
task.request.ArtistPastelIDPassphrase = ""
// Marshal task.request to byte array for signing
js, err := json.Marshal(task.request)
if err != nil {
return errors.Errorf("failed to encode request %w", err)
}
// Hash the request
hashvalue, err := userdata.Sha3256hash(js)
if err != nil {
return errors.Errorf("failed to hash request %w", err)
}
// Sign request with Wallet Node's pastelID and passphrase
signature, err := task.pastelClient.Sign(ctx, hashvalue, task.request.ArtistPastelID, passphrase, "ed448")
if err != nil {
return errors.Errorf("failed to sign ticket %w", err)
}
userdata := &userdata.ProcessRequestSigned{
Userdata: task.request,
UserdataHash: hex.EncodeToString(hashvalue),
Signature: hex.EncodeToString(signature),
}
// Send userdata to supernodes for storing in MDL's rqlite db.
if err := nodes.SendUserdata(ctx, userdata); err != nil {
return err
}
res, err := task.AggregateResult(ctx, nodes)
// Post on result channel
task.resultChan <- &res
if err != nil {
return err
}
log.WithContext(ctx).WithField("userdata_result", res).Debug("Posted userdata result")
}
// close the connections
for i := range nodes {
if err := nodes[i].Connection.Close(); err != nil {
log.WithContext(ctx).WithError(err).Debugf("failed to close connection to node %s", task.nodes[i].PastelID())
}
}
return nil
}
// AggregateResult aggregates the results returned by all supernodes and decides whether they are valid.
func (task *Task) AggregateResult(_ context.Context, nodes node.List) (userdata.ProcessResult, error) {
// There are the following common scenarios when supernodes respond:
// 1. Both the secondary node and the primary node respond with a userdata validation error
// 2. The secondary node responds with a successful userdata validation and the primary node provides the further processing result
// 3. Some nodes fail to respond, or the responses do not match the two cases above, so we need to aggregate the results and decide what happened
// This part covers case 1 or 2 above; we trust the primary node, so we use its response
for _, node := range nodes {
node := node
if node.IsPrimary() {
result := node.Result
if result == nil {
return userdata.ProcessResult{}, errors.Errorf("Primary node have empty result")
}
return *result, nil
}
}
return userdata.ProcessResult{}, errors.Errorf("failed to Aggregate Result")
}
// meshNodes establishes communication between supernodes.
func (task *Task) meshNodes(ctx context.Context, nodes node.List, primaryIndex int) (node.List, error) {
var meshNodes node.List
primary := nodes[primaryIndex]
if err := primary.Connect(ctx, task.config.connectTimeout); err != nil |
if err := primary.Session(ctx, true); err != nil {
return nil, err
}
primary.SetPrimary(true)
if len(nodes) == 1 {
// If the list contains only 1 node, we use this primary node and return directly
meshNodes.Add(primary)
return meshNodes, nil
}
nextConnCtx, nextConnCancel := context.WithCancel(ctx)
defer nextConnCancel()
// FIXME: ugly hack here. Need to make the Node and List to be safer
secondariesMtx := &sync.Mutex{}
var secondaries node.List
go func() {
for i, node := range nodes {
node := node
if i == primaryIndex {
continue
}
select {
case <-nextConnCtx.Done():
return
case <-time.After(task.config.connectToNextNodeDelay):
go func() {
defer errors.Recover(log.Fatal)
if err := node.Connect(ctx, task.config.connectTimeout); err != nil {
return
}
if err := node.Session(ctx, false); err != nil {
return
}
go func() {
secondariesMtx.Lock()
defer secondariesMtx.Unlock()
secondaries.Add(node)
}()
if err := node.ConnectTo(ctx, primary.PastelID(), primary.SessID()); err != nil {
return
}
log.WithContext(ctx).Debugf("Seconary %q connected to primary", node)
}()
}
}
}()
acceptCtx, acceptCancel := context.WithTimeout(ctx, task.config.acceptNodesTimeout)
defer acceptCancel()
accepted, err := primary.AcceptedNodes(acceptCtx)
if err != nil {
return nil, err
}
meshNodes.Add(primary)
secondariesMtx.Lock()
defer secondariesMtx.Unlock()
for _, pastelID := range accepted {
log.WithContext(ctx).Debugf("Primary accepted %q secondary node", pastelID)
node := secondaries.FindByPastelID(pastelID)
if node == nil {
return nil, errors.New("not found accepted node")
}
meshNodes.Add(node)
}
return meshNodes, nil
}
// pastelTopNodes retrieves the top supernodes we want to send userdata to, limited by maxNode
func (task *Task) pastelTopNodes(ctx context.Context, maxNode int) (node.List, error) {
var nodes node.List
mns, err := task.pastelClient.MasterNodesTop(ctx)
if err != nil {
return nil, err
}
count := 0
for _, mn := range mns {
count++
if count <= maxNode {
nodes = append(nodes, node.NewNode(task.Service.nodeClient, mn.ExtAddress, mn.ExtKey))
} else {
break
}
}
return nodes, nil
}
// Error returns task err
func (task *Task) Error() error {
return task.err
}
// SubscribeProcessResult returns the result state of userdata process
func (task *Task) SubscribeProcessResult() <-chan *userdata.ProcessResult {
return task.resultChan
}
// SubscribeProcessResultGet returns the result state of userdata process
func (task *Task) SubscribeProcessResultGet() <-chan *userdata.ProcessRequest {
return task.resultChanGet
}
// NewTask returns a new Task instance.
func NewTask(service *Service, request *userdata.ProcessRequest, userpastelid string) *Task {
return &Task{
Task: task.New(StatusTaskStarted),
Service: service,
request: request,
userpastelid: userpastelid,
resultChan: make(chan *userdata.ProcessResult),
resultChanGet: make(chan *userdata.ProcessRequest),
}
}
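// Illustrative usage sketch: how a caller might create this task and consume its
// result channels. The *Service value "svc", the ctx wiring, and the request
// fields below are assumptions for the example only; the real wiring lives in the
// walletnode service/worker layer.
//
//	req := &userdata.ProcessRequest{ArtistPastelID: "jX...", ArtistPastelIDPassphrase: "passphrase"}
//	t := NewTask(svc, req, "")
//	go func() { _ = t.Run(ctx) }()
//	select {
//	case res := <-t.SubscribeProcessResult():
//	    _ = res // aggregated response from the primary supernode
//	case <-ctx.Done():
//	}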
| {
return nil, err
} | conditional_block |
task.go | package userdataprocess
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/pastelnetwork/gonode/common/errgroup"
"github.com/pastelnetwork/gonode/common/errors"
"github.com/pastelnetwork/gonode/common/log"
"github.com/pastelnetwork/gonode/common/service/task"
"github.com/pastelnetwork/gonode/common/service/userdata"
"github.com/pastelnetwork/gonode/walletnode/services/userdataprocess/node"
)
// Task is the task of userdata processing.
type Task struct {
task.Task
*Service
// information about the nodes used to set userdata
nodes node.List
resultChan chan *userdata.ProcessResult
err error
request *userdata.ProcessRequest
// information about the user pastelid used to retrieve userdata
userpastelid string // user pastelid
resultChanGet chan *userdata.ProcessRequest
}
// Run starts the task
func (task *Task) | (ctx context.Context) error {
ctx = log.ContextWithPrefix(ctx, fmt.Sprintf("%s-%s", logPrefix, task.ID()))
log.WithContext(ctx).Debugf("Start task")
defer log.WithContext(ctx).Debugf("End task")
defer close(task.resultChan)
defer close(task.resultChanGet)
if err := task.run(ctx); err != nil {
task.err = err
task.UpdateStatus(StatusTaskFailure)
log.WithContext(ctx).WithError(err).Warnf("Task failed")
return nil
}
task.UpdateStatus(StatusTaskCompleted)
return nil
}
func (task *Task) run(ctx context.Context) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
maxNode := task.config.NumberSuperNodes
if task.request == nil {
// This is process to retrieve userdata
maxNode = 1 // Get data from 1 supernode only, currently we choose the 1st ranked supernode, but this may change later
}
// TODO: Make this init and connect to super nodes to generic reusable function to avoid code duplication (1)
// Retrieve supernodes with highest ranks.
topNodes, err := task.pastelTopNodes(ctx, maxNode)
if err != nil {
return err
}
if len(topNodes) < maxNode {
task.UpdateStatus(StatusErrorNotEnoughMasterNode)
return errors.New("unable to find enough Supernodes to send userdata to")
}
// Try to create a mesh of supernodes, connecting to all supernodes in different sequences.
var nodes node.List
var errs error
nodes, err = task.meshNodes(ctx, topNodes, 0) // Connect a mesh node with primary is 1st ranked SN
if err != nil {
if errors.IsContextCanceled(err) {
return err
}
errs = errors.Append(errs, err)
log.WithContext(ctx).WithError(err).Warnf("Could not create a mesh of the nodes")
}
if len(nodes) < maxNode {
return errors.Errorf("Could not create a mesh of %d nodes: %w", task.config.NumberSuperNodes, errs)
}
// Activate supernodes that are in the mesh.
nodes.Activate()
// Disconnect supernodes that are not involved in the process.
topNodes.DisconnectInactive()
// Cancel context when any connection is broken.
groupConnClose, _ := errgroup.WithContext(ctx)
groupConnClose.Go(func() error {
defer cancel()
return nodes.WaitConnClose(ctx)
})
task.UpdateStatus(StatusConnected)
task.nodes = nodes
if task.request == nil {
// PROCESS TO RETRIEVE USERDATA FROM METADATA LAYER
if err := nodes.ReceiveUserdata(ctx, task.userpastelid); err != nil {
return errors.Errorf("failed to receive userdata: %w", err)
}
// Post on result channel
node := nodes[0]
if node.ResultGet != nil {
task.resultChanGet <- node.ResultGet
} else {
return errors.Errorf("failed to receive userdata")
}
log.WithContext(ctx).Debug("Finished retrieve userdata")
} else {
// PROCESS TO SET/UPDATE USERDATA TO METADATA LAYER
// Get the previous block hash
// Get block num
blockHash := ""
blockNum, err := task.pastelClient.GetBlockCount(ctx)
if err != nil {
log.WithContext(ctx).Debug("failed to get block num: %w", err)
} else {
// Get block hash string
blockInfo, err := task.pastelClient.GetBlockVerbose1(ctx, blockNum)
if err != nil {
log.WithContext(ctx).Debug("failed to get block info with error: %w", err)
} else {
blockHash = blockInfo.Hash
}
}
task.request.PreviousBlockHash = blockHash
// Get the value of task.request.ArtistPastelIDPassphrase for sign data, then empty it in the request to make sure it not sent to supernodes
passphrase := task.request.ArtistPastelIDPassphrase
task.request.ArtistPastelIDPassphrase = ""
// Marshal task.request to byte array for signing
js, err := json.Marshal(task.request)
if err != nil {
return errors.Errorf("failed to encode request %w", err)
}
// Hash the request
hashvalue, err := userdata.Sha3256hash(js)
if err != nil {
return errors.Errorf("failed to hash request %w", err)
}
// Sign request with Wallet Node's pastelID and passphrase
signature, err := task.pastelClient.Sign(ctx, hashvalue, task.request.ArtistPastelID, passphrase, "ed448")
if err != nil {
return errors.Errorf("failed to sign ticket %w", err)
}
userdata := &userdata.ProcessRequestSigned{
Userdata: task.request,
UserdataHash: hex.EncodeToString(hashvalue),
Signature: hex.EncodeToString(signature),
}
// Send userdata to supernodes for storing in MDL's rqlite db.
if err := nodes.SendUserdata(ctx, userdata); err != nil {
return err
}
res, err := task.AggregateResult(ctx, nodes)
// Post on result channel
task.resultChan <- &res
if err != nil {
return err
}
log.WithContext(ctx).WithField("userdata_result", res).Debug("Posted userdata result")
}
// close the connections
for i := range nodes {
if err := nodes[i].Connection.Close(); err != nil {
log.WithContext(ctx).WithError(err).Debugf("failed to close connection to node %s", task.nodes[i].PastelID())
}
}
return nil
}
// AggregateResult aggregates the results returned by all supernodes and decides whether they are valid.
func (task *Task) AggregateResult(_ context.Context, nodes node.List) (userdata.ProcessResult, error) {
// There are the following common scenarios when supernodes respond:
// 1. Both the secondary node and the primary node respond with a userdata validation error
// 2. The secondary node responds with a successful userdata validation and the primary node provides the further processing result
// 3. Some nodes fail to respond, or the responses do not match the two cases above, so we need to aggregate the results and decide what happened
// This part covers case 1 or 2 above; we trust the primary node, so we use its response
for _, node := range nodes {
node := node
if node.IsPrimary() {
result := node.Result
if result == nil {
return userdata.ProcessResult{}, errors.Errorf("Primary node have empty result")
}
return *result, nil
}
}
return userdata.ProcessResult{}, errors.Errorf("failed to Aggregate Result")
}
// meshNodes establishes communication between supernodes.
func (task *Task) meshNodes(ctx context.Context, nodes node.List, primaryIndex int) (node.List, error) {
var meshNodes node.List
primary := nodes[primaryIndex]
if err := primary.Connect(ctx, task.config.connectTimeout); err != nil {
return nil, err
}
if err := primary.Session(ctx, true); err != nil {
return nil, err
}
primary.SetPrimary(true)
if len(nodes) == 1 {
// If there is only one node, we use this primary node and return directly
meshNodes.Add(primary)
return meshNodes, nil
}
nextConnCtx, nextConnCancel := context.WithCancel(ctx)
defer nextConnCancel()
// FIXME: ugly hack here. Need to make Node and List safer
secondariesMtx := &sync.Mutex{}
var secondaries node.List
go func() {
for i, node := range nodes {
node := node
if i == primaryIndex {
continue
}
select {
case <-nextConnCtx.Done():
return
case <-time.After(task.config.connectToNextNodeDelay):
go func() {
defer errors.Recover(log.Fatal)
if err := node.Connect(ctx, task.config.connectTimeout); err != nil {
return
}
if err := node.Session(ctx, false); err != nil {
return
}
go func() {
secondariesMtx.Lock()
defer secondariesMtx.Unlock()
secondaries.Add(node)
}()
if err := node.ConnectTo(ctx, primary.PastelID(), primary.SessID()); err != nil {
return
}
log.WithContext(ctx).Debugf("Seconary %q connected to primary", node)
}()
}
}
}()
acceptCtx, acceptCancel := context.WithTimeout(ctx, task.config.acceptNodesTimeout)
defer acceptCancel()
accepted, err := primary.AcceptedNodes(acceptCtx)
if err != nil {
return nil, err
}
meshNodes.Add(primary)
secondariesMtx.Lock()
defer secondariesMtx.Unlock()
for _, pastelID := range accepted {
log.WithContext(ctx).Debugf("Primary accepted %q secondary node", pastelID)
node := secondaries.FindByPastelID(pastelID)
if node == nil {
return nil, errors.New("not found accepted node")
}
meshNodes.Add(node)
}
return meshNodes, nil
}
// pastelTopNodes retrieves the top supernodes we want to send userdata to, limited by maxNode
func (task *Task) pastelTopNodes(ctx context.Context, maxNode int) (node.List, error) {
var nodes node.List
mns, err := task.pastelClient.MasterNodesTop(ctx)
if err != nil {
return nil, err
}
count := 0
for _, mn := range mns {
count++
if count <= maxNode {
nodes = append(nodes, node.NewNode(task.Service.nodeClient, mn.ExtAddress, mn.ExtKey))
} else {
break
}
}
return nodes, nil
}
// Error returns task err
func (task *Task) Error() error {
return task.err
}
// SubscribeProcessResult returns the result state of userdata process
func (task *Task) SubscribeProcessResult() <-chan *userdata.ProcessResult {
return task.resultChan
}
// SubscribeProcessResultGet returns the result state of userdata process
func (task *Task) SubscribeProcessResultGet() <-chan *userdata.ProcessRequest {
return task.resultChanGet
}
// NewTask returns a new Task instance.
func NewTask(service *Service, request *userdata.ProcessRequest, userpastelid string) *Task {
return &Task{
Task: task.New(StatusTaskStarted),
Service: service,
request: request,
userpastelid: userpastelid,
resultChan: make(chan *userdata.ProcessResult),
resultChanGet: make(chan *userdata.ProcessRequest),
}
}
| Run | identifier_name |
task.go | package userdataprocess
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/pastelnetwork/gonode/common/errgroup"
"github.com/pastelnetwork/gonode/common/errors"
"github.com/pastelnetwork/gonode/common/log"
"github.com/pastelnetwork/gonode/common/service/task"
"github.com/pastelnetwork/gonode/common/service/userdata"
"github.com/pastelnetwork/gonode/walletnode/services/userdataprocess/node"
)
// Task is the task of userdata processing.
type Task struct {
task.Task
*Service
// information of nodes process to set userdata
nodes node.List
resultChan chan *userdata.ProcessResult
err error
request *userdata.ProcessRequest
// information user pastelid to retrieve userdata
userpastelid string // user pastelid
resultChanGet chan *userdata.ProcessRequest
}
// Run starts the task
func (task *Task) Run(ctx context.Context) error {
ctx = log.ContextWithPrefix(ctx, fmt.Sprintf("%s-%s", logPrefix, task.ID()))
log.WithContext(ctx).Debugf("Start task")
defer log.WithContext(ctx).Debugf("End task")
defer close(task.resultChan)
defer close(task.resultChanGet)
if err := task.run(ctx); err != nil {
task.err = err
task.UpdateStatus(StatusTaskFailure)
log.WithContext(ctx).WithError(err).Warnf("Task failed")
return nil
}
task.UpdateStatus(StatusTaskCompleted)
return nil
}
func (task *Task) run(ctx context.Context) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
maxNode := task.config.NumberSuperNodes
if task.request == nil {
// This is the process to retrieve userdata
maxNode = 1 // Get data from 1 supernode only, currently we choose the 1st ranked supernode, but this may change later
}
// TODO: Make this "init and connect to supernodes" logic a generic reusable function to avoid code duplication (1)
// Retrieve supernodes with highest ranks.
topNodes, err := task.pastelTopNodes(ctx, maxNode)
if err != nil {
return err
}
if len(topNodes) < maxNode {
task.UpdateStatus(StatusErrorNotEnoughMasterNode)
return errors.New("unable to find enough Supernodes to send userdata to")
}
// Try to create a mesh of supernodes, connecting to the supernodes in different sequences.
var nodes node.List
var errs error
nodes, err = task.meshNodes(ctx, topNodes, 0) // Connect a mesh node with primary is 1st ranked SN
if err != nil {
if errors.IsContextCanceled(err) {
return err
}
errs = errors.Append(errs, err)
log.WithContext(ctx).WithError(err).Warnf("Could not create a mesh of the nodes")
}
if len(nodes) < maxNode {
return errors.Errorf("Could not create a mesh of %d nodes: %w", task.config.NumberSuperNodes, errs)
}
// Activate supernodes that are in the mesh.
nodes.Activate()
// Disconnect supernodes that are not involved in the process.
topNodes.DisconnectInactive()
// Cancel context when any connection is broken.
groupConnClose, _ := errgroup.WithContext(ctx)
groupConnClose.Go(func() error {
defer cancel()
return nodes.WaitConnClose(ctx)
})
task.UpdateStatus(StatusConnected)
task.nodes = nodes
if task.request == nil {
// PROCESS TO RETRIEVE USERDATA FROM METADATA LAYER
if err := nodes.ReceiveUserdata(ctx, task.userpastelid); err != nil {
return errors.Errorf("failed to receive userdata: %w", err)
}
// Post on result channel
node := nodes[0]
if node.ResultGet != nil {
task.resultChanGet <- node.ResultGet
} else {
return errors.Errorf("failed to receive userdata")
}
log.WithContext(ctx).Debug("Finished retrieve userdata")
} else {
// PROCESS TO SET/UPDATE USERDATA TO METADATA LAYER
// Get the previous block hash
// Get block num
blockHash := ""
blockNum, err := task.pastelClient.GetBlockCount(ctx)
if err != nil {
log.WithContext(ctx).Debug("failed to get block num: %w", err)
} else {
// Get block hash string
blockInfo, err := task.pastelClient.GetBlockVerbose1(ctx, blockNum)
if err != nil {
log.WithContext(ctx).Debug("failed to get block info with error: %w", err)
} else {
blockHash = blockInfo.Hash
}
}
task.request.PreviousBlockHash = blockHash
// Get the value of task.request.ArtistPastelIDPassphrase for signing the data, then clear it in the request to make sure it is not sent to the supernodes
passphrase := task.request.ArtistPastelIDPassphrase
task.request.ArtistPastelIDPassphrase = ""
// Marshal task.request to byte array for signing
js, err := json.Marshal(task.request)
if err != nil {
return errors.Errorf("failed to encode request %w", err)
}
// Hash the request
hashvalue, err := userdata.Sha3256hash(js)
if err != nil {
return errors.Errorf("failed to hash request %w", err)
}
// Sign request with Wallet Node's pastelID and passphrase
signature, err := task.pastelClient.Sign(ctx, hashvalue, task.request.ArtistPastelID, passphrase, "ed448")
if err != nil {
return errors.Errorf("failed to sign ticket %w", err)
}
userdata := &userdata.ProcessRequestSigned{
Userdata: task.request,
UserdataHash: hex.EncodeToString(hashvalue),
Signature: hex.EncodeToString(signature),
}
// Send userdata to supernodes for storing in MDL's rqlite db.
if err := nodes.SendUserdata(ctx, userdata); err != nil {
return err
}
res, err := task.AggregateResult(ctx, nodes)
if err != nil {
return err
}
// Post on result channel
task.resultChan <- &res
log.WithContext(ctx).WithField("userdata_result", res).Debug("Posted userdata result")
}
// close the connections
for i := range nodes {
if err := nodes[i].Connection.Close(); err != nil {
log.WithContext(ctx).WithError(err).Debugf("failed to close connection to node %s", task.nodes[i].PastelID())
}
}
return nil
}
// AggregateResult aggregates all results returned by the supernodes and decides whether the combined result is valid
func (task *Task) AggregateResult(_ context.Context, nodes node.List) (userdata.ProcessResult, error) {
// There are the following common scenarios when supernodes respond:
// 1. Both the secondary and primary nodes respond with a userdata validation error
// 2. The secondary node responds with a successful userdata validation and the primary node provides the further processing result
// 3. Some nodes fail to respond, or the responses do not match the two cases above, so we need to aggregate the results and decide what happened
// This part handles cases 1 and 2 above; we trust the primary node, so we use its response
for _, node := range nodes {
node := node
if node.IsPrimary() {
result := node.Result
if result == nil {
return userdata.ProcessResult{}, errors.Errorf("Primary node have empty result")
}
return *result, nil
}
}
return userdata.ProcessResult{}, errors.Errorf("failed to Aggregate Result")
}
// meshNodes establishes communication between supernodes.
func (task *Task) meshNodes(ctx context.Context, nodes node.List, primaryIndex int) (node.List, error) {
var meshNodes node.List
primary := nodes[primaryIndex]
if err := primary.Connect(ctx, task.config.connectTimeout); err != nil {
return nil, err
}
if err := primary.Session(ctx, true); err != nil {
return nil, err
}
primary.SetPrimary(true)
if len(nodes) == 1 {
// If the number of nodes only have 1 node, we use this primary node and return directly
meshNodes.Add(primary)
return meshNodes, nil
}
nextConnCtx, nextConnCancel := context.WithCancel(ctx)
defer nextConnCancel()
// FIXME: ugly hack here. Need to make Node and List safer
secondariesMtx := &sync.Mutex{}
var secondaries node.List
go func() {
for i, node := range nodes {
node := node
if i == primaryIndex {
continue
}
select {
case <-nextConnCtx.Done():
return
case <-time.After(task.config.connectToNextNodeDelay):
go func() {
defer errors.Recover(log.Fatal)
if err := node.Connect(ctx, task.config.connectTimeout); err != nil {
return
}
if err := node.Session(ctx, false); err != nil {
return
}
go func() {
secondariesMtx.Lock()
defer secondariesMtx.Unlock()
secondaries.Add(node)
}()
if err := node.ConnectTo(ctx, primary.PastelID(), primary.SessID()); err != nil {
return
}
log.WithContext(ctx).Debugf("Seconary %q connected to primary", node)
}()
}
}
}()
acceptCtx, acceptCancel := context.WithTimeout(ctx, task.config.acceptNodesTimeout)
defer acceptCancel()
accepted, err := primary.AcceptedNodes(acceptCtx)
if err != nil {
return nil, err
}
meshNodes.Add(primary)
secondariesMtx.Lock()
defer secondariesMtx.Unlock()
for _, pastelID := range accepted {
log.WithContext(ctx).Debugf("Primary accepted %q secondary node", pastelID)
node := secondaries.FindByPastelID(pastelID)
if node == nil {
return nil, errors.New("not found accepted node")
}
meshNodes.Add(node)
}
return meshNodes, nil
}
// pastelTopNodes retrieves the top supernodes we want to send userdata to, limited by maxNode
func (task *Task) pastelTopNodes(ctx context.Context, maxNode int) (node.List, error) {
var nodes node.List
mns, err := task.pastelClient.MasterNodesTop(ctx)
if err != nil {
return nil, err
}
count := 0
for _, mn := range mns {
count++
if count <= maxNode {
nodes = append(nodes, node.NewNode(task.Service.nodeClient, mn.ExtAddress, mn.ExtKey))
} else {
break
}
}
return nodes, nil
}
// Error returns task err
func (task *Task) Error() error {
return task.err
}
// SubscribeProcessResult returns the result state of userdata process
func (task *Task) SubscribeProcessResult() <-chan *userdata.ProcessResult |
// SubscribeProcessResultGet returns the result state of userdata process
func (task *Task) SubscribeProcessResultGet() <-chan *userdata.ProcessRequest {
return task.resultChanGet
}
// NewTask returns a new Task instance.
func NewTask(service *Service, request *userdata.ProcessRequest, userpastelid string) *Task {
return &Task{
Task: task.New(StatusTaskStarted),
Service: service,
request: request,
userpastelid: userpastelid,
resultChan: make(chan *userdata.ProcessResult),
resultChanGet: make(chan *userdata.ProcessRequest),
}
}
| {
return task.resultChan
} | identifier_body |
models.py | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from datetime import datetime
from django.contrib import admin, messages
from django.contrib.auth.decorators import permission_required
from django.conf import settings as django_settings
from django.core.urlresolvers import get_callable
from django.db import models
from django.template.defaultfilters import filesizeformat
from django.utils.safestring import mark_safe
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
from django.http import HttpResponseRedirect
# 1.2 from django.views.decorators.csrf import csrf_protect
from feincms import settings
from feincms.models import Base
from feincms.templatetags import feincms_thumbnail
from feincms.translations import TranslatedObjectMixin, Translation, \
TranslatedObjectManager
import re
import os
import logging
from PIL import Image
# ------------------------------------------------------------------------
class CategoryManager(models.Manager):
"""
Simple manager which exists only to supply ``.select_related("parent")``
on querysets since we can't even __unicode__ efficiently without it.
"""
def get_query_set(self):
return super(CategoryManager, self).get_query_set().select_related("parent")
# ------------------------------------------------------------------------
class Category(models.Model):
"""
These categories are meant primarily for organizing media files in the
library.
"""
title = models.CharField(_('title'), max_length=200)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children', limit_choices_to={'parent__isnull': True},
verbose_name=_('parent'))
slug = models.SlugField(_('slug'), max_length=150)
class Meta:
ordering = ['parent__title', 'title']
verbose_name = _('category')
verbose_name_plural = _('categories')
objects = CategoryManager()
def __unicode__(self):
if self.parent_id:
return u'%s - %s' % (self.parent.title, self.title)
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Category, self).save(*args, **kwargs)
class CategoryAdmin(admin.ModelAdmin):
list_display = ['parent', 'title']
list_filter = ['parent']
list_per_page = 25
search_fields = ['title']
prepopulated_fields = { 'slug': ('title',), }
# ------------------------------------------------------------------------
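# --- Illustrative sketch (added for clarity, not part of the original module)
# A minimal usage sketch for the Category model defined above, assuming a
# fully configured Django project with this medialibrary app installed; the
# titles and slugs used here are made up.
def _category_usage_sketch():
    parent = Category.objects.create(title=u'Press photos', slug=u'press-photos')
    child = Category(title=u'2010 Launch', parent=parent)
    child.save()                     # save() fills in the missing slug via slugify()
    assert child.slug == u'2010-launch'
    return unicode(child)            # u'Press photos - 2010 Launch'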
class MediaFileBase(Base, TranslatedObjectMixin):
"""
Abstract media file class. Inherits from :class:`feincms.module.Base`
because of the (handy) extension mechanism.
"""
from django.core.files.storage import FileSystemStorage
default_storage_class = getattr(django_settings, 'DEFAULT_FILE_STORAGE',
'django.core.files.storage.FileSystemStorage')
default_storage = get_callable(default_storage_class)
fs = default_storage(location=settings.FEINCMS_MEDIALIBRARY_ROOT,
base_url=settings.FEINCMS_MEDIALIBRARY_URL)
file = models.FileField(_('file'), max_length=255, upload_to=settings.FEINCMS_MEDIALIBRARY_UPLOAD_TO, storage=fs)
type = models.CharField(_('file type'), max_length=12, editable=False, choices=())
created = models.DateTimeField(_('created'), editable=False, default=datetime.now)
copyright = models.CharField(_('copyright'), max_length=200, blank=True)
file_size = models.IntegerField(_("file size"), blank=True, null=True, editable=False)
categories = models.ManyToManyField(Category, verbose_name=_('categories'),
blank=True, null=True)
categories.category_filter = True
class Meta:
abstract = True
verbose_name = _('media file')
verbose_name_plural = _('media files')
objects = TranslatedObjectManager()
filetypes = [ ]
filetypes_dict = { }
def formatted_file_size(self):
return filesizeformat(self.file_size)
formatted_file_size.short_description = _("file size")
formatted_file_size.admin_order_field = 'file_size'
def formatted_created(self):
return self.created.strftime("%Y-%m-%d %H:%M")
formatted_created.short_description = _("created")
formatted_created.admin_order_field = 'created'
@classmethod
def reconfigure(cls, upload_to=None, storage=None):
f = cls._meta.get_field('file')
# Ugh. Copied relevant parts from django/db/models/fields/files.py
# FileField.__init__ (around line 225)
if storage:
f.storage = storage
if upload_to:
f.upload_to = upload_to
if callable(upload_to):
f.generate_filename = upload_to
@classmethod
def register_filetypes(cls, *types):
cls.filetypes[0:0] = types
choices = [ t[0:2] for t in cls.filetypes ]
cls.filetypes_dict = dict(choices)
cls._meta.get_field('type').choices[:] = choices
def __init__(self, *args, **kwargs):
|
def __unicode__(self):
trans = None
# This might be provided using a .extra() clause to avoid hundreds of extra queries:
if hasattr(self, "preferred_translation"):
trans = getattr(self, "preferred_translation", u"")
else:
try:
trans = unicode(self.translation)
except models.ObjectDoesNotExist:
pass
except AttributeError, e:
pass
if trans:
return trans
else:
return os.path.basename(self.file.name)
def get_absolute_url(self):
return self.file.url
def file_type(self):
t = self.filetypes_dict[self.type]
if self.type == 'image':
try:
from django.core.files.images import get_image_dimensions
d = get_image_dimensions(self.file.file)
if d: t += "<br/>%d×%d" % ( d[0], d[1] )
except IOError, e:
t += "<br/>(%s)" % e.strerror
return t
file_type.admin_order_field = 'type'
file_type.short_description = _('file type')
file_type.allow_tags = True
def file_info(self):
"""
Method for showing the file name in admin.
Note: This also includes a hidden field that can be used to extract
the file name later on; this can be used to access the file name from
JS, for example in a TinyMCE connector shim.
"""
from os.path import basename
from feincms.utils import shorten_string
return u'<input type="hidden" class="medialibrary_file_path" name="_media_path_%d" value="%s" /> %s' % (
self.id,
self.file.name,
shorten_string(basename(self.file.name), max_length=28), )
file_info.short_description = _('file info')
file_info.allow_tags = True
def determine_file_type(self, name):
"""
>>> t = MediaFileBase()
>>> t.determine_file_type('foobar.jpg')
'image'
>>> t.determine_file_type('foobar.PDF')
'pdf'
>>> t.determine_file_type('foobar.jpg.pdf')
'pdf'
>>> t.determine_file_type('foobar.jgp')
'other'
>>> t.determine_file_type('foobar-jpg')
'other'
"""
for type_key, type_name, type_test in self.filetypes:
if type_test(name):
return type_key
return self.filetypes[-1][0]
def save(self, *args, **kwargs):
if not self.id and not self.created:
self.created = datetime.now()
self.type = self.determine_file_type(self.file.name)
if self.file:
try:
self.file_size = self.file.size
except (OSError, IOError, ValueError), e:
logging.error("Unable to read file size for %s: %s", self, e)
# Try to detect things that are not really images
if self.type == 'image':
try:
try:
image = Image.open(self.file)
except (OSError, IOError):
image = Image.open(self.file.path)
# Rotate image based on exif data.
if image:
try:
exif = image._getexif()
except (AttributeError, IOError):
exif = False
# PIL < 1.1.7 chokes on JPEGs with minimal EXIF data and
# throws a KeyError deep in its guts.
except KeyError:
exif = False
if exif:
orientation = exif.get(274)
rotation = 0
if orientation == 3:
rotation = 180
elif orientation == 6:
rotation = 270
elif orientation == 8:
rotation = 90
if rotation:
image = image.rotate(rotation)
image.save(self.file.path)
except (OSError, IOError), e:
self.type = self.determine_file_type('***') # It's binary something
if getattr(self, '_original_file_path', None):
if self.file.path != self._original_file_path:
try:
os.unlink(self._original_file_path)
except:
pass
super(MediaFileBase, self).save(*args, **kwargs)
self.purge_translation_cache()
# ------------------------------------------------------------------------
MediaFileBase.register_filetypes(
# Should we be using imghdr.what instead of extension guessing?
('image', _('Image'), lambda f: re.compile(r'\.(bmp|jpe?g|jp2|jxr|gif|png|tiff?)$', re.IGNORECASE).search(f)),
('video', _('Video'), lambda f: re.compile(r'\.(mov|m[14]v|mp4|avi|mpe?g|qt|ogv|wmv)$', re.IGNORECASE).search(f)),
('audio', _('Audio'), lambda f: re.compile(r'\.(au|mp3|m4a|wma|oga|ram|wav)$', re.IGNORECASE).search(f)),
('pdf', _('PDF document'), lambda f: f.lower().endswith('.pdf')),
('swf', _('Flash'), lambda f: f.lower().endswith('.swf')),
('txt', _('Text'), lambda f: f.lower().endswith('.txt')),
('rtf', _('Rich Text'), lambda f: f.lower().endswith('.rtf')),
('zip', _('Zip archive'), lambda f: f.lower().endswith('.zip')),
('doc', _('Microsoft Word'), lambda f: re.compile(r'\.docx?$', re.IGNORECASE).search(f)),
('xls', _('Microsoft Excel'), lambda f: re.compile(r'\.xlsx?$', re.IGNORECASE).search(f)),
('ppt', _('Microsoft PowerPoint'), lambda f: re.compile(r'\.pptx?$', re.IGNORECASE).search(f)),
('other', _('Binary'), lambda f: True), # Must be last
)
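# --- Illustrative sketch (added for clarity, not part of the original module)
# register_filetypes() prepends new entries (cls.filetypes[0:0] = types), so
# the catch-all 'other' entry above always stays last. A project that also
# wants to treat SVG files as a distinct type could therefore register one
# more entry like this; the 'svg' key and its label are hypothetical.
MediaFileBase.register_filetypes(
    ('svg', _('Vector image'), lambda f: f.lower().endswith('.svg')),
)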
# ------------------------------------------------------------------------
class MediaFile(MediaFileBase):
@classmethod
def register_extension(cls, register_fn):
register_fn(cls, MediaFileAdmin)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
class MediaFileTranslation(Translation(MediaFile)):
"""
Translated media file caption and description.
"""
caption = models.CharField(_('caption'), max_length=200)
description = models.TextField(_('description'), blank=True)
class Meta:
verbose_name = _('media file translation')
verbose_name_plural = _('media file translations')
def __unicode__(self):
return self.caption
#-------------------------------------------------------------------------
class MediaFileTranslationInline(admin.StackedInline):
model = MediaFileTranslation
max_num = len(django_settings.LANGUAGES)
def admin_thumbnail(obj):
if obj.type == 'image':
image = None
try:
image = feincms_thumbnail.thumbnail(obj.file.name, '100x60')
except:
pass
if image:
return mark_safe(u"""
<a href="%(url)s" target="_blank">
<img src="%(image)s" alt="" />
</a>""" % {
'url': obj.file.url,
'image': image,})
return ''
admin_thumbnail.short_description = _('Preview')
admin_thumbnail.allow_tags = True
#-------------------------------------------------------------------------
class MediaFileAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
inlines = [MediaFileTranslationInline]
list_display = ['__unicode__', admin_thumbnail, 'file_type', 'copyright', 'file_info', 'formatted_file_size', 'formatted_created']
list_filter = ['type', 'categories']
list_per_page = 25
search_fields = ['copyright', 'file', 'translations__caption']
filter_horizontal = ("categories",)
def get_urls(self):
from django.conf.urls.defaults import url, patterns
urls = super(MediaFileAdmin, self).get_urls()
my_urls = patterns('',
url(r'^mediafile-bulk-upload/$', self.admin_site.admin_view(MediaFileAdmin.bulk_upload), {}, name='mediafile_bulk_upload')
)
return my_urls + urls
def changelist_view(self, request, extra_context=None):
if extra_context is None:
extra_context = {}
extra_context['categories'] = Category.objects.all()
return super(MediaFileAdmin, self).changelist_view(request, extra_context=extra_context)
@staticmethod
# 1.2 @csrf_protect
@permission_required('medialibrary.add_mediafile')
def bulk_upload(request):
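# Descriptive note (added for clarity): this admin view expects a POST with a
# ZIP archive in the 'data' field and an optional 'category' id. The nested
# import_zipfile() helper below walks the archive, skips directories and
# hidden or extension-less entries, slugifies each base file name, stores the
# payload through MediaFile.file.save(), attaches the chosen category, and
# finally redirects back to the media file changelist.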
from django.core.urlresolvers import reverse
from django.utils.functional import lazy
def import_zipfile(request, category_id, data):
import zipfile
from os import path
category = None
if category_id:
category = Category.objects.get(pk=int(category_id))
try:
z = zipfile.ZipFile(data)
storage = MediaFile.fs
if not storage:
messages.error(request, _("Could not access storage"))
return
count = 0
for zi in z.infolist():
if not zi.filename.endswith('/'):
from django.template.defaultfilters import slugify
from django.core.files.base import ContentFile
bname = path.basename(zi.filename)
if bname and not bname.startswith(".") and "." in bname:
fname, ext = path.splitext(bname)
target_fname = slugify(fname) + ext.lower()
mf = MediaFile()
mf.file.save(target_fname, ContentFile(z.read(zi.filename)))
mf.save()
if category:
mf.categories.add(category)
count += 1
messages.info(request, _("%d files imported") % count)
except Exception, e:
messages.error(request, _("ZIP file invalid: %s") % str(e))
return
if request.method == 'POST' and 'data' in request.FILES:
import_zipfile(request, request.POST.get('category'), request.FILES['data'])
else:
messages.error(request, _("No input file given"))
return HttpResponseRedirect(reverse('admin:medialibrary_mediafile_changelist'))
def queryset(self, request):
qs = super(MediaFileAdmin, self).queryset(request)
# FIXME: This is an ugly hack but it avoids 1-3 queries per *FILE*
# retrieving the translation information
if django_settings.DATABASE_ENGINE == 'postgresql_psycopg2':
qs = qs.extra(
select = {
'preferred_translation':
"""SELECT caption FROM medialibrary_mediafiletranslation
WHERE medialibrary_mediafiletranslation.parent_id = medialibrary_mediafile.id
ORDER BY
language_code = %s DESC,
language_code = %s DESC,
LENGTH(language_code) DESC
LIMIT 1
"""
},
select_params = (translation.get_language(), django_settings.LANGUAGE_CODE)
)
return qs
def save_model(self, request, obj, form, change):
obj.purge_translation_cache()
return super(MediaFileAdmin, self).save_model(request, obj, form, change)
#-------------------------------------------------------------------------
| super(MediaFileBase, self).__init__(*args, **kwargs)
if self.file and self.file.path:
self._original_file_path = self.file.path | identifier_body |
models.py | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from datetime import datetime
from django.contrib import admin, messages
from django.contrib.auth.decorators import permission_required
from django.conf import settings as django_settings
from django.core.urlresolvers import get_callable
from django.db import models
from django.template.defaultfilters import filesizeformat
from django.utils.safestring import mark_safe
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
from django.http import HttpResponseRedirect
# 1.2 from django.views.decorators.csrf import csrf_protect
from feincms import settings
from feincms.models import Base
from feincms.templatetags import feincms_thumbnail
from feincms.translations import TranslatedObjectMixin, Translation, \
TranslatedObjectManager
import re
import os
import logging
from PIL import Image
# ------------------------------------------------------------------------
class CategoryManager(models.Manager):
"""
Simple manager which exists only to supply ``.select_related("parent")``
on querysets since we can't even __unicode__ efficiently without it.
"""
def get_query_set(self):
return super(CategoryManager, self).get_query_set().select_related("parent")
# ------------------------------------------------------------------------
class Category(models.Model):
"""
These categories are meant primarily for organizing media files in the
library.
"""
title = models.CharField(_('title'), max_length=200)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children', limit_choices_to={'parent__isnull': True},
verbose_name=_('parent'))
slug = models.SlugField(_('slug'), max_length=150)
class Meta:
ordering = ['parent__title', 'title']
verbose_name = _('category')
verbose_name_plural = _('categories')
objects = CategoryManager()
def __unicode__(self):
if self.parent_id:
return u'%s - %s' % (self.parent.title, self.title)
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Category, self).save(*args, **kwargs)
class CategoryAdmin(admin.ModelAdmin):
list_display = ['parent', 'title']
list_filter = ['parent']
list_per_page = 25
search_fields = ['title']
prepopulated_fields = { 'slug': ('title',), }
# ------------------------------------------------------------------------
class MediaFileBase(Base, TranslatedObjectMixin):
"""
Abstract media file class. Inherits from :class:`feincms.module.Base`
because of the (handy) extension mechanism.
"""
from django.core.files.storage import FileSystemStorage
default_storage_class = getattr(django_settings, 'DEFAULT_FILE_STORAGE',
'django.core.files.storage.FileSystemStorage')
default_storage = get_callable(default_storage_class)
fs = default_storage(location=settings.FEINCMS_MEDIALIBRARY_ROOT,
base_url=settings.FEINCMS_MEDIALIBRARY_URL)
file = models.FileField(_('file'), max_length=255, upload_to=settings.FEINCMS_MEDIALIBRARY_UPLOAD_TO, storage=fs)
type = models.CharField(_('file type'), max_length=12, editable=False, choices=())
created = models.DateTimeField(_('created'), editable=False, default=datetime.now)
copyright = models.CharField(_('copyright'), max_length=200, blank=True)
file_size = models.IntegerField(_("file size"), blank=True, null=True, editable=False)
categories = models.ManyToManyField(Category, verbose_name=_('categories'),
blank=True, null=True)
categories.category_filter = True
class Meta:
abstract = True
verbose_name = _('media file')
verbose_name_plural = _('media files')
objects = TranslatedObjectManager()
filetypes = [ ]
filetypes_dict = { }
def formatted_file_size(self):
return filesizeformat(self.file_size)
formatted_file_size.short_description = _("file size")
formatted_file_size.admin_order_field = 'file_size'
def formatted_created(self):
return self.created.strftime("%Y-%m-%d %H:%M")
formatted_created.short_description = _("created")
formatted_created.admin_order_field = 'created'
@classmethod
def reconfigure(cls, upload_to=None, storage=None):
f = cls._meta.get_field('file')
# Ugh. Copied relevant parts from django/db/models/fields/files.py
# FileField.__init__ (around line 225)
if storage:
f.storage = storage
if upload_to:
f.upload_to = upload_to
if callable(upload_to):
f.generate_filename = upload_to
@classmethod
def register_filetypes(cls, *types):
cls.filetypes[0:0] = types
choices = [ t[0:2] for t in cls.filetypes ]
cls.filetypes_dict = dict(choices)
cls._meta.get_field('type').choices[:] = choices
def __init__(self, *args, **kwargs):
super(MediaFileBase, self).__init__(*args, **kwargs)
if self.file and self.file.path:
self._original_file_path = self.file.path
def __unicode__(self):
trans = None
# This might be provided using a .extra() clause to avoid hundreds of extra queries:
if hasattr(self, "preferred_translation"):
trans = getattr(self, "preferred_translation", u"")
else:
try:
trans = unicode(self.translation)
except models.ObjectDoesNotExist:
pass
except AttributeError, e:
pass
if trans:
return trans
else:
return os.path.basename(self.file.name)
def get_absolute_url(self):
return self.file.url
def file_type(self):
t = self.filetypes_dict[self.type]
if self.type == 'image':
try:
from django.core.files.images import get_image_dimensions
d = get_image_dimensions(self.file.file)
if d: t += "<br/>%d×%d" % ( d[0], d[1] )
except IOError, e:
t += "<br/>(%s)" % e.strerror
return t
file_type.admin_order_field = 'type'
file_type.short_description = _('file type')
file_type.allow_tags = True
def file_info(self):
"""
Method for showing the file name in admin.
Note: This also includes a hidden field that can be used to extract
the file name later on; this can be used to access the file name from
JS, for example in a TinyMCE connector shim.
"""
from os.path import basename
from feincms.utils import shorten_string
return u'<input type="hidden" class="medialibrary_file_path" name="_media_path_%d" value="%s" /> %s' % (
self.id,
self.file.name,
shorten_string(basename(self.file.name), max_length=28), )
file_info.short_description = _('file info')
file_info.allow_tags = True
def determine_file_type(self, name):
"""
>>> t = MediaFileBase()
>>> t.determine_file_type('foobar.jpg')
'image'
>>> t.determine_file_type('foobar.PDF')
'pdf'
>>> t.determine_file_type('foobar.jpg.pdf')
'pdf'
>>> t.determine_file_type('foobar.jgp')
'other'
>>> t.determine_file_type('foobar-jpg')
'other'
"""
for type_key, type_name, type_test in self.filetypes:
if type_test(name):
return type_key
return self.filetypes[-1][0]
def save(self, *args, **kwargs):
if not self.id and not self.created:
self.created = datetime.now()
self.type = self.determine_file_type(self.file.name)
if self.file:
try:
self.file_size = self.file.size
except (OSError, IOError, ValueError), e:
logging.error("Unable to read file size for %s: %s", self, e)
# Try to detect things that are not really images
if self.type == 'image':
try:
try:
image = Image.open(self.file)
except (OSError, IOError):
image = Image.open(self.file.path)
# Rotate image based on exif data.
if image:
try:
exif = image._getexif()
except (AttributeError, IOError):
exif = False
# PIL < 1.1.7 chokes on JPEGs with minimal EXIF data and
# throws a KeyError deep in its guts.
except KeyError:
exif = False
if exif:
orientation = exif.get(274)
rotation = 0
if orientation == 3:
rotation = 180
elif orientation == 6:
rotation = 270
elif orientation == 8:
rotation = 90
if rotation:
image = image.rotate(rotation)
image.save(self.file.path)
except (OSError, IOError), e:
self.type = self.determine_file_type('***') # It's binary something
if getattr(self, '_original_file_path', None):
if self.file.path != self._original_file_path:
try:
os.unlink(self._original_file_path)
except:
pass
super(MediaFileBase, self).save(*args, **kwargs)
self.purge_translation_cache()
# ------------------------------------------------------------------------
MediaFileBase.register_filetypes(
# Should we be using imghdr.what instead of extension guessing?
('image', _('Image'), lambda f: re.compile(r'\.(bmp|jpe?g|jp2|jxr|gif|png|tiff?)$', re.IGNORECASE).search(f)),
('video', _('Video'), lambda f: re.compile(r'\.(mov|m[14]v|mp4|avi|mpe?g|qt|ogv|wmv)$', re.IGNORECASE).search(f)),
('audio', _('Audio'), lambda f: re.compile(r'\.(au|mp3|m4a|wma|oga|ram|wav)$', re.IGNORECASE).search(f)),
('pdf', _('PDF document'), lambda f: f.lower().endswith('.pdf')),
('swf', _('Flash'), lambda f: f.lower().endswith('.swf')),
('txt', _('Text'), lambda f: f.lower().endswith('.txt')),
('rtf', _('Rich Text'), lambda f: f.lower().endswith('.rtf')),
('zip', _('Zip archive'), lambda f: f.lower().endswith('.zip')),
('doc', _('Microsoft Word'), lambda f: re.compile(r'\.docx?$', re.IGNORECASE).search(f)),
('xls', _('Microsoft Excel'), lambda f: re.compile(r'\.xlsx?$', re.IGNORECASE).search(f)),
('ppt', _('Microsoft PowerPoint'), lambda f: re.compile(r'\.pptx?$', re.IGNORECASE).search(f)),
('other', _('Binary'), lambda f: True), # Must be last
)
# ------------------------------------------------------------------------
class MediaFile(MediaFileBase):
@classmethod
def register_extension(cls, register_fn):
register_fn(cls, MediaFileAdmin)
# ------------------------------------------------------------------------
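# --- Illustrative sketch (added for clarity, not part of the original module)
# reconfigure() on MediaFileBase swaps the storage backend and/or the
# upload_to target of the shared 'file' field without redefining the model.
# A hypothetical deployment that keeps media under a date-based prefix on a
# local filesystem storage could do something like this; both paths and the
# URL prefix are made-up examples.
from django.core.files.storage import FileSystemStorage

MediaFile.reconfigure(
    upload_to='medialibrary/%Y/%m/',
    storage=FileSystemStorage(location='/var/media/feincms',
                              base_url='/media/feincms/'),
)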
# ------------------------------------------------------------------------
class MediaFileTranslation(Translation(MediaFile)):
"""
Translated media file caption and description.
"""
caption = models.CharField(_('caption'), max_length=200)
description = models.TextField(_('description'), blank=True)
class Meta:
verbose_name = _('media file translation')
verbose_name_plural = _('media file translations')
def __unicode__(self):
return self.caption
#-------------------------------------------------------------------------
class MediaFileTranslationInline(admin.StackedInline):
model = MediaFileTranslation
max_num = len(django_settings.LANGUAGES)
def admin_thumbnail(obj):
if obj.type == 'image':
image = None
try:
image = feincms_thumbnail.thumbnail(obj.file.name, '100x60')
except:
pass
if image:
return mark_safe(u"""
<a href="%(url)s" target="_blank">
<img src="%(image)s" alt="" />
</a>""" % {
'url': obj.file.url,
'image': image,})
return ''
admin_thumbnail.short_description = _('Preview')
admin_thumbnail.allow_tags = True
#-------------------------------------------------------------------------
class MediaFileAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
inlines = [MediaFileTranslationInline]
list_display = ['__unicode__', admin_thumbnail, 'file_type', 'copyright', 'file_info', 'formatted_file_size', 'formatted_created']
list_filter = ['type', 'categories']
list_per_page = 25
search_fields = ['copyright', 'file', 'translations__caption']
filter_horizontal = ("categories",)
def get_urls(self):
from django.conf.urls.defaults import url, patterns
urls = super(MediaFileAdmin, self).get_urls()
my_urls = patterns('',
url(r'^mediafile-bulk-upload/$', self.admin_site.admin_view(MediaFileAdmin.bulk_upload), {}, name='mediafile_bulk_upload')
)
return my_urls + urls
def changelist_view(self, request, extra_context=None):
if extra_context is None:
extra_context = {}
extra_context['categories'] = Category.objects.all()
return super(MediaFileAdmin, self).changelist_view(request, extra_context=extra_context)
@staticmethod
# 1.2 @csrf_protect
@permission_required('medialibrary.add_mediafile')
def bulk_upload(request):
from django.core.urlresolvers import reverse
from django.utils.functional import lazy
def import_zipfile(request, category_id, data):
import zipfile
from os import path
category = None
if category_id:
category = Category.objects.get(pk=int(category_id))
try:
z = zipfile.ZipFile(data)
storage = MediaFile.fs |
count = 0
for zi in z.infolist():
if not zi.filename.endswith('/'):
from django.template.defaultfilters import slugify
from django.core.files.base import ContentFile
bname = path.basename(zi.filename)
if bname and not bname.startswith(".") and "." in bname:
fname, ext = path.splitext(bname)
target_fname = slugify(fname) + ext.lower()
mf = MediaFile()
mf.file.save(target_fname, ContentFile(z.read(zi.filename)))
mf.save()
if category:
mf.categories.add(category)
count += 1
messages.info(request, _("%d files imported") % count)
except Exception, e:
messages.error(request, _("ZIP file invalid: %s") % str(e))
return
if request.method == 'POST' and 'data' in request.FILES:
import_zipfile(request, request.POST.get('category'), request.FILES['data'])
else:
messages.error(request, _("No input file given"))
return HttpResponseRedirect(reverse('admin:medialibrary_mediafile_changelist'))
def queryset(self, request):
qs = super(MediaFileAdmin, self).queryset(request)
# FIXME: This is an ugly hack but it avoids 1-3 queries per *FILE*
# retrieving the translation information
if django_settings.DATABASE_ENGINE == 'postgresql_psycopg2':
qs = qs.extra(
select = {
'preferred_translation':
"""SELECT caption FROM medialibrary_mediafiletranslation
WHERE medialibrary_mediafiletranslation.parent_id = medialibrary_mediafile.id
ORDER BY
language_code = %s DESC,
language_code = %s DESC,
LENGTH(language_code) DESC
LIMIT 1
"""
},
select_params = (translation.get_language(), django_settings.LANGUAGE_CODE)
)
return qs
def save_model(self, request, obj, form, change):
obj.purge_translation_cache()
return super(MediaFileAdmin, self).save_model(request, obj, form, change)
#------------------------------------------------------------------------- | if not storage:
messages.error(request, _("Could not access storage"))
return | random_line_split |
models.py | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from datetime import datetime
from django.contrib import admin, messages
from django.contrib.auth.decorators import permission_required
from django.conf import settings as django_settings
from django.core.urlresolvers import get_callable
from django.db import models
from django.template.defaultfilters import filesizeformat
from django.utils.safestring import mark_safe
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
from django.http import HttpResponseRedirect
# 1.2 from django.views.decorators.csrf import csrf_protect
from feincms import settings
from feincms.models import Base
from feincms.templatetags import feincms_thumbnail
from feincms.translations import TranslatedObjectMixin, Translation, \
TranslatedObjectManager
import re
import os
import logging
from PIL import Image
# ------------------------------------------------------------------------
class CategoryManager(models.Manager):
"""
Simple manager which exists only to supply ``.select_related("parent")``
on querysets since we can't even __unicode__ efficiently without it.
"""
def get_query_set(self):
return super(CategoryManager, self).get_query_set().select_related("parent")
# ------------------------------------------------------------------------
class Category(models.Model):
"""
These categories are meant primarily for organizing media files in the
library.
"""
title = models.CharField(_('title'), max_length=200)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children', limit_choices_to={'parent__isnull': True},
verbose_name=_('parent'))
slug = models.SlugField(_('slug'), max_length=150)
class Meta:
ordering = ['parent__title', 'title']
verbose_name = _('category')
verbose_name_plural = _('categories')
objects = CategoryManager()
def __unicode__(self):
if self.parent_id:
return u'%s - %s' % (self.parent.title, self.title)
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Category, self).save(*args, **kwargs)
class CategoryAdmin(admin.ModelAdmin):
list_display = ['parent', 'title']
list_filter = ['parent']
list_per_page = 25
search_fields = ['title']
prepopulated_fields = { 'slug': ('title',), }
# ------------------------------------------------------------------------
class MediaFileBase(Base, TranslatedObjectMixin):
"""
Abstract media file class. Inherits from :class:`feincms.module.Base`
because of the (handy) extension mechanism.
"""
from django.core.files.storage import FileSystemStorage
default_storage_class = getattr(django_settings, 'DEFAULT_FILE_STORAGE',
'django.core.files.storage.FileSystemStorage')
default_storage = get_callable(default_storage_class)
fs = default_storage(location=settings.FEINCMS_MEDIALIBRARY_ROOT,
base_url=settings.FEINCMS_MEDIALIBRARY_URL)
file = models.FileField(_('file'), max_length=255, upload_to=settings.FEINCMS_MEDIALIBRARY_UPLOAD_TO, storage=fs)
type = models.CharField(_('file type'), max_length=12, editable=False, choices=())
created = models.DateTimeField(_('created'), editable=False, default=datetime.now)
copyright = models.CharField(_('copyright'), max_length=200, blank=True)
file_size = models.IntegerField(_("file size"), blank=True, null=True, editable=False)
categories = models.ManyToManyField(Category, verbose_name=_('categories'),
blank=True, null=True)
categories.category_filter = True
class Meta:
abstract = True
verbose_name = _('media file')
verbose_name_plural = _('media files')
objects = TranslatedObjectManager()
filetypes = [ ]
filetypes_dict = { }
def formatted_file_size(self):
return filesizeformat(self.file_size)
formatted_file_size.short_description = _("file size")
formatted_file_size.admin_order_field = 'file_size'
def formatted_created(self):
return self.created.strftime("%Y-%m-%d %H:%M")
formatted_created.short_description = _("created")
formatted_created.admin_order_field = 'created'
@classmethod
def reconfigure(cls, upload_to=None, storage=None):
f = cls._meta.get_field('file')
# Ugh. Copied relevant parts from django/db/models/fields/files.py
# FileField.__init__ (around line 225)
if storage:
f.storage = storage
if upload_to:
f.upload_to = upload_to
if callable(upload_to):
f.generate_filename = upload_to
@classmethod
def register_filetypes(cls, *types):
cls.filetypes[0:0] = types
choices = [ t[0:2] for t in cls.filetypes ]
cls.filetypes_dict = dict(choices)
cls._meta.get_field('type').choices[:] = choices
def __init__(self, *args, **kwargs):
super(MediaFileBase, self).__init__(*args, **kwargs)
if self.file and self.file.path:
self._original_file_path = self.file.path
def __unicode__(self):
trans = None
# This might be provided using a .extra() clause to avoid hundreds of extra queries:
if hasattr(self, "preferred_translation"):
trans = getattr(self, "preferred_translation", u"")
else:
try:
trans = unicode(self.translation)
except models.ObjectDoesNotExist:
pass
except AttributeError, e:
pass
if trans:
return trans
else:
return os.path.basename(self.file.name)
def get_absolute_url(self):
return self.file.url
def file_type(self):
t = self.filetypes_dict[self.type]
if self.type == 'image':
try:
from django.core.files.images import get_image_dimensions
d = get_image_dimensions(self.file.file)
if d: t += "<br/>%d×%d" % ( d[0], d[1] )
except IOError, e:
t += "<br/>(%s)" % e.strerror
return t
file_type.admin_order_field = 'type'
file_type.short_description = _('file type')
file_type.allow_tags = True
def file_info(self):
"""
Method for showing the file name in admin.
Note: This also includes a hidden field that can be used to extract
the file name later on; this can be used to access the file name from
JS, for example in a TinyMCE connector shim.
"""
from os.path import basename
from feincms.utils import shorten_string
return u'<input type="hidden" class="medialibrary_file_path" name="_media_path_%d" value="%s" /> %s' % (
self.id,
self.file.name,
shorten_string(basename(self.file.name), max_length=28), )
file_info.short_description = _('file info')
file_info.allow_tags = True
def determine_file_type(self, name):
"""
>>> t = MediaFileBase()
>>> t.determine_file_type('foobar.jpg')
'image'
>>> t.determine_file_type('foobar.PDF')
'pdf'
>>> t.determine_file_type('foobar.jpg.pdf')
'pdf'
>>> t.determine_file_type('foobar.jgp')
'other'
>>> t.determine_file_type('foobar-jpg')
'other'
"""
for type_key, type_name, type_test in self.filetypes:
if type_test(name):
return type_key
return self.filetypes[-1][0]
def save(self, *args, **kwargs):
if not self.id and not self.created:
self.created = datetime.now()
self.type = self.determine_file_type(self.file.name)
if self.file:
try:
self.file_size = self.file.size
except (OSError, IOError, ValueError), e:
logging.error("Unable to read file size for %s: %s", self, e)
# Try to detect things that are not really images
if self.type == 'image':
try:
try:
image = Image.open(self.file)
except (OSError, IOError):
image = Image.open(self.file.path)
# Rotate image based on exif data.
if image:
try:
exif = image._getexif()
except (AttributeError, IOError):
exif = False
# PIL < 1.1.7 chokes on JPEGs with minimal EXIF data and
# throws a KeyError deep in its guts.
except KeyError:
exif = False
if exif:
orientation = exif.get(274)
rotation = 0
if orientation == 3:
rotation = 180
elif orientation == 6:
rotation = 270
elif orientation == 8:
rotation = 90
if rotation:
image = image.rotate(rotation)
image.save(self.file.path)
except (OSError, IOError), e:
self.type = self.determine_file_type('***') # It's binary something
if getattr(self, '_original_file_path', None):
if self.file.path != self._original_file_path:
try:
os.unlink(self._original_file_path)
except:
pass
super(MediaFileBase, self).save(*args, **kwargs)
self.purge_translation_cache()
# ------------------------------------------------------------------------
MediaFileBase.register_filetypes(
# Should we be using imghdr.what instead of extension guessing?
('image', _('Image'), lambda f: re.compile(r'\.(bmp|jpe?g|jp2|jxr|gif|png|tiff?)$', re.IGNORECASE).search(f)),
('video', _('Video'), lambda f: re.compile(r'\.(mov|m[14]v|mp4|avi|mpe?g|qt|ogv|wmv)$', re.IGNORECASE).search(f)),
('audio', _('Audio'), lambda f: re.compile(r'\.(au|mp3|m4a|wma|oga|ram|wav)$', re.IGNORECASE).search(f)),
('pdf', _('PDF document'), lambda f: f.lower().endswith('.pdf')),
('swf', _('Flash'), lambda f: f.lower().endswith('.swf')),
('txt', _('Text'), lambda f: f.lower().endswith('.txt')),
('rtf', _('Rich Text'), lambda f: f.lower().endswith('.rtf')),
('zip', _('Zip archive'), lambda f: f.lower().endswith('.zip')),
('doc', _('Microsoft Word'), lambda f: re.compile(r'\.docx?$', re.IGNORECASE).search(f)),
('xls', _('Microsoft Excel'), lambda f: re.compile(r'\.xlsx?$', re.IGNORECASE).search(f)),
('ppt', _('Microsoft PowerPoint'), lambda f: re.compile(r'\.pptx?$', re.IGNORECASE).search(f)),
('other', _('Binary'), lambda f: True), # Must be last
)
# ------------------------------------------------------------------------
class MediaFile(MediaFileBase):
@classmethod
def register_extension(cls, register_fn):
register_fn(cls, MediaFileAdmin)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
class MediaFileTranslation(Translation(MediaFile)):
"""
Translated media file caption and description.
"""
caption = models.CharField(_('caption'), max_length=200)
description = models.TextField(_('description'), blank=True)
class | :
verbose_name = _('media file translation')
verbose_name_plural = _('media file translations')
def __unicode__(self):
return self.caption
#-------------------------------------------------------------------------
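#--- Illustrative sketch (added for clarity, not part of the original module)
# __unicode__() on MediaFileBase prefers the translated caption and falls back
# to the file's base name. A minimal sketch of that behaviour, assuming a
# configured Django project; the file name and caption are made up, and the
# parent/language_code field names follow the columns referenced in
# MediaFileAdmin.queryset() below.
from django.core.files.base import ContentFile

def _mediafile_translation_sketch():
    mf = MediaFile()
    mf.file.save(u'press-kit.pdf', ContentFile('%PDF-1.4 ...'))   # also saves mf
    print unicode(mf)                # u'press-kit.pdf'  (no translation yet)
    MediaFileTranslation.objects.create(
        parent=mf, language_code='en', caption=u'Press kit')
    mf.purge_translation_cache()
    print unicode(mf)                # u'Press kit'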
class MediaFileTranslationInline(admin.StackedInline):
model = MediaFileTranslation
max_num = len(django_settings.LANGUAGES)
def admin_thumbnail(obj):
if obj.type == 'image':
image = None
try:
image = feincms_thumbnail.thumbnail(obj.file.name, '100x60')
except:
pass
if image:
return mark_safe(u"""
<a href="%(url)s" target="_blank">
<img src="%(image)s" alt="" />
</a>""" % {
'url': obj.file.url,
'image': image,})
return ''
admin_thumbnail.short_description = _('Preview')
admin_thumbnail.allow_tags = True
#-------------------------------------------------------------------------
class MediaFileAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
inlines = [MediaFileTranslationInline]
list_display = ['__unicode__', admin_thumbnail, 'file_type', 'copyright', 'file_info', 'formatted_file_size', 'formatted_created']
list_filter = ['type', 'categories']
list_per_page = 25
search_fields = ['copyright', 'file', 'translations__caption']
filter_horizontal = ("categories",)
def get_urls(self):
from django.conf.urls.defaults import url, patterns
urls = super(MediaFileAdmin, self).get_urls()
my_urls = patterns('',
url(r'^mediafile-bulk-upload/$', self.admin_site.admin_view(MediaFileAdmin.bulk_upload), {}, name='mediafile_bulk_upload')
)
return my_urls + urls
def changelist_view(self, request, extra_context=None):
if extra_context is None:
extra_context = {}
extra_context['categories'] = Category.objects.all()
return super(MediaFileAdmin, self).changelist_view(request, extra_context=extra_context)
@staticmethod
# 1.2 @csrf_protect
@permission_required('medialibrary.add_mediafile')
def bulk_upload(request):
from django.core.urlresolvers import reverse
from django.utils.functional import lazy
def import_zipfile(request, category_id, data):
import zipfile
from os import path
category = None
if category_id:
category = Category.objects.get(pk=int(category_id))
try:
z = zipfile.ZipFile(data)
storage = MediaFile.fs
if not storage:
messages.error(request, _("Could not access storage"))
return
count = 0
for zi in z.infolist():
if not zi.filename.endswith('/'):
from django.template.defaultfilters import slugify
from django.core.files.base import ContentFile
bname = path.basename(zi.filename)
if bname and not bname.startswith(".") and "." in bname:
fname, ext = path.splitext(bname)
target_fname = slugify(fname) + ext.lower()
mf = MediaFile()
mf.file.save(target_fname, ContentFile(z.read(zi.filename)))
mf.save()
if category:
mf.categories.add(category)
count += 1
messages.info(request, _("%d files imported") % count)
except Exception, e:
messages.error(request, _("ZIP file invalid: %s") % str(e))
return
if request.method == 'POST' and 'data' in request.FILES:
import_zipfile(request, request.POST.get('category'), request.FILES['data'])
else:
messages.error(request, _("No input file given"))
return HttpResponseRedirect(reverse('admin:medialibrary_mediafile_changelist'))
def queryset(self, request):
qs = super(MediaFileAdmin, self).queryset(request)
# FIXME: This is an ugly hack but it avoids 1-3 queries per *FILE*
# retrieving the translation information
if django_settings.DATABASE_ENGINE == 'postgresql_psycopg2':
qs = qs.extra(
select = {
'preferred_translation':
"""SELECT caption FROM medialibrary_mediafiletranslation
WHERE medialibrary_mediafiletranslation.parent_id = medialibrary_mediafile.id
ORDER BY
language_code = %s DESC,
language_code = %s DESC,
LENGTH(language_code) DESC
LIMIT 1
"""
},
select_params = (translation.get_language(), django_settings.LANGUAGE_CODE)
)
return qs
def save_model(self, request, obj, form, change):
obj.purge_translation_cache()
return super(MediaFileAdmin, self).save_model(request, obj, form, change)
#-------------------------------------------------------------------------
| Meta | identifier_name |
models.py | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from datetime import datetime
from django.contrib import admin, messages
from django.contrib.auth.decorators import permission_required
from django.conf import settings as django_settings
from django.core.urlresolvers import get_callable
from django.db import models
from django.template.defaultfilters import filesizeformat
from django.utils.safestring import mark_safe
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
from django.http import HttpResponseRedirect
# 1.2 from django.views.decorators.csrf import csrf_protect
from feincms import settings
from feincms.models import Base
from feincms.templatetags import feincms_thumbnail
from feincms.translations import TranslatedObjectMixin, Translation, \
TranslatedObjectManager
import re
import os
import logging
from PIL import Image
# ------------------------------------------------------------------------
class CategoryManager(models.Manager):
"""
Simple manager which exists only to supply ``.select_related("parent")``
on querysets since we can't even __unicode__ efficiently without it.
"""
def get_query_set(self):
return super(CategoryManager, self).get_query_set().select_related("parent")
# ------------------------------------------------------------------------
class Category(models.Model):
"""
These categories are meant primarily for organizing media files in the
library.
"""
title = models.CharField(_('title'), max_length=200)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children', limit_choices_to={'parent__isnull': True},
verbose_name=_('parent'))
slug = models.SlugField(_('slug'), max_length=150)
class Meta:
ordering = ['parent__title', 'title']
verbose_name = _('category')
verbose_name_plural = _('categories')
objects = CategoryManager()
def __unicode__(self):
if self.parent_id:
return u'%s - %s' % (self.parent.title, self.title)
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Category, self).save(*args, **kwargs)
class CategoryAdmin(admin.ModelAdmin):
list_display = ['parent', 'title']
list_filter = ['parent']
list_per_page = 25
search_fields = ['title']
prepopulated_fields = { 'slug': ('title',), }
# ------------------------------------------------------------------------
class MediaFileBase(Base, TranslatedObjectMixin):
"""
Abstract media file class. Inherits from :class:`feincms.module.Base`
because of the (handy) extension mechanism.
"""
from django.core.files.storage import FileSystemStorage
default_storage_class = getattr(django_settings, 'DEFAULT_FILE_STORAGE',
'django.core.files.storage.FileSystemStorage')
default_storage = get_callable(default_storage_class)
fs = default_storage(location=settings.FEINCMS_MEDIALIBRARY_ROOT,
base_url=settings.FEINCMS_MEDIALIBRARY_URL)
file = models.FileField(_('file'), max_length=255, upload_to=settings.FEINCMS_MEDIALIBRARY_UPLOAD_TO, storage=fs)
type = models.CharField(_('file type'), max_length=12, editable=False, choices=())
created = models.DateTimeField(_('created'), editable=False, default=datetime.now)
copyright = models.CharField(_('copyright'), max_length=200, blank=True)
file_size = models.IntegerField(_("file size"), blank=True, null=True, editable=False)
categories = models.ManyToManyField(Category, verbose_name=_('categories'),
blank=True, null=True)
categories.category_filter = True
class Meta:
abstract = True
verbose_name = _('media file')
verbose_name_plural = _('media files')
objects = TranslatedObjectManager()
filetypes = [ ]
filetypes_dict = { }
def formatted_file_size(self):
return filesizeformat(self.file_size)
formatted_file_size.short_description = _("file size")
formatted_file_size.admin_order_field = 'file_size'
def formatted_created(self):
return self.created.strftime("%Y-%m-%d %H:%M")
formatted_created.short_description = _("created")
formatted_created.admin_order_field = 'created'
@classmethod
def reconfigure(cls, upload_to=None, storage=None):
f = cls._meta.get_field('file')
# Ugh. Copied relevant parts from django/db/models/fields/files.py
# FileField.__init__ (around line 225)
if storage:
f.storage = storage
if upload_to:
f.upload_to = upload_to
if callable(upload_to):
f.generate_filename = upload_to
@classmethod
def register_filetypes(cls, *types):
cls.filetypes[0:0] = types
choices = [ t[0:2] for t in cls.filetypes ]
cls.filetypes_dict = dict(choices)
cls._meta.get_field('type').choices[:] = choices
def __init__(self, *args, **kwargs):
super(MediaFileBase, self).__init__(*args, **kwargs)
if self.file and self.file.path:
self._original_file_path = self.file.path
def __unicode__(self):
trans = None
# This might be provided using a .extra() clause to avoid hundreds of extra queries:
if hasattr(self, "preferred_translation"):
trans = getattr(self, "preferred_translation", u"")
else:
try:
trans = unicode(self.translation)
except models.ObjectDoesNotExist:
pass
except AttributeError, e:
pass
if trans:
return trans
else:
return os.path.basename(self.file.name)
def get_absolute_url(self):
return self.file.url
def file_type(self):
t = self.filetypes_dict[self.type]
if self.type == 'image':
try:
from django.core.files.images import get_image_dimensions
d = get_image_dimensions(self.file.file)
if d: t += "<br/>%d×%d" % ( d[0], d[1] )
except IOError, e:
t += "<br/>(%s)" % e.strerror
return t
file_type.admin_order_field = 'type'
file_type.short_description = _('file type')
file_type.allow_tags = True
def file_info(self):
"""
Method for showing the file name in admin.
Note: This also includes a hidden field that can be used to extract
the file name later on, this can be used to access the file name from
JS, like for example a TinyMCE connector shim.
"""
from os.path import basename
from feincms.utils import shorten_string
return u'<input type="hidden" class="medialibrary_file_path" name="_media_path_%d" value="%s" /> %s' % (
self.id,
self.file.name,
shorten_string(basename(self.file.name), max_length=28), )
file_info.short_description = _('file info')
file_info.allow_tags = True
def determine_file_type(self, name):
"""
>>> t = MediaFileBase()
>>> t.determine_file_type('foobar.jpg')
'image'
>>> t.determine_file_type('foobar.PDF')
'pdf'
>>> t.determine_file_type('foobar.jpg.pdf')
'pdf'
>>> t.determine_file_type('foobar.jgp')
'other'
>>> t.determine_file_type('foobar-jpg')
'other'
"""
for type_key, type_name, type_test in self.filetypes:
if type_test(name):
return type_key
return self.filetypes[-1][0]
def save(self, *args, **kwargs):
if not self.id and not self.created:
self.created = datetime.now()
self.type = self.determine_file_type(self.file.name)
if self.file:
try:
self.file_size = self.file.size
except (OSError, IOError, ValueError), e:
logging.error("Unable to read file size for %s: %s", self, e)
# Try to detect things that are not really images
if self.type == 'image':
try:
try:
image = Image.open(self.file)
except (OSError, IOError):
image = Image.open(self.file.path)
# Rotate image based on exif data.
if image:
try:
exif = image._getexif()
except (AttributeError, IOError):
exif = False
# PIL < 1.1.7 chokes on JPEGs with minimal EXIF data and
# throws a KeyError deep in its guts.
except KeyError:
exif = False
if exif:
orientation = exif.get(274)
rotation = 0
if orientation == 3:
rotation = 180
elif orientation == 6:
rotation = 270
elif orientation == 8:
rotation = 90
if rotation:
image = image.rotate(rotation)
image.save(self.file.path)
except (OSError, IOError), e:
self.type = self.determine_file_type('***') # It's binary something
if getattr(self, '_original_file_path', None):
if self.file.path != self._original_file_path:
try:
os.unlink(self._original_file_path)
except:
pass
super(MediaFileBase, self).save(*args, **kwargs)
self.purge_translation_cache()
# ------------------------------------------------------------------------
MediaFileBase.register_filetypes(
# Should we be using imghdr.what instead of extension guessing?
('image', _('Image'), lambda f: re.compile(r'\.(bmp|jpe?g|jp2|jxr|gif|png|tiff?)$', re.IGNORECASE).search(f)),
('video', _('Video'), lambda f: re.compile(r'\.(mov|m[14]v|mp4|avi|mpe?g|qt|ogv|wmv)$', re.IGNORECASE).search(f)),
('audio', _('Audio'), lambda f: re.compile(r'\.(au|mp3|m4a|wma|oga|ram|wav)$', re.IGNORECASE).search(f)),
('pdf', _('PDF document'), lambda f: f.lower().endswith('.pdf')),
('swf', _('Flash'), lambda f: f.lower().endswith('.swf')),
('txt', _('Text'), lambda f: f.lower().endswith('.txt')),
('rtf', _('Rich Text'), lambda f: f.lower().endswith('.rtf')),
('zip', _('Zip archive'), lambda f: f.lower().endswith('.zip')),
('doc', _('Microsoft Word'), lambda f: re.compile(r'\.docx?$', re.IGNORECASE).search(f)),
('xls', _('Microsoft Excel'), lambda f: re.compile(r'\.xlsx?$', re.IGNORECASE).search(f)),
('ppt', _('Microsoft PowerPoint'), lambda f: re.compile(r'\.pptx?$', re.IGNORECASE).search(f)),
('other', _('Binary'), lambda f: True), # Must be last
)
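# A hypothetical sketch of how a project could add its own type on top of the
# defaults above (the 'svg' key and label are illustrative, not part of FeinCMS):
#
#   MediaFileBase.register_filetypes(
#       ('svg', _('SVG image'), lambda f: f.lower().endswith('.svg')))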
# ------------------------------------------------------------------------
class MediaFile(MediaFileBase):
@classmethod
def register_extension(cls, register_fn):
register_fn(cls, MediaFileAdmin)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
class MediaFileTranslation(Translation(MediaFile)):
"""
Translated media file caption and description.
"""
caption = models.CharField(_('caption'), max_length=200)
description = models.TextField(_('description'), blank=True)
class Meta:
verbose_name = _('media file translation')
verbose_name_plural = _('media file translations')
def __unicode__(self):
return self.caption
#-------------------------------------------------------------------------
class MediaFileTranslationInline(admin.StackedInline):
model = MediaFileTranslation
max_num = len(django_settings.LANGUAGES)
def admin_thumbnail(obj):
if obj.type == 'image':
image = None
try:
image = feincms_thumbnail.thumbnail(obj.file.name, '100x60')
except:
pass
if image:
return mark_safe(u"""
<a href="%(url)s" target="_blank">
<img src="%(image)s" alt="" />
</a>""" % {
'url': obj.file.url,
'image': image,})
return ''
admin_thumbnail.short_description = _('Preview')
admin_thumbnail.allow_tags = True
#-------------------------------------------------------------------------
class MediaFileAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
inlines = [MediaFileTranslationInline]
list_display = ['__unicode__', admin_thumbnail, 'file_type', 'copyright', 'file_info', 'formatted_file_size', 'formatted_created']
list_filter = ['type', 'categories']
list_per_page = 25
search_fields = ['copyright', 'file', 'translations__caption']
filter_horizontal = ("categories",)
def get_urls(self):
from django.conf.urls.defaults import url, patterns
urls = super(MediaFileAdmin, self).get_urls()
my_urls = patterns('',
url(r'^mediafile-bulk-upload/$', self.admin_site.admin_view(MediaFileAdmin.bulk_upload), {}, name='mediafile_bulk_upload')
)
return my_urls + urls
def changelist_view(self, request, extra_context=None):
if extra_context is None:
extra_context = {}
extra_context['categories'] = Category.objects.all()
return super(MediaFileAdmin, self).changelist_view(request, extra_context=extra_context)
@staticmethod
# 1.2 @csrf_protect
@permission_required('medialibrary.add_mediafile')
def bulk_upload(request):
from django.core.urlresolvers import reverse
from django.utils.functional import lazy
def import_zipfile(request, category_id, data):
import zipfile
from os import path
category = None
if category_id:
category = Category.objects.get(pk=int(category_id))
try:
z = zipfile.ZipFile(data)
storage = MediaFile.fs
if not storage:
messages.error(request, _("Could not access storage"))
return
count = 0
for zi in z.infolist():
if not zi.filename.endswith('/'):
from django.template.defaultfilters import slugify
from django.core.files.base import ContentFile
bname = path.basename(zi.filename)
if bname and not bname.startswith(".") and "." in bname:
fname, ext = path.splitext(bname)
target_fname = slugify(fname) + ext.lower()
mf = MediaFile()
mf.file.save(target_fname, ContentFile(z.read(zi.filename)))
mf.save()
if category:
mf.categories.add(category)
count += 1
messages.info(request, _("%d files imported") % count)
except Exception, e:
messages.error(request, _("ZIP file invalid: %s") % str(e))
return
if request.method == 'POST' and 'data' in request.FILES:
import_zipfile(request, request.POST.get('category'), request.FILES['data'])
        else:
            messages.error(request, _("No input file given"))
        return HttpResponseRedirect(reverse('admin:medialibrary_mediafile_changelist'))
def queryset(self, request):
qs = super(MediaFileAdmin, self).queryset(request)
# FIXME: This is an ugly hack but it avoids 1-3 queries per *FILE*
# retrieving the translation information
if django_settings.DATABASE_ENGINE == 'postgresql_psycopg2':
qs = qs.extra(
select = {
'preferred_translation':
"""SELECT caption FROM medialibrary_mediafiletranslation
WHERE medialibrary_mediafiletranslation.parent_id = medialibrary_mediafile.id
ORDER BY
language_code = %s DESC,
language_code = %s DESC,
LENGTH(language_code) DESC
LIMIT 1
"""
},
select_params = (translation.get_language(), django_settings.LANGUAGE_CODE)
)
return qs
def save_model(self, request, obj, form, change):
obj.purge_translation_cache()
return super(MediaFileAdmin, self).save_model(request, obj, form, change)
#-------------------------------------------------------------------------
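# Hint (not part of the original module): in a plain Django project these admin
# classes would typically be hooked up with something like
#   admin.site.register(Category, CategoryAdmin)
#   admin.site.register(MediaFile, MediaFileAdmin)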
transformer.py
"""
Encoder part is inherited from
https://github.com/PaddlePaddle/models/tree/release/1.8/PaddleNLP/machine_translation/transformer
Note the change in `scaled_dot_product_attention`.
"""
from functools import partial
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
dropout_seed = None
def wrap_layer_with_block(layer, block_idx):
"""
Make layer define support indicating block, by which we can add layers
to other blocks within current block. This will make it easy to define
cache among while loop.
"""
class BlockGuard(object):
"""
BlockGuard class.
BlockGuard class is used to switch to the given block in a program by
using the Python `with` keyword.
"""
def __init__(self, block_idx=None, main_program=None):
self.main_program = fluid.default_main_program(
) if main_program is None else main_program
self.old_block_idx = self.main_program.current_block().idx
self.new_block_idx = block_idx
def __enter__(self):
self.main_program.current_block_idx = self.new_block_idx
def __exit__(self, exc_type, exc_val, exc_tb):
self.main_program.current_block_idx = self.old_block_idx
if exc_type is not None:
return False # re-raise exception
return True
def layer_wrapper(*args, **kwargs):
with BlockGuard(block_idx):
return layer(*args, **kwargs)
return layer_wrapper
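# Illustrative use, mirroring the calls further below: build an fc layer whose
# ops and vars are created in the parent block, e.g. for caches shared across
# beam-search steps.
#
#   parent_fc = wrap_layer_with_block(
#       layers.fc, fluid.default_main_program().current_block().parent_idx)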
def multi_head_attention(queries, keys, values, attn_bias, d_key, d_value, d_model, pos_enc,
n_head=1, dropout_rate=0., cache=None, static_kv=False):
"""
Multi-Head Attention. Note that attn_bias is added to the logit before
computing softmax activation to mask certain selected positions so that
    they will not be considered in the attention weights.
Args:
queries: input_sentence, shaped like [bsz, len_sentence, embedding_dim].
keys: Most of the time, you just need queries, so set this value as None.
values: Most of the time, you just need queries, so set this value as None.
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
d_key: The dimension wanted for keys and queries.
d_value: The dimension wanted for values.
d_model: output dimension of fully connected layer.
pos_enc: Relative Positional encoder, whose shape is [2 X len_sentence, d_key].
n_head: Number of attention heads.
dropout_rate: probability on dropout layer.
Return:
The result of this multi-head attention layer.
shape: [batch size, sentence len, d_model].
"""
keys = queries if keys is None else keys
values = keys if values is None else values
if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: queries, keys and values should all be 3-D tensors."
)
def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
q = layers.fc(input=queries, size=d_key * n_head,
bias_attr=False, num_flatten_dims=2)
fc_layer = wrap_layer_with_block(
layers.fc, fluid.default_main_program().current_block().parent_idx
) if cache is not None and static_kv else layers.fc
k = fc_layer(input=keys, size=d_key * n_head,
bias_attr=False, num_flatten_dims=2)
v = fc_layer(input=values, size=d_value * n_head,
bias_attr=False, num_flatten_dims=2)
return q, k, v
    def __split_heads_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Reshape input tensors at the last dimension to split multi-heads
and then transpose. Specifically, transform the input tensor with shape
[bs, max_sequence_length, n_head * hidden_dim] to the output tensor
with shape [bs, n_head, max_sequence_length, hidden_dim].
"""
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
reshaped_q = layers.reshape(
x=queries, shape=[0, 0, n_head, d_key], inplace=True)
# permute the dimensions into:
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
q = layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
# For encoder-decoder attention in inference, insert the ops and vars
# into global block to use as cache among beam search.
reshape_layer = wrap_layer_with_block(
layers.reshape,
fluid.default_main_program().current_block(
).parent_idx) if cache is not None and static_kv else layers.reshape
transpose_layer = wrap_layer_with_block(
layers.transpose,
fluid.default_main_program().current_block().
parent_idx) if cache is not None and static_kv else layers.transpose
reshaped_k = reshape_layer(
x=keys, shape=[0, 0, n_head, d_key], inplace=True)
k = transpose_layer(x=reshaped_k, perm=[0, 2, 1, 3])
reshaped_v = reshape_layer(
x=values, shape=[0, 0, n_head, d_value], inplace=True)
v = transpose_layer(x=reshaped_v, perm=[0, 2, 1, 3])
if cache is not None: # only for faster inference
cache_, i = cache
if static_kv: # For encoder-decoder attention in inference
cache_k, cache_v = cache_["static_k"], cache_["static_v"]
# To init the static_k and static_v in global block.
static_cache_init = wrap_layer_with_block(
layers.assign,
fluid.default_main_program().current_block().parent_idx)
static_cache_init(
k,
fluid.default_main_program().global_block().var(
"static_k_%d" % i))
static_cache_init(
v,
fluid.default_main_program().global_block().var(
"static_v_%d" % i))
k, v = cache_k, cache_v
else: # For decoder self-attention in inference
# use cache and concat time steps.
cache_k, cache_v = cache_["k"], cache_["v"]
k = layers.concat([cache_k, k], axis=2)
v = layers.concat([cache_v, v], axis=2)
cache_["k"], cache_["v"] = (k, v)
return q, k, v
def __combine_heads(x):
"""
        Transpose and then reshape the last two dimensions of input tensor x
so that it becomes one dimension, which is reverse to __split_heads.
"""
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
return layers.reshape(
x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
inplace=True)
def _shift(BD):
"""
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
to
0 1 2
-1 0 1
-2 -1 0
:param BD: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = BD.size()
zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))
BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),
shape=(bsz, n_head, -1, max_len))
BD = layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))
BD = BD[:, :, :, max_len:]
return BD
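    # Concrete walk-through of _shift for an assumed max_len of 3: each slice
    # BD[b, h] is 3 x 6; padding a zero column gives 3 x 7, reshaping to 7 x 3
    # and dropping the last row gives 6 x 3, reshaping back to 3 x 6 and keeping
    # the last 3 columns yields the 3 x 3 matrix whose entry (i, j) holds the
    # score for relative offset j - i, as drawn in the docstring above.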
def _transpose_shift(E):
"""
-3 -2 -1 0 1 2
-30 -20 -10 00 10 20
-300 -200 -100 000 100 200
to
0 -10 -200
1 00 -100
2 10 000
:param E: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = E.size()
zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))
E = layers.reshape(x=layers.concat([E, zero_pad], axis=-1),
shape=(bsz, n_head, -1, max_len))
indice = layers.arange(start=0, end=max_len, dtype=int)
E = layers.index_select(input=E, index=indice, dim=-2)
E = layers.transpose(E, perm=[0, 1, 3, 2])
return E
def scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key, dropout_rate):
"""
Scaled Dot-Product Attention
Change:
- Different from the original one.
We will remove the scale factor math: \sqrt{d_k} according to the paper.
- Bias for attention and position encoding are added.
"""
# product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_key**-0.5)
# now q, k should be shaped like
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
# pos_enc should be shaped like [2 X l, head_dim], and head_dim = d_key
max_sequence_len = q.shape[2]
r_r_bias = layers.create_parameter(shape=(n_head, d_key)) # [n_head, head_dim]
r_w_bias = layers.create_parameter(shape=(n_head, d_key)) # [n_head, head_dim]
rw_head_q = q + r_r_bias[:, None] # [batch, n_head, max_sequence_len, head_dim]
AC = layers.matmul(x=rw_head_q, y=k, transpose_y=True) # [batch, n_head, max_sequence_len, max_seqence_len]
# position bias for each head, shaped like [n_head, 2 X max_sequence_len].
# Then add two dimensions at `batch` and `maxlen`.
D_ = layers.matmul(x=r_w_bias, y=pos_enc, transpose_y=True)[None, :, None]
# position bias for each query, shaped like [batch, n_head, max_len, 2 X max_len]
B_ = layers.matmul(x=q, y=pos_enc, transpose_y=True)
# bias for each key, shaped like [batch, n_head, max_len, 2 X max_len]
E_ = layers.matmul(x=k, y=pos_enc, transpose_y=True)
# shaped like [batch, n_head, max_len, 2 X max_len]
# change it to [batch, n_head, max_len, max_len]
BD = B_ + D_
BDE = _shift(BD) + _transpose_shift(E_)
product = AC + BDE
# product = layers.matmul(x=q, y=k, transposed_y=True, alpha=1.0) + \
# layers.matmul(x=q, y=pos_enc, transposed_y=True) +\
# layers.transpose(x=last_two, perm=[0, 1, 3, 2])
if attn_bias:
product += attn_bias
weights = layers.softmax(product)
if dropout_rate:
weights = layers.dropout(
weights,
dropout_prob=dropout_rate,
seed=dropout_seed,
is_test=False)
out = layers.matmul(weights, v)
return out
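    # In short, for query position i and key position j the score combines a
    # content term (q_i + r_r_bias) . k_j with position terms q_i . R,
    # r_w_bias . R and k_j . R, where R is the row of pos_enc for the relative
    # offset between i and j (aligned by _shift / _transpose_shift above).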
q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)
ctx_multiheads = scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key,
dropout_rate)
out = __combine_heads(ctx_multiheads)
# Project back to the model size.
proj_out = layers.fc(input=out,
size=d_model,
bias_attr=False,
num_flatten_dims=2)
return proj_out
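# Hedged usage sketch (shapes only; the tensor names below are assumptions, not
# variables defined in this file):
#
#   enc_input: [batch, max_len, d_model], rel_pos_enc: [2 * max_len, d_key]
#   out = multi_head_attention(enc_input, None, None, attn_bias,
#                              d_key=64, d_value=64, d_model=512,
#                              pos_enc=rel_pos_enc, n_head=8, dropout_rate=0.1)
#   # -> out has shape [batch, max_len, 512]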
def positionwise_feed_forward(x, d_inner_hid, d_hid, dropout_rate):
"""
Position-wise Feed-Forward Networks.
This module consists of two linear transformations with a ReLU activation
in between, which is applied to each position separately and identically.
"""
hidden = layers.fc(input=x,
size=d_inner_hid,
num_flatten_dims=2,
act="relu")
if dropout_rate:
hidden = layers.dropout(
hidden, dropout_prob=dropout_rate, seed=dropout_seed, is_test=False)
out = layers.fc(input=hidden, size=d_hid, num_flatten_dims=2)
return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):
"""
    Add residual connection, layer normalization and dropout to the out tensor
optionally according to the value of process_cmd.
This will be used before or after multi-head attention and position-wise
feed-forward networks.
"""
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = layers.layer_norm(
out,
begin_norm_axis=len(out.shape) - 1,
param_attr=fluid.initializer.Constant(1.),
bias_attr=fluid.initializer.Constant(0.))
elif cmd == "d": # add dropout
if dropout_rate:
out = layers.dropout(
out,
dropout_prob=dropout_rate,
seed=dropout_seed,
is_test=False)
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
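# Reading the command strings: with the defaults used below, preprocess_cmd='n'
# only layer-normalizes its input, while postprocess_cmd='da' applies dropout
# and then adds the residual connection.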
def encoder_layer(enc_input, attn_bias, n_head, d_key,
d_value, d_model, d_inner_hid, pos_enc, prepostprocess_dropout,
attention_dropout, relu_dropout, preprocess_cmd='n',
postprocess_cmd='da'):
"""
The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention layer followed by a
    position-wise feed-forward network; both components are wrapped with
    post_process_layer to add the residual connection, layer normalization
    and dropout.
Args:
enc_input: Embedded input for the sentences.
(batch_size, len_sentence, embedding_dim)
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
        n_head: Number of attention heads.
d_key: Dimension of keys and queries.
d_value: Dimension of values.
d_model: Dimension of the encoder layer outputs.
d_inner_hid: Dimension of the feed forward layer inside.
        pos_enc: Relative position encoder. (2 X max_len, d_key).
prepostprocess_dropout: The dropout probability of the process layer
before or after.
attention_dropout: Dropout probability in the attention layer.
relu_dropout: The activation in the feed forward layer is `relu`.
Set the probability here.
post/preprocess_cmd: The layers should be stacked. Use its default values
unless the model needs to be changed.
Return:
An encoder layer output, (bsz, max_len, d_model).
"""
attn_output = multi_head_attention(
pre_process_layer(enc_input, preprocess_cmd, prepostprocess_dropout),
None, None, attn_bias, d_key, d_value, d_model, pos_enc,
n_head, attention_dropout
    )
    attn_output = post_process_layer(enc_input, attn_output,
        postprocess_cmd, prepostprocess_dropout)
    ffd_output = positionwise_feed_forward(
        pre_process_layer(attn_output, preprocess_cmd, prepostprocess_dropout),
d_inner_hid, d_model, relu_dropout
)
return post_process_layer(attn_output, ffd_output,
postprocess_cmd, prepostprocess_dropout)
def encoder(enc_input, attn_bias, n_layer, n_head,
d_key, d_value, d_model, d_inner_hid, pos_enc,
            prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd='n',
postprocess_cmd='da'):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
Args:
enc_input: Embedded input for the sentences.
(batch_size, len_sentence, embedding_dim)
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
        n_layer: Number of layers stacked together.
n_head: Number of attention heads.
d_key: Dimension of keys and queries.
d_value: Dimension of values.
d_model: Dimension of the encoder layer outputs.
d_inner_hid: Dimension of the feed forward layer inside.
        pos_enc: Relative position encoder. (2 X max_len, d_key).
prepostprocess_dropout: The dropout probability of the process layer
before or after.
attention_dropout: Dropout probability in the attention layer.
relu_dropout: The activation in the feed forward layer is `relu`.
Set the probability here.
post/preprocess_cmd: The layers should be stacked. Use its default values
unless the model needs to be changed.
Return:
Encoder output of the sentence input.
(batch size, sentence len, d_model)
"""
for i in range(n_layer):
enc_output = encoder_layer(enc_input, attn_bias, n_head,
d_key, d_value, d_model,d_inner_hid, pos_enc,
prepostprocess_dropout, attention_dropout,relu_dropout,
preprocess_cmd, postprocess_cmd
)
enc_input = enc_output
enc_output = pre_process_layer(enc_output,
        preprocess_cmd, prepostprocess_dropout)
    return enc_output
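# A minimal end-to-end sketch under assumed hyper-parameters (the names and
# sizes below are illustrative, not defined elsewhere in this file):
#
#   src_emb = fluid.data('src_emb', shape=[None, max_len, 512], dtype='float32')
#   bias = fluid.data('attn_bias', shape=[None, 8, max_len, max_len], dtype='float32')
#   rel_pos = layers.create_parameter(shape=(2 * max_len, 64), dtype='float32')
#   enc_out = encoder(src_emb, bias, n_layer=6, n_head=8, d_key=64, d_value=64,
#                     d_model=512, d_inner_hid=2048, pos_enc=rel_pos,
#                     prepostprocess_dropout=0.1, attention_dropout=0.1,
#                     relu_dropout=0.1)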
transformer.py
"""
Encoder part is inherited from
https://github.com/PaddlePaddle/models/tree/release/1.8/PaddleNLP/machine_translation/transformer
Note the change in `scaled_dot_product_attention`.
"""
from functools import partial
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
dropout_seed = None
def wrap_layer_with_block(layer, block_idx):
"""
Make layer define support indicating block, by which we can add layers
to other blocks within current block. This will make it easy to define
cache among while loop.
"""
class BlockGuard(object):
"""
BlockGuard class.
BlockGuard class is used to switch to the given block in a program by
using the Python `with` keyword.
"""
def __init__(self, block_idx=None, main_program=None):
self.main_program = fluid.default_main_program(
) if main_program is None else main_program
self.old_block_idx = self.main_program.current_block().idx
self.new_block_idx = block_idx
def __enter__(self):
self.main_program.current_block_idx = self.new_block_idx
def __exit__(self, exc_type, exc_val, exc_tb):
self.main_program.current_block_idx = self.old_block_idx
if exc_type is not None:
return False # re-raise exception
return True
def layer_wrapper(*args, **kwargs):
with BlockGuard(block_idx):
return layer(*args, **kwargs)
return layer_wrapper
def multi_head_attention(queries, keys, values, attn_bias, d_key, d_value, d_model, pos_enc,
n_head=1, dropout_rate=0., cache=None, static_kv=False):
"""
Multi-Head Attention. Note that attn_bias is added to the logit before
computing softmax activation to mask certain selected positions so that
    they will not be considered in the attention weights.
Args:
queries: input_sentence, shaped like [bsz, len_sentence, embedding_dim].
keys: Most of the time, you just need queries, so set this value as None.
values: Most of the time, you just need queries, so set this value as None.
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
d_key: The dimension wanted for keys and queries.
d_value: The dimension wanted for values.
d_model: output dimension of fully connected layer.
pos_enc: Relative Positional encoder, whose shape is [2 X len_sentence, d_key].
n_head: Number of attention heads.
dropout_rate: probability on dropout layer.
Return:
The result of this multi-head attention layer.
shape: [batch size, sentence len, d_model].
"""
keys = queries if keys is None else keys
values = keys if values is None else values
if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: queries, keys and values should all be 3-D tensors."
)
def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
q = layers.fc(input=queries, size=d_key * n_head,
bias_attr=False, num_flatten_dims=2)
fc_layer = wrap_layer_with_block(
layers.fc, fluid.default_main_program().current_block().parent_idx
) if cache is not None and static_kv else layers.fc
k = fc_layer(input=keys, size=d_key * n_head,
bias_attr=False, num_flatten_dims=2)
v = fc_layer(input=values, size=d_value * n_head,
bias_attr=False, num_flatten_dims=2)
return q, k, v
    def __split_heads_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Reshape input tensors at the last dimension to split multi-heads
and then transpose. Specifically, transform the input tensor with shape
[bs, max_sequence_length, n_head * hidden_dim] to the output tensor
with shape [bs, n_head, max_sequence_length, hidden_dim].
"""
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
reshaped_q = layers.reshape(
x=queries, shape=[0, 0, n_head, d_key], inplace=True)
# permute the dimensions into:
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
q = layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
# For encoder-decoder attention in inference, insert the ops and vars
# into global block to use as cache among beam search.
reshape_layer = wrap_layer_with_block(
layers.reshape,
fluid.default_main_program().current_block(
).parent_idx) if cache is not None and static_kv else layers.reshape
transpose_layer = wrap_layer_with_block(
layers.transpose,
fluid.default_main_program().current_block().
parent_idx) if cache is not None and static_kv else layers.transpose
reshaped_k = reshape_layer(
x=keys, shape=[0, 0, n_head, d_key], inplace=True)
k = transpose_layer(x=reshaped_k, perm=[0, 2, 1, 3])
reshaped_v = reshape_layer(
x=values, shape=[0, 0, n_head, d_value], inplace=True)
v = transpose_layer(x=reshaped_v, perm=[0, 2, 1, 3])
if cache is not None: # only for faster inference
cache_, i = cache
if static_kv: # For encoder-decoder attention in inference
cache_k, cache_v = cache_["static_k"], cache_["static_v"]
# To init the static_k and static_v in global block.
static_cache_init = wrap_layer_with_block(
layers.assign,
fluid.default_main_program().current_block().parent_idx)
static_cache_init(
k,
fluid.default_main_program().global_block().var(
"static_k_%d" % i))
static_cache_init(
v,
fluid.default_main_program().global_block().var(
"static_v_%d" % i))
k, v = cache_k, cache_v
else: # For decoder self-attention in inference
# use cache and concat time steps.
cache_k, cache_v = cache_["k"], cache_["v"]
k = layers.concat([cache_k, k], axis=2)
v = layers.concat([cache_v, v], axis=2)
cache_["k"], cache_["v"] = (k, v)
return q, k, v
def __combine_heads(x):
"""
        Transpose and then reshape the last two dimensions of input tensor x
so that it becomes one dimension, which is reverse to __split_heads.
"""
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
return layers.reshape(
x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
inplace=True)
def _shift(BD):
"""
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
to
0 1 2
-1 0 1
-2 -1 0
:param BD: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = BD.size()
zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))
BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),
shape=(bsz, n_head, -1, max_len))
BD = layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))
BD = BD[:, :, :, max_len:]
return BD
def _transpose_shift(E):
"""
-3 -2 -1 0 1 2
-30 -20 -10 00 10 20
-300 -200 -100 000 100 200
to
0 -10 -200
1 00 -100
2 10 000
:param E: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = E.size()
zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))
E = layers.reshape(x=layers.concat([E, zero_pad], axis=-1),
shape=(bsz, n_head, -1, max_len))
indice = layers.arange(start=0, end=max_len, dtype=int)
E = layers.index_select(input=E, index=indice, dim=-2)
E = layers.transpose(E, perm=[0, 1, 3, 2])
return E
def scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key, dropout_rate):
        """
        Scaled Dot-Product Attention
        Change:
            - Different from the original one.
              We will remove the scale factor math: \sqrt{d_k} according to the paper.
            - Bias for attention and position encoding are added.
        """
        # product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_key**-0.5)
        # now q, k should be shaped like
        # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
        # pos_enc should be shaped like [2 X l, head_dim], and head_dim = d_key
        max_sequence_len = q.shape[2]
        r_r_bias = layers.create_parameter(shape=(n_head, d_key))  # [n_head, head_dim]
        r_w_bias = layers.create_parameter(shape=(n_head, d_key))  # [n_head, head_dim]
        rw_head_q = q + r_r_bias[:, None]  # [batch, n_head, max_sequence_len, head_dim]
        AC = layers.matmul(x=rw_head_q, y=k, transpose_y=True)  # [batch, n_head, max_sequence_len, max_sequence_len]
        # position bias for each head, shaped like [n_head, 2 X max_sequence_len].
        # Then add two dimensions at `batch` and `maxlen`.
        D_ = layers.matmul(x=r_w_bias, y=pos_enc, transpose_y=True)[None, :, None]
        # position bias for each query, shaped like [batch, n_head, max_len, 2 X max_len]
        B_ = layers.matmul(x=q, y=pos_enc, transpose_y=True)
        # bias for each key, shaped like [batch, n_head, max_len, 2 X max_len]
        E_ = layers.matmul(x=k, y=pos_enc, transpose_y=True)
        # shaped like [batch, n_head, max_len, 2 X max_len]
        # change it to [batch, n_head, max_len, max_len]
        BD = B_ + D_
        BDE = _shift(BD) + _transpose_shift(E_)
        product = AC + BDE
        # product = layers.matmul(x=q, y=k, transposed_y=True, alpha=1.0) + \
        #     layers.matmul(x=q, y=pos_enc, transposed_y=True) +\
        #     layers.transpose(x=last_two, perm=[0, 1, 3, 2])
        if attn_bias:
            product += attn_bias
        weights = layers.softmax(product)
        if dropout_rate:
            weights = layers.dropout(
                weights,
                dropout_prob=dropout_rate,
                seed=dropout_seed,
                is_test=False)
        out = layers.matmul(weights, v)
        return out
q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)
ctx_multiheads = scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key,
dropout_rate)
out = __combine_heads(ctx_multiheads)
# Project back to the model size.
proj_out = layers.fc(input=out,
size=d_model,
bias_attr=False,
num_flatten_dims=2)
return proj_out
def positionwise_feed_forward(x, d_inner_hid, d_hid, dropout_rate):
"""
Position-wise Feed-Forward Networks.
This module consists of two linear transformations with a ReLU activation
in between, which is applied to each position separately and identically.
"""
hidden = layers.fc(input=x,
size=d_inner_hid,
num_flatten_dims=2,
act="relu")
if dropout_rate:
hidden = layers.dropout(
hidden, dropout_prob=dropout_rate, seed=dropout_seed, is_test=False)
out = layers.fc(input=hidden, size=d_hid, num_flatten_dims=2)
return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):
"""
    Add residual connection, layer normalization and dropout to the out tensor
optionally according to the value of process_cmd.
This will be used before or after multi-head attention and position-wise
feed-forward networks.
"""
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = layers.layer_norm(
out,
begin_norm_axis=len(out.shape) - 1,
param_attr=fluid.initializer.Constant(1.),
bias_attr=fluid.initializer.Constant(0.))
elif cmd == "d": # add dropout
if dropout_rate:
out = layers.dropout(
out,
dropout_prob=dropout_rate,
seed=dropout_seed,
is_test=False)
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def encoder_layer(enc_input, attn_bias, n_head, d_key,
d_value, d_model, d_inner_hid, pos_enc, prepostprocess_dropout,
attention_dropout, relu_dropout, preprocess_cmd='n',
postprocess_cmd='da'):
"""
The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention layer followed by a
    position-wise feed-forward network; both components are wrapped with
    post_process_layer to add the residual connection, layer normalization
    and dropout.
Args:
enc_input: Embedded input for the sentences.
(batch_size, len_sentence, embedding_dim)
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
        n_head: Number of attention heads.
d_key: Dimension of keys and queries.
d_value: Dimension of values.
d_model: Dimension of the encoder layer outputs.
d_inner_hid: Dimension of the feed forward layer inside.
        pos_enc: Relative position encoder. (2 X max_len, d_key).
prepostprocess_dropout: The dropout probability of the process layer
before or after.
attention_dropout: Dropout probability in the attention layer.
relu_dropout: The activation in the feed forward layer is `relu`.
Set the probability here.
post/preprocess_cmd: The layers should be stacked. Use its default values
unless the model needs to be changed.
Return:
An encoder layer output, (bsz, max_len, d_model).
"""
attn_output = multi_head_attention(
pre_process_layer(enc_input, preprocess_cmd, prepostprocess_dropout),
None, None, attn_bias, d_key, d_value, d_model, pos_enc,
n_head, attention_dropout
)
attn_output = post_process_layer(enc_input, attn_output,
postprocess_cmd, prepostprocess_dropout)
ffd_output = positionwise_feed_forward(
pre_process_layer(attn_output, preprocess_cmd, prepostprocess_dropout),
d_inner_hid, d_model, relu_dropout
)
return post_process_layer(attn_output, ffd_output,
postprocess_cmd, prepostprocess_dropout)
def encoder(enc_input, attn_bias, n_layer, n_head,
d_key, d_value, d_model, d_inner_hid, pos_enc,
            prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd='n',
postprocess_cmd='da'):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
Args:
enc_input: Embedded input for the sentences.
(batch_size, len_sentence, embedding_dim)
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
        n_layer: Number of layers stacked together.
n_head: Number of attention heads.
d_key: Dimension of keys and queries.
d_value: Dimension of values.
d_model: Dimension of the encoder layer outputs.
d_inner_hid: Dimension of the feed forward layer inside.
        pos_enc: Relative position encoder. (2 X max_len, d_key).
prepostprocess_dropout: The dropout probability of the process layer
before or after.
attention_dropout: Dropout probability in the attention layer.
relu_dropout: The activation in the feed forward layer is `relu`.
Set the probability here.
post/preprocess_cmd: The layers should be stacked. Use its default values
unless the model needs to be changed.
Return:
Encoder output of the sentence input.
(batch size, sentence len, d_model)
"""
for i in range(n_layer):
enc_output = encoder_layer(enc_input, attn_bias, n_head,
d_key, d_value, d_model,d_inner_hid, pos_enc,
prepostprocess_dropout, attention_dropout,relu_dropout,
preprocess_cmd, postprocess_cmd
)
enc_input = enc_output
enc_output = pre_process_layer(enc_output,
        preprocess_cmd, prepostprocess_dropout)
    return enc_output
transformer.py
"""
Encoder part is inherited from
https://github.com/PaddlePaddle/models/tree/release/1.8/PaddleNLP/machine_translation/transformer
Note the change in `scaled_dot_product_attention`.
"""
from functools import partial
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
dropout_seed = None
def wrap_layer_with_block(layer, block_idx):
"""
Make layer define support indicating block, by which we can add layers
to other blocks within current block. This will make it easy to define
cache among while loop.
"""
class BlockGuard(object):
"""
BlockGuard class.
BlockGuard class is used to switch to the given block in a program by
using the Python `with` keyword.
"""
def __init__(self, block_idx=None, main_program=None):
self.main_program = fluid.default_main_program(
) if main_program is None else main_program
self.old_block_idx = self.main_program.current_block().idx
self.new_block_idx = block_idx
def __enter__(self):
self.main_program.current_block_idx = self.new_block_idx
def __exit__(self, exc_type, exc_val, exc_tb):
self.main_program.current_block_idx = self.old_block_idx
if exc_type is not None:
return False # re-raise exception
return True
def layer_wrapper(*args, **kwargs):
with BlockGuard(block_idx):
return layer(*args, **kwargs)
return layer_wrapper
def multi_head_attention(queries, keys, values, attn_bias, d_key, d_value, d_model, pos_enc,
n_head=1, dropout_rate=0., cache=None, static_kv=False):
"""
Multi-Head Attention. Note that attn_bias is added to the logit before
computing softmax activation to mask certain selected positions so that
    they will not be considered in the attention weights.
Args:
queries: input_sentence, shaped like [bsz, len_sentence, embedding_dim].
keys: Most of the time, you just need queries, so set this value as None.
values: Most of the time, you just need queries, so set this value as None.
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
d_key: The dimension wanted for keys and queries.
d_value: The dimension wanted for values.
d_model: output dimension of fully connected layer.
pos_enc: Relative Positional encoder, whose shape is [2 X len_sentence, d_key].
n_head: Number of attention heads.
dropout_rate: probability on dropout layer.
Return:
The result of this multi-head attention layer.
shape: [batch size, sentence len, d_model].
"""
keys = queries if keys is None else keys
values = keys if values is None else values
if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: queries, keys and values should all be 3-D tensors."
)
def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
q = layers.fc(input=queries, size=d_key * n_head,
bias_attr=False, num_flatten_dims=2)
fc_layer = wrap_layer_with_block(
layers.fc, fluid.default_main_program().current_block().parent_idx
) if cache is not None and static_kv else layers.fc
k = fc_layer(input=keys, size=d_key * n_head,
bias_attr=False, num_flatten_dims=2)
v = fc_layer(input=values, size=d_value * n_head,
bias_attr=False, num_flatten_dims=2)
return q, k, v
    def __split_heads_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Reshape input tensors at the last dimension to split multi-heads
and then transpose. Specifically, transform the input tensor with shape
[bs, max_sequence_length, n_head * hidden_dim] to the output tensor
with shape [bs, n_head, max_sequence_length, hidden_dim].
"""
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
reshaped_q = layers.reshape(
x=queries, shape=[0, 0, n_head, d_key], inplace=True)
# permute the dimensions into:
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
q = layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
# For encoder-decoder attention in inference, insert the ops and vars
# into global block to use as cache among beam search.
reshape_layer = wrap_layer_with_block(
layers.reshape,
fluid.default_main_program().current_block(
).parent_idx) if cache is not None and static_kv else layers.reshape
transpose_layer = wrap_layer_with_block(
layers.transpose,
fluid.default_main_program().current_block().
parent_idx) if cache is not None and static_kv else layers.transpose
reshaped_k = reshape_layer(
x=keys, shape=[0, 0, n_head, d_key], inplace=True)
k = transpose_layer(x=reshaped_k, perm=[0, 2, 1, 3])
reshaped_v = reshape_layer(
x=values, shape=[0, 0, n_head, d_value], inplace=True)
v = transpose_layer(x=reshaped_v, perm=[0, 2, 1, 3])
if cache is not None: # only for faster inference
cache_, i = cache
if static_kv: # For encoder-decoder attention in inference
cache_k, cache_v = cache_["static_k"], cache_["static_v"]
# To init the static_k and static_v in global block.
static_cache_init = wrap_layer_with_block(
layers.assign,
fluid.default_main_program().current_block().parent_idx)
static_cache_init(
k,
fluid.default_main_program().global_block().var(
"static_k_%d" % i))
static_cache_init(
v,
fluid.default_main_program().global_block().var(
"static_v_%d" % i))
k, v = cache_k, cache_v
else: # For decoder self-attention in inference
# use cache and concat time steps.
cache_k, cache_v = cache_["k"], cache_["v"]
k = layers.concat([cache_k, k], axis=2)
v = layers.concat([cache_v, v], axis=2)
cache_["k"], cache_["v"] = (k, v)
return q, k, v
def __combine_heads(x):
"""
        Transpose and then reshape the last two dimensions of input tensor x
so that it becomes one dimension, which is reverse to __split_heads.
"""
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
return layers.reshape(
x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
inplace=True)
def _shift(BD):
"""
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
to
0 1 2
-1 0 1
-2 -1 0
:param BD: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = BD.size()
zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))
BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),
shape=(bsz, n_head, -1, max_len))
BD = layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))
BD = BD[:, :, :, max_len:]
return BD
def _transpose_shift(E):
"""
-3 -2 -1 0 1 2
-30 -20 -10 00 10 20
-300 -200 -100 000 100 200
to
0 -10 -200
1 00 -100
2 10 000
:param E: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = E.size()
zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))
E = layers.reshape(x=layers.concat([E, zero_pad], axis=-1),
shape=(bsz, n_head, -1, max_len))
indice = layers.arange(start=0, end=max_len, dtype=int)
E = layers.index_select(input=E, index=indice, dim=-2)
E = layers.transpose(E, perm=[0, 1, 3, 2])
return E
def scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key, dropout_rate):
"""
Scaled Dot-Product Attention
Change:
- Different from the original one.
We will remove the scale factor math: \sqrt{d_k} according to the paper.
- Bias for attention and position encoding are added.
"""
# product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_key**-0.5)
# now q, k should be shaped like
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
# pos_enc should be shaped like [2 X l, head_dim], and head_dim = d_key
max_sequence_len = q.shape[2]
r_r_bias = layers.create_parameter(shape=(n_head, d_key)) # [n_head, head_dim]
r_w_bias = layers.create_parameter(shape=(n_head, d_key)) # [n_head, head_dim]
rw_head_q = q + r_r_bias[:, None] # [batch, n_head, max_sequence_len, head_dim]
AC = layers.matmul(x=rw_head_q, y=k, transpose_y=True) # [batch, n_head, max_sequence_len, max_seqence_len]
# position bias for each head, shaped like [n_head, 2 X max_sequence_len].
# Then add two dimensions at `batch` and `maxlen`.
D_ = layers.matmul(x=r_w_bias, y=pos_enc, transpose_y=True)[None, :, None]
# position bias for each query, shaped like [batch, n_head, max_len, 2 X max_len]
B_ = layers.matmul(x=q, y=pos_enc, transpose_y=True)
# bias for each key, shaped like [batch, n_head, max_len, 2 X max_len]
E_ = layers.matmul(x=k, y=pos_enc, transpose_y=True)
# shaped like [batch, n_head, max_len, 2 X max_len]
# change it to [batch, n_head, max_len, max_len]
BD = B_ + D_
BDE = _shift(BD) + _transpose_shift(E_)
product = AC + BDE
# product = layers.matmul(x=q, y=k, transposed_y=True, alpha=1.0) + \
# layers.matmul(x=q, y=pos_enc, transposed_y=True) +\
# layers.transpose(x=last_two, perm=[0, 1, 3, 2])
if attn_bias:
product += attn_bias
weights = layers.softmax(product)
if dropout_rate:
weights = layers.dropout(
weights,
dropout_prob=dropout_rate,
seed=dropout_seed,
is_test=False)
out = layers.matmul(weights, v)
return out
q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)
ctx_multiheads = scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key,
dropout_rate)
out = __combine_heads(ctx_multiheads)
# Project back to the model size.
proj_out = layers.fc(input=out,
size=d_model,
bias_attr=False,
num_flatten_dims=2)
return proj_out
def positionwise_feed_forward(x, d_inner_hid, d_hid, dropout_rate):
"""
Position-wise Feed-Forward Networks.
This module consists of two linear transformations with a ReLU activation
in between, which is applied to each position separately and identically.
"""
hidden = layers.fc(input=x,
size=d_inner_hid,
num_flatten_dims=2,
act="relu")
if dropout_rate:
hidden = layers.dropout(
hidden, dropout_prob=dropout_rate, seed=dropout_seed, is_test=False)
out = layers.fc(input=hidden, size=d_hid, num_flatten_dims=2)
return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):
"""
    Add residual connection, layer normalization and dropout to the out tensor
optionally according to the value of process_cmd.
This will be used before or after multi-head attention and position-wise
feed-forward networks.
"""
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = layers.layer_norm(
out,
begin_norm_axis=len(out.shape) - 1,
param_attr=fluid.initializer.Constant(1.),
bias_attr=fluid.initializer.Constant(0.))
elif cmd == "d": # add dropout
if dropout_rate:
out = layers.dropout(
out,
dropout_prob=dropout_rate,
seed=dropout_seed,
is_test=False)
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def encoder_layer(enc_input, attn_bias, n_head, d_key,
d_value, d_model, d_inner_hid, pos_enc, prepostprocess_dropout,
attention_dropout, relu_dropout, preprocess_cmd='n',
postprocess_cmd='da'):
"""
The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention layer followed by a
    position-wise feed-forward network; both components are wrapped with
    post_process_layer to add the residual connection, layer normalization
    and dropout.
Args:
enc_input: Embedded input for the sentences.
(batch_size, len_sentence, embedding_dim)
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
        n_head: Number of attention heads.
d_key: Dimension of keys and queries.
d_value: Dimension of values.
d_model: Dimension of the encoder layer outputs.
d_inner_hid: Dimension of the feed forward layer inside.
        pos_enc: Relative position encoder. (2 X max_len, d_key).
prepostprocess_dropout: The dropout probability of the process layer
before or after.
attention_dropout: Dropout probability in the attention layer.
relu_dropout: The activation in the feed forward layer is `relu`.
Set the probability here.
post/preprocess_cmd: The layers should be stacked. Use its default values
unless the model needs to be changed.
Return:
An encoder layer output, (bsz, max_len, d_model).
"""
attn_output = multi_head_attention(
pre_process_layer(enc_input, preprocess_cmd, prepostprocess_dropout),
None, None, attn_bias, d_key, d_value, d_model, pos_enc,
n_head, attention_dropout
)
attn_output = post_process_layer(enc_input, attn_output,
postprocess_cmd, prepostprocess_dropout)
ffd_output = positionwise_feed_forward(
pre_process_layer(attn_output, preprocess_cmd, prepostprocess_dropout),
d_inner_hid, d_model, relu_dropout
)
return post_process_layer(attn_output, ffd_output,
postprocess_cmd, prepostprocess_dropout)
def encoder(enc_input, attn_bias, n_layer, n_head,
d_key, d_value, d_model, d_inner_hid, pos_enc,
            prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd='n',
postprocess_cmd='da'):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
Args:
enc_input: Embedded input for the sentences.
(batch_size, len_sentence, embedding_dim)
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
        n_layer: Number of layers stacked together.
n_head: Number of attention heads.
d_key: Dimension of keys and queries.
d_value: Dimension of values.
d_model: Dimension of the encoder layer outputs.
d_inner_hid: Dimension of the feed forward layer inside.
        pos_enc: Relative position encoder. (2 X max_len, d_key).
prepostprocess_dropout: The dropout probability of the process layer
before or after.
attention_dropout: Dropout probability in the attention layer.
relu_dropout: The activation in the feed forward layer is `relu`.
Set the probability here.
post/preprocess_cmd: The layers should be stacked. Use its default values
unless the model needs to be changed.
Return:
Encoder output of the sentence input.
(batch size, sentence len, d_model)
"""
for i in range(n_layer):
enc_output = encoder_layer(enc_input, attn_bias, n_head,
d_key, d_value, d_model,d_inner_hid, pos_enc,
prepostprocess_dropout, attention_dropout,relu_dropout,
preprocess_cmd, postprocess_cmd
)
enc_input = enc_output
enc_output = pre_process_layer(enc_output,
        preprocess_cmd, prepostprocess_dropout)
    return enc_output
transformer.py
"""
Encoder part is inherited from
https://github.com/PaddlePaddle/models/tree/release/1.8/PaddleNLP/machine_translation/transformer
Note the change in `scaled_dot_product_attention`.
"""
from functools import partial
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
dropout_seed = None
def wrap_layer_with_block(layer, block_idx):
"""
Make layer define support indicating block, by which we can add layers
to other blocks within current block. This will make it easy to define
cache among while loop.
"""
class BlockGuard(object):
"""
BlockGuard class.
BlockGuard class is used to switch to the given block in a program by
using the Python `with` keyword.
"""
def __init__(self, block_idx=None, main_program=None):
self.main_program = fluid.default_main_program(
) if main_program is None else main_program
self.old_block_idx = self.main_program.current_block().idx
self.new_block_idx = block_idx
def __enter__(self):
self.main_program.current_block_idx = self.new_block_idx
def __exit__(self, exc_type, exc_val, exc_tb):
self.main_program.current_block_idx = self.old_block_idx
if exc_type is not None:
return False # re-raise exception
return True
def layer_wrapper(*args, **kwargs):
with BlockGuard(block_idx):
return layer(*args, **kwargs)
return layer_wrapper
def multi_head_attention(queries, keys, values, attn_bias, d_key, d_value, d_model, pos_enc,
n_head=1, dropout_rate=0., cache=None, static_kv=False):
"""
Multi-Head Attention. Note that attn_bias is added to the logit before
computing softmax activation to mask certain selected positions so that
    they will not be considered in the attention weights.
Args:
queries: input_sentence, shaped like [bsz, len_sentence, embedding_dim].
keys: Most of the time, you just need queries, so set this value as None.
values: Most of the time, you just need queries, so set this value as None.
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
d_key: The dimension wanted for keys and queries.
d_value: The dimension wanted for values.
d_model: output dimension of fully connected layer.
pos_enc: Relative Positional encoder, whose shape is [2 X len_sentence, d_key].
n_head: Number of attention heads.
dropout_rate: probability on dropout layer.
Return:
The result of this multi-head attention layer.
shape: [batch size, sentence len, d_model].
"""
keys = queries if keys is None else keys
values = keys if values is None else values
if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: queries, keys and values should all be 3-D tensors."
)
def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
q = layers.fc(input=queries, size=d_key * n_head,
bias_attr=False, num_flatten_dims=2)
fc_layer = wrap_layer_with_block(
layers.fc, fluid.default_main_program().current_block().parent_idx
) if cache is not None and static_kv else layers.fc
k = fc_layer(input=keys, size=d_key * n_head,
bias_attr=False, num_flatten_dims=2)
v = fc_layer(input=values, size=d_value * n_head,
bias_attr=False, num_flatten_dims=2)
return q, k, v
    def __split_heads_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Reshape input tensors at the last dimension to split multi-heads
and then transpose. Specifically, transform the input tensor with shape
[bs, max_sequence_length, n_head * hidden_dim] to the output tensor
with shape [bs, n_head, max_sequence_length, hidden_dim].
"""
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
reshaped_q = layers.reshape(
x=queries, shape=[0, 0, n_head, d_key], inplace=True)
# permute the dimensions into:
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
q = layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
# For encoder-decoder attention in inference, insert the ops and vars
# into global block to use as cache among beam search.
reshape_layer = wrap_layer_with_block(
layers.reshape,
fluid.default_main_program().current_block(
).parent_idx) if cache is not None and static_kv else layers.reshape
transpose_layer = wrap_layer_with_block(
layers.transpose,
fluid.default_main_program().current_block().
parent_idx) if cache is not None and static_kv else layers.transpose
reshaped_k = reshape_layer(
x=keys, shape=[0, 0, n_head, d_key], inplace=True)
k = transpose_layer(x=reshaped_k, perm=[0, 2, 1, 3])
reshaped_v = reshape_layer(
x=values, shape=[0, 0, n_head, d_value], inplace=True)
v = transpose_layer(x=reshaped_v, perm=[0, 2, 1, 3])
if cache is not None: # only for faster inference
cache_, i = cache
if static_kv: # For encoder-decoder attention in inference
cache_k, cache_v = cache_["static_k"], cache_["static_v"]
# To init the static_k and static_v in global block.
static_cache_init = wrap_layer_with_block(
layers.assign,
fluid.default_main_program().current_block().parent_idx)
static_cache_init(
k,
fluid.default_main_program().global_block().var(
"static_k_%d" % i))
static_cache_init(
v,
fluid.default_main_program().global_block().var(
"static_v_%d" % i))
k, v = cache_k, cache_v
else: # For decoder self-attention in inference
# use cache and concat time steps.
cache_k, cache_v = cache_["k"], cache_["v"]
k = layers.concat([cache_k, k], axis=2)
v = layers.concat([cache_v, v], axis=2)
cache_["k"], cache_["v"] = (k, v)
return q, k, v
def __combine_heads(x):
"""
Transpose and then reshape the last two dimensions of input tensor x
so that it becomes one dimension, which is reverse to __split_heads.
"""
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
return layers.reshape(
x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
inplace=True)
def _shift(BD):
"""
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
to
0 1 2
-1 0 1
-2 -1 0
:param BD: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = BD.shape
zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1), dtype='float32')
BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),
shape=(bsz, n_head, -1, max_len))
BD = layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))
BD = BD[:, :, :, max_len:]
return BD
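# Illustration of the shift trick (not executed): for max_len = 3 every input
# row reads [-3 -2 -1 0 1 2]; appending one zero column and reshaping offsets
# each successive row by one position, so after keeping the last max_len
# columns row i starts at relative position -i, giving the pattern shown in
# the docstring above.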
def _transpose_shift(E):
"""
-3 -2 -1 0 1 2
-30 -20 -10 00 10 20
-300 -200 -100 000 100 200
to
0 -10 -200
1 00 -100
2 10 000
:param E: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = E.shape
zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1), dtype='float32')
E = layers.reshape(x=layers.concat([E, zero_pad], axis=-1),
shape=(bsz, n_head, -1, max_len))
indice = layers.arange(start=0, end=max_len, dtype=int)
E = layers.index_select(input=E, index=indice, dim=-2)
E = layers.transpose(E, perm=[0, 1, 3, 2])
return E
def scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key, dropout_rate):
"""
Scaled Dot-Product Attention
Changes from the original implementation:
- The scale factor \sqrt{d_k} is removed, following the paper.
- Biases for attention and relative position encoding are added.
"""
# product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_key**-0.5)
# now q, k should be shaped like
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
# pos_enc should be shaped like [2 X l, head_dim], and head_dim = d_key
max_sequence_len = q.shape[2]
r_r_bias = layers.create_parameter(shape=(n_head, d_key), dtype='float32') # [n_head, head_dim]
r_w_bias = layers.create_parameter(shape=(n_head, d_key), dtype='float32') # [n_head, head_dim]
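# r_r_bias and r_w_bias are learned per-head global biases, analogous to the
# u and v vectors of Transformer-XL style relative attention: one is added to
# the query for the content term (AC below), the other is multiplied with the
# relative position encoding for the global position term (D_ below).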
rw_head_q = q + r_r_bias[:, None] # [batch, n_head, max_sequence_len, head_dim]
AC = layers.matmul(x=rw_head_q, y=k, transpose_y=True) # [batch, n_head, max_sequence_len, max_sequence_len]
# position bias for each head, shaped like [n_head, 2 X max_sequence_len].
# Then add two dimensions at `batch` and `maxlen`.
D_ = layers.matmul(x=r_w_bias, y=pos_enc, transpose_y=True)[None, :, None]
# position bias for each query, shaped like [batch, n_head, max_len, 2 X max_len]
B_ = layers.matmul(x=q, y=pos_enc, transpose_y=True)
# bias for each key, shaped like [batch, n_head, max_len, 2 X max_len]
E_ = layers.matmul(x=k, y=pos_enc, transpose_y=True)
# shaped like [batch, n_head, max_len, 2 X max_len]
# change it to [batch, n_head, max_len, max_len]
BD = B_ + D_
BDE = _shift(BD) + _transpose_shift(E_)
product = AC + BDE
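# product combines the content-based term AC with the shifted position-based
# terms B, D and E, mirroring the additive decomposition used in
# relative-position attention; no 1/sqrt(d_key) scaling is applied, matching
# the change noted in the docstring.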
# product = layers.matmul(x=q, y=k, transposed_y=True, alpha=1.0) + \
# layers.matmul(x=q, y=pos_enc, transposed_y=True) +\
# layers.transpose(x=last_two, perm=[0, 1, 3, 2])
if attn_bias:
product += attn_bias
weights = layers.softmax(product)
if dropout_rate:
weights = layers.dropout(
weights,
dropout_prob=dropout_rate,
seed=dropout_seed,
is_test=False)
out = layers.matmul(weights, v)
return out
q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)
ctx_multiheads = scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key,
dropout_rate)
out = __combine_heads(ctx_multiheads)
# Project back to the model size.
proj_out = layers.fc(input=out,
size=d_model,
bias_attr=False,
num_flatten_dims=2)
return proj_out
def positionwise_feed_forward(x, d_inner_hid, d_hid, dropout_rate):
"""
Position-wise Feed-Forward Networks.
This module consists of two linear transformations with a ReLU activation
in between, which is applied to each position separately and identically.
"""
hidden = layers.fc(input=x,
size=d_inner_hid,
num_flatten_dims=2,
act="relu")
if dropout_rate:
hidden = layers.dropout(
hidden, dropout_prob=dropout_rate, seed=dropout_seed, is_test=False)
out = layers.fc(input=hidden, size=d_hid, num_flatten_dims=2)
return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):
"""
Add residual connection, layer normalization and dropout to the out tensor,
optionally, according to the value of process_cmd.
This will be used before or after multi-head attention and position-wise
feed-forward networks.
"""
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = layers.layer_norm(
out,
begin_norm_axis=len(out.shape) - 1,
param_attr=fluid.initializer.Constant(1.),
bias_attr=fluid.initializer.Constant(0.))
elif cmd == "d": # add dropout
|
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
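# pre_process_layer binds prev_out=None, so a preprocess command such as "n"
# only applies layer normalization (and optionally dropout) without a residual
# connection; post_process_layer keeps prev_out and is used with "da"
# (dropout, then add the residual).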
def encoder_layer(enc_input, attn_bias, n_head, d_key,
d_value, d_model, d_inner_hid, pos_enc, prepostprocess_dropout,
attention_dropout, relu_dropout, preprocess_cmd='n',
postprocess_cmd='da'):
"""
The encoder layers that can be stacked to form a deep encoder.
This module consists of a multi-head (self) attention followed by
a position-wise feed-forward network, and both components are wrapped
with the post_process_layer to add residual connection, layer normalization
and dropout.
Args:
enc_input: Embedded input for the sentences.
(batch_size, len_sentence, embedding_dim)
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
n_head: Number of attention heads.
d_key: Dimension of keys and queries.
d_value: Dimension of values.
d_model: Dimension of the encoder layer outputs.
d_inner_hid: Dimension of the feed forward layer inside.
pos_enc: Relative position encoding. (2 X max_len, d_key).
prepostprocess_dropout: The dropout probability of the process layer
before or after.
attention_dropout: Dropout probability in the attention layer.
relu_dropout: Dropout probability used inside the feed-forward layer
(whose activation is `relu`).
pre/postprocess_cmd: Commands describing how the pre/post-process layers
are stacked. Use the default values unless the model needs to be changed.
Return:
An encoder layer output, (bsz, max_len, d_model).
"""
attn_output = multi_head_attention(
pre_process_layer(enc_input, preprocess_cmd, prepostprocess_dropout),
None, None, attn_bias, d_key, d_value, d_model, pos_enc,
n_head, attention_dropout
)
attn_output = post_process_layer(enc_input, attn_output,
postprocess_cmd, prepostprocess_dropout)
ffd_output = positionwise_feed_forward(
pre_process_layer(attn_output, preprocess_cmd, prepostprocess_dropout),
d_inner_hid, d_model, relu_dropout
)
return post_process_layer(attn_output, ffd_output,
postprocess_cmd, prepostprocess_dropout)
def encoder(enc_input, attn_bias, n_layer, n_head,
d_key, d_value, d_model, d_inner_hid, pos_enc,
prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd='n',
postprocess_cmd='da'):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
Args:
enc_input: Embedded input for the sentences.
(batch_size, len_sentence, embedding_dim)
attn_bias: Bias added to the attention output before softmax,
in case you want to mask some positions. Just set values as `inf`
on these positions.
n_layer: Number of encoder layers stacked together.
n_head: Number of attention heads.
d_key: Dimension of keys and queries.
d_value: Dimension of values.
d_model: Dimension of the encoder layer outputs.
d_inner_hid: Dimension of the feed forward layer inside.
pos_enc: Relative position encoding. (2 X max_len, d_key).
prepostprocess_dropout: The dropout probability of the process layer
before or after.
attention_dropout: Dropout probability in the attention layer.
relu_dropout: Dropout probability used inside the feed-forward layer
(whose activation is `relu`).
pre/postprocess_cmd: Commands describing how the pre/post-process layers
are stacked. Use the default values unless the model needs to be changed.
Return:
Encoder output of the sentence input.
(batch size, sentence len, d_model)
"""
for i in range(n_layer):
enc_output = encoder_layer(enc_input, attn_bias, n_head,
d_key, d_value, d_model,d_inner_hid, pos_enc,
prepostprocess_dropout, attention_dropout,relu_dropout,
preprocess_cmd, postprocess_cmd
)
enc_input = enc_output
enc_output = pre_process_layer(enc_output,
preprocess_cmd, prepostprocess_dropout)
return enc_output
| if dropout_rate:
out = layers.dropout(
out,
dropout_prob=dropout_rate,
seed=dropout_seed,
is_test=False) | conditional_block |
BlockChain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
"time"
)
const dbName = "blockchain_%s.db"
const blockTableName = "blocks"
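// Note: dbName is a format string, so each node uses its own file
// blockchain_<nodeID>.db; inside the "blocks" bucket the key "l" always
// stores the hash of the latest block.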
type BlockChain struct {
Tip []byte // hash of the latest block
DB *bolt.DB
}
func GetBlockChainObject(nodeID string) *BlockChain {
if !dbExsits(nodeID){
fmt.Println("创世区块不存在!")
os.Exit(1)
}
var Tip []byte
dbName := fmt.Sprintf(dbName,nodeID)
fmt.Println(dbName)
db, err := bolt.Open(dbName, 0600, nil)
if err != nil {
log.Fatal(err)
}
err = db.View(func(tx *bolt.Tx) error {
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
Tip = b.Get([]byte("l"))
}
return nil
})
if err!=nil{
log.Panic(err)
}
//fmt.Println("loaded the latest blockchain successfully!")
// read the existing blockchain
return &BlockChain{Tip,db}
}
// 1. Create a blockchain with a genesis block
func CreateBlockchainWithGenesisBlock(address string,nodeID string) *BlockChain{
// check whether the database already exists
if(dbExsits(nodeID)){
fmt.Println("创世区块已存在!")
// the latest blockchain can be read from the database instead
os.Exit(1)
}
// when the database does not exist, create the blockchain with the genesis block
fmt.Println("正在创建创世区块。。。")
dbName := fmt.Sprintf(dbName,nodeID)
db, err := bolt.Open(dbName, 0600, nil)
if err != nil {
log.Fatal(err)
}
//defer db.Close()
var genesisBlock *Block
err = db.Update(func(tx *bolt.Tx)error{
b,err := tx.CreateBucket([]byte(blockTableName))
if err!=nil{
log.Panic(err)
}
if b!=nil{
//create a coinbase transaction
txCoinbase := NewCoinbaseTransaction(address)
//create the genesis block
genesisBlock = CreateGenesisBlock([]*Transaction{txCoinbase})
//store the genesis block in the database
err=b.Put(genesisBlock.Hash,genesisBlock.Serialize())
if err!=nil{
log.Panic(err)
}
//store the hash of the latest block
err=b.Put([]byte("l"),genesisBlock.Hash)
if err!=nil{
log.Panic(err)
}
}
return nil
})
fmt.Println("创建创世区块成功!")
return &BlockChain{genesisBlock.Hash,db}
}
// add a block to the blockchain
//func(blc *BlockChain) AddBlockToBlockchain(txs []*Transaction){
// var height int64
// var preHash []byte
// //获取新增区块的height和preHash
// fmt.Println("开始挖矿。。。")
// err:=blc.DB.View(func(tx *bolt.Tx)error{
// b := tx.Bucket([]byte(blockTableName))
// if b!=nil{
// //blockHash := b.Get([]byte("l"))
// block := DeSerialize(b.Get(blc.Tip))
// height = block.Height+1
// preHash = block.Hash
// }
// return nil
// })
// if err!=nil{
// log.Panic(err)
// }
// // 创建新区块并添加数据库
//
// err = blc.DB.Update(func(tx *bolt.Tx) error{
// b := tx.Bucket([]byte(blockTableName))
// if b!=nil{
// newBlock := NewBlock(txs,height,preHash)
// newBlockByte := newBlock.Serialize()
// //添加区块信息值数据库
// err :=b.Put(newBlock.Hash,newBlockByte)
// if err!=nil{
// log.Panic(err)
// }
//
// //更新区块链的Tip以及数据库的l
// blc.Tip = newBlock.Hash
// b.Put([]byte("l"),newBlock.Hash)
// fmt.Println("挖矿成功!")
// }
//
// return nil
// })
// if err!=nil{
// log.Panic(err)
// }
//
//}
// iterate over the chain and print the information of every block
func (blc *BlockChain) PrintChain(){
blockchainIterator := blc.Iterator()
for {
block := blockchainIterator.Next()
fmt.Printf("Height: %d\n",block.Height)
fmt.Printf("PrevBlockHash: %x\n",block.PrevBlockHash)
fmt.Printf("TimeStamp: %s\n",time.Unix(block.Timestamp,0).Format("2006-01-02 15:04:05"))
fmt.Printf("Hash: %x\n",block.Hash)
fmt.Printf("Nonce: %d\n",block.Nonce)
fmt.Println("Txs:")
for _,tx := range block.Txs{
fmt.Printf("%x\n",tx.TxHash)
fmt.Println("Vins:")
for _,in:=range tx.Vins{
fmt.Printf("%x\n",in.TXHash)
fmt.Printf("%d\n",in.Vout)
//fmt.Printf("%x\n",in.Signature)
fmt.Printf("%x\n",in.Pubkey)
}
fmt.Println("Vouts:")
for _,out:=range tx.Vouts{
fmt.Printf("%d\n",out.Value)
fmt.Printf("%v\n",out.Ripemd160Hash)
}
}
fmt.Println("----------------------------------------------------------------------------------")
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
}
// check whether the database file exists
func dbExsits(nodeID string) bool{
//each node has its own database file
dbName := fmt.Sprintf(dbName,nodeID)
if _,err := os.Stat(dbName);os.IsNotExist(err){
return false
}
return true
}
// if a TXOutput belonging to the given address is unspent, it should be added to the result array
func (blockchain *BlockChain) UnUTXOs(address string,txs []*Transaction)[]*UTXO{
var unUTXOs []*UTXO
spentTXOutputs := make(map[string][]int)
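// spentTXOutputs records, per hex-encoded transaction hash, the indices of
// outputs that are already referenced by some input; outputs of the address
// that never appear in this map are collected below as unspent UTXOs.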
//
//pubKeyHash := Base58Decode([]byte(address))
//ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
//fmt.Printf("转换后%v\n",ripemd160Hash)
// first handle the transactions that have not been stored in the database yet
for _,tx := range txs {
if tx.IsCoinBaseTransaction() == false {
for _, in := range tx.Vins {
//can this input be unlocked by the address?
pubKeyHash := Base58Decode([]byte(address))
ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
if in.UnLockRipemd160Hash(ripemd160Hash) {
key := hex.EncodeToString(in.TXHash)
spentTXOutputs[key] = append(spentTXOutputs[key], in.Vout)
}
}
}
}
fmt.Println(spentTXOutputs)
for _,tx:=range txs{
// if no output of this transaction has been recorded as spent
spentArray,ok:=spentTXOutputs[hex.EncodeToString(tx.TxHash)]
if ok==false{
for index,out := range tx.Vouts{
if out.UnLockWithAddress(address) {
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs, utxo)
}
}
} else{
//Vouts
for index,out := range tx.Vouts{
//has this output been spent?
flag := false
//can the output be unlocked by this address?
if out.UnLockWithAddress(address){
//check whether it has been consumed
for _,spentIndex := range spentArray{
if spentIndex==index{
flag = true
break
}
}
//not found among the recorded spends, so this output is unspent
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs,utxo)
}
}
}
}
}
//iterate over the blocks stored in the database
blockIterator := blockchain.Iterator()
for{
block:=blockIterator.Next()
fmt.Println(block)
for _,tx:=range block.Txs {
//txHash
//Vins
if tx.IsCoinBaseTransaction() == false {
pubKeyHash := Base58Decode([]byte(address))
ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
for _, in := range tx.Vins {
//can this input be unlocked by the address?
if in.UnLockRipemd160Hash(ripemd160Hash) {
key := hex.EncodeToString(in.TXHash)
spentTXOutputs[key] = append(spentTXOutputs[key], in.Vout)
}
}
}
}
fmt.Printf("%v\n",spentTXOutputs)
for _,tx:=range block.Txs{
// if no output of this transaction has been recorded as spent
spentArray,ok:=spentTXOutputs[hex.EncodeToString(tx.TxHash)]
//fmt.Printf("ok is %s",ok)
if ok==false{
for index,out := range tx.Vouts{
if out.UnLockWithAddress(address) {
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs, utxo)
}
}
} else{
//Vouts
for index,out := range tx.Vouts{
//has this output been spent?
flag := false
//can the output be unlocked by this address?
if out.UnLockWithAddress(address){
//check whether it has been consumed
for _,spentIndex := range spentArray{
if spentIndex==index{
flag = true
break
}
}
//not found among the recorded spends, so this output is unspent
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs,utxo)
}
}
}
}
}
//termination condition: the genesis block has been reached
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return unUTXOs
}
// find the spendable outputs that are about to be consumed
func(blockchain *BlockChain) FindSpendableUTXOs(from string,amount int,txs []*Transaction)(int64,map[string][]int){
//1. collect all UTXOs of the sender
utxos:=blockchain.UnUTXOs(from,txs)
spendAbleUTXO := make(map[string][]int)
//2. iterate over the UTXOs until the amount is covered
var value int64
for _,utxo := range utxos{
value = value + utxo.Output.Value
hash := hex.EncodeToString(utxo.TxHash)
spendAbleUTXO[hash] = append(spendAbleUTXO[hash],utxo.Index)
if value>=int64(amount){
break
}
}
if value < int64(amount){
fmt.Printf("%s's fund isnt enough\n",from)
os.Exit(1)
}
return value,spendAbleUTXO
}
// mine a new block
func (blockchain *BlockChain)MineNewBlock(from []string,to []string,amount []string,nodeId string){
//fmt.Println(from)
//fmt.Println(to)
//fmt.Println(amount)
//1. build the array of transactions
//main.exe send -from "['liyuechun']" -to "['zhangqiang']" -amount "['2']"
utxoSet := &UTXOSet{blockchain}
var txs []*Transaction
var block *Block
//reward the first sender (the coinbase reward is added first so that its balance can be spent)
tx := NewCoinbaseTransaction(from[0])
txs = append(txs,tx)
//handle all transfers
for index,_ := range from{
amountint,_ := strconv.Atoi(amount[index])
//there may be several transfers whose transactions are not stored in the database yet, so the pending txs are passed in and taken into account when building a new transaction
tx := NewSimpleTransaction(from[index],to[index],int64(amountint),utxoSet,txs,nodeId)
txs = append(txs,tx)
}
//blockchain.AddBlockToBlockchain(txs)
blockchain.DB.View(func(tx *bolt.Tx) error {
b :=tx.Bucket([]byte(blockTableName))
if b!=nil{
hash:=b.Get([]byte("l"))
blockBytes := b.Get(hash)
block = DeSerialize(blockBytes)
}
return nil
})
//verify the digital signatures before building the new block
for _,tx:= range txs{
if blockchain.VerifyTransaction(tx,txs)==false{
log.Panic("签名失败!")
//os.Exit(1)
}
}
//2. build the new block
block =NewBlock(txs,block.Height+1,block.Hash)
////3. store it in the database
blockchain.DB.Update(func(tx *bolt.Tx) error {
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
b.Put(block.Hash,block.Serialize())
b.Put([]byte("l"),block.Hash)
blockchain.Tip = block.Hash
}
return nil
})
}
func(blockchain *BlockChain)GetBalance(address string) int64{
utxos := blockchain.UnUTXOs(address,[]*Transaction{})
fmt.Println(utxos)
var amount int64
for _,utxo := range utxos{
amount += utxo.Output.Value
}
return amount
}
// digital signature
func(bc *BlockChain)SignTransaction(tx *Transaction,private ecdsa.PrivateKey,txs []*Transaction){
if tx.IsCoinBaseTransaction(){
return
}
prevTxs := make(map[string]Transaction)
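// prevTxs maps the hex-encoded hash of each transaction referenced by the
// inputs to that transaction itself; tx.Sign uses it to look up the outputs
// being spent while producing the signatures.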
for _,vin := range tx.Vins{
prevTx,err := bc.FindTransaction(vin.TXHash,txs)
if err!=nil{
log.Panic(err)
}
prevTxs [hex.EncodeToString(prevTx.TxHash)] = prevTx
}
tx.Sign(private,prevTxs)
}
// find a transaction referenced during signing or verification
func(bc *BlockChain) FindTransaction(txHash []byte,txs []*Transaction)(Transaction,error){
for _,tx:=range txs{
if bytes.Compare(tx.TxHash,txHash)==0{
return *tx,nil
}
}
bci := bc.Iterator()
var hashInt big.Int
for{
block := bci.Next()
for _,tx:=range block.Txs | ash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return Transaction{},errors.New("Transaction is not found")
}
func(bc *BlockChain) VerifyTransaction(tx *Transaction,txs []*Transaction) bool{
if tx.IsCoinBaseTransaction(){
return true
}
prevTxs := make(map[string]Transaction)
for _,vin := range tx.Vins{
fmt.Println("1111")
prevTx,err := bc.FindTransaction(vin.TXHash,txs)
if err!=nil{
log.Panic(err)
}
prevTxs [hex.EncodeToString(prevTx.TxHash)] = prevTx
}
return tx.Verify(prevTxs)
}
//[string]*TXoutputs
func(blc *BlockChain)FindUTXOMap()map[string]*TXOutputs{
blcIterator := blc.Iterator()
//store information about spent outputs
spentUTXOsMap := make(map[string][]*TXInput)
utxoMap := make(map[string]*TXOutputs)
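// Note: blocks are iterated from the newest to the oldest, and within each
// block the transactions are scanned in reverse order, so the spends recorded
// from inputs are seen before the outputs they consume are examined.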
//1.spentUTXOsMap := make(map[string][]int)
for{
block := blcIterator.Next()
for i := len(block.Txs)-1;i>=0;i--{
txOutputs := &TXOutputs{[]*UTXO{}}
tx := block.Txs[i]
txHash := hex.EncodeToString(tx.TxHash)
//coinbase
//record the outputs spent by this transaction's inputs
if tx.IsCoinBaseTransaction()==false{
for _,txInput:=range tx.Vins{
txInputHash := hex.EncodeToString(txInput.TXHash)
spentUTXOsMap[txInputHash] = append(spentUTXOsMap[txInputHash],txInput)
//1.spentUTXOsMap[txInputHash] = append(spentUTXOsMap[txInputHash],txInput.Vout)
}
}
for index,out := range tx.Vouts{
txInputs := spentUTXOsMap[txHash]
if len(txInputs) > 0 {
//spent or not; assume unspent by default
flag := false
for _,in := range txInputs{
outPublicKey:= out.Ripemd160Hash
inPublicKey := in.Pubkey
if bytes.Compare(outPublicKey,Ripemd160Hash(inPublicKey))==0 && index==in.Vout{
flag = true
break
}
}
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
txOutputs.UTXOS = append(txOutputs.UTXOS,utxo)
}
}else{
utxo := &UTXO{tx.TxHash,index,out}
txOutputs.UTXOS = append(txOutputs.UTXOS,utxo)
}
}
//store the key/value pair
utxoMap[txHash] = txOutputs
}
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return utxoMap
}
// get the current height of the blockchain
func(bc *BlockChain) GetHeight() int64{
return bc.Iterator().Next().Height
}
// get the hashes of all blocks in the chain
func(bc *BlockChain) GetBlockHashes() [][]byte{
var blockHashes [][]byte
bcit := bc.Iterator()
for{
block:=bcit.Next()
blockHashes = append(blockHashes,block.Hash)
if block.isBreakLoop(){
break
}
}
return blockHashes
}
// report whether this block is the genesis block
func (block *Block)isBreakLoop() bool{
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
return true
}else{
return false
}
}
// get the block with the given hash
func (bc *BlockChain) GetBlock(hash []byte) []byte{
var blockByte []byte
bc.DB.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(blockTableName))
if b!=nil{
blockByte = b.Get(hash)
}
return nil
})
return blockByte
}
// add a block to the chain
func(bc *BlockChain) AddBlock(block *Block){
err:=bc.DB.Update(func(tx *bolt.Tx) error {
//1. get the blocks bucket
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
//check whether the incoming block already exists
if b.Get(block.Hash)!=nil{
//it already exists, nothing to add
return nil
}
//not present yet, write it to the database
err:=b.Put(block.Hash,block.Serialize())
if err!=nil{
log.Printf("sync the block failed! %v\n",err)
}
}
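// Only move the "l" key (and the in-memory Tip) forward when the incoming
// block is higher than the block currently recorded as the latest one.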
blockHash := b.Get([]byte("l"))
latesBlock := b.Get(blockHash)
rawBlock := DeSerialize(latesBlock)
if rawBlock.Height<block.Height{
b.Put([]byte("l"),block.Hash)
bc.Tip = block.Hash
}
return nil
})
if err!=nil{
log.Printf("update the db when insert the new block failed!%v\n",err)
}
fmt.Printf("the new block is added!\n")
} | {
if bytes.Compare(tx.TxHash,txHash)==0{
return *tx,nil
}
}
hashInt.SetBytes(block.PrevBlockH | conditional_block |
BlockChain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
"time"
)
const dbName = "blockchain_%s.db"
const blockTableName = "blocks"
type BlockChain struct {
Tip []byte //最新区块的hash
DB *bolt.DB
}
func GetBlockChainObject(nodeID string) *BlockChain {
if !dbExsits(nodeID){
fmt.Println("创世区块不存在!")
os.Exit(1)
}
var Tip []byte
dbName := fmt.Sprintf(dbName,nodeID)
fmt.Println(dbName)
db, err := bolt.Open(dbName, 0600, nil)
if err != nil {
log.Fatal(err)
}
err = db.View(func(tx *bolt.Tx) error {
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
Tip = b.Get([]byte("l"))
}
return nil
})
if err!=nil{
log.Panic(err)
}
//fmt.Println("读取最新区块链成功!")
//读取区块链
return &BlockChain{Tip,db}
}
//1. 创建带有创世区块的区块链
func CreateBlockchainWithGenesisBlock(address string,nodeID string) *BlockChain{
//判断是否存在数据库
if(dbExsits(nodeID)){
fmt.Println("创世区块已存在!")
//在数据库中读取最新区块链
os.Exit(1)
}
// 当数据库不存在时,创建创世区块链
fmt.Println("正在创建创世区块。。。")
dbName := fmt.Sprintf(dbName,nodeID)
db, err := bolt.Open(dbName, 0600, nil)
if err != nil {
log.Fatal(err)
}
//defer db.Close()
var genesisBlock *Block
err = db.Update(func(tx *bolt.Tx)error{
b,err := tx.CreateBucket([]byte(blockTableName))
if err!=nil{
log.Panic(err)
}
if b!=nil{
//创建一个coinbase transaction
txCoinbase := NewCoinbaseTransaction(address)
//创建创世区块
genesisBlock = CreateGenesisBlock([]*Transaction{txCoinbase})
//将创世区块放入数据库
err=b.Put(genesisBlock.Hash,genesisBlock.Serialize())
if err!=nil{
log.Panic(err)
}
//存储最新的区块的hash
err=b.Put([]byte("l"),genesisBlock.Hash)
if err!=nil{
log.Panic(err)
}
}
return nil
})
fmt.Println("创建创世区块成功!")
return &BlockChain{genesisBlock.Hash,db}
}
// 增加区块到区块链中
//func(blc *BlockChain) AddBlockToBlockchain(txs []*Transaction){
// var height int64
// var preHash []byte
// //获取新增区块的height和preHash
// fmt.Println("开始挖矿。。。")
// err:=blc.DB.View(func(tx *bolt.Tx)error{
// b := tx.Bucket([]byte(blockTableName))
// if b!=nil{
// //blockHash := b.Get([]byte("l"))
// block := DeSerialize(b.Get(blc.Tip))
// height = block.Height+1
// preHash = block.Hash
// }
// return nil
// })
// if err!=nil{
// log.Panic(err)
// }
// // 创建新区块并添加数据库
//
// err = blc.DB.Update(func(tx *bolt.Tx) error{
// b := tx.Bucket([]byte(blockTableName))
// if b!=nil{
// newBlock := NewBlock(txs,height,preHash)
// newBlockByte := newBlock.Serialize()
// //添加区块信息值数据库
// err :=b.Put(newBlock.Hash,newBlockByte)
// if err!=nil{
// log.Panic(err)
// }
//
// //更新区块链的Tip以及数据库的l
// blc.Tip = newBlock.Hash
// b.Put([]byte("l"),newBlock.Hash)
// fmt.Println("挖矿成功!")
// }
//
// return nil
// })
// if err!=nil{
// log.Panic(err)
// }
//
//}
// 遍历输出所有区块的信息
func (blc *BlockChain) PrintChain(){
blockchainIterator := blc.Iterator()
for {
block := blockchainIterator.Next()
fmt.Printf("Height: %d\n",block.Height)
fmt.Printf("PrevBlockHash: %x\n",block.PrevBlockHash)
fmt.Printf("TimeStamp: %s\n",time.Unix(block.Timestamp,0).Format("2006-01-02 15:04:05"))
fmt.Printf("Hash: %x\n",block.Hash)
fmt.Printf("Nonce: %d\n",block.Nonce)
fmt.Println("Txs:")
for _,tx := range block.Txs{
fmt.Printf("%x\n",tx.TxHash)
fmt.Println("Vins:")
for _,in:=range tx.Vins{
fmt.Printf("%x\n",in.TXHash)
fmt.Printf("%d\n",in.Vout)
//fmt.Printf("%x\n",in.Signature)
fmt.Printf("%x\n",in.Pubkey)
}
fmt.Println("Vouts:")
for _,out:=range tx.Vouts{
fmt.Printf("%d\n",out.Value)
fmt.Printf("%v\n",out.Ripemd160Hash)
}
}
fmt.Println("----------------------------------------------------------------------------------")
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
}
// 判断数据库是否存在
func dbExsits(nodeID string) bool{
//生成不同节点的数据库文件
dbName := fmt.Sprintf(dbName,nodeID)
if _,err := os.Stat(dbName);os.IsNotExist(err){
return false
}
return true
}
//如果一个地址所对应的TXout未花费,那么这个就应该被添加到数组中
func (blockchain *BlockChain) UnUTXOs(address string,txs []*Transaction)[]*UTXO{
var unUTXOs []*UTXO
spentTXOutputs := make(map[string][]int)
//
//pubKeyHash := Base58Decode([]byte(address))
//ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
//fmt.Printf("转换后%v\n",ripemd160Hash)
// 处理未加入数据库中的交易
for _,tx := range txs {
if tx.IsCoinBaseTransaction() == false {
for _, in := range tx.Vins {
//是否解锁
pubKeyHash := Base58Decode([]byte(address))
ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
if in.UnLockRipemd160Hash(ripemd160Hash) {
key := hex.EncodeToString(in.TXHash)
spentTXOutputs[key] = append(spentTXOutputs[key], in.Vout)
}
}
}
}
fmt.Println(spentTXOutputs)
for _,tx:=range txs{
//若当前的txHash都没有被记录消费
spentArray,ok:=spentTXOutputs[hex.EncodeToString(tx.TxHash)]
if ok==false{
for index,out := range tx.Vouts{
if out.UnLockWithAddress(address) {
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs, utxo)
}
}
} else{
//Vouts
for index,out := range tx.Vouts{
//判断是否花费
flag := false
//是否解锁
if out.UnLockWithAddress(address){
//判断是否被消费
for _,spentIndex := range spentArray{
if spentIndex==index{
flag = true
break
}
}
//遍历所有已记录花费,该outPut未花费
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs,utxo)
}
}
}
}
}
//迭代数据库
blockIterator := blockchain.Iterator()
for{
block:=blockIterator.Next()
fmt.Println(block)
for _,tx:=range block.Txs {
//txHash
//Vins
if tx.IsCoinBaseTransaction() == false {
pubKeyHash := Base58Decode([]byte(address))
ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
for _, in := range tx.Vins {
//是否解锁
if in.UnLockRipemd160Hash(ripemd160Hash) {
key := hex.EncodeToString(in.TXHash)
spentTXOutputs[key] = append(spentTXOutputs[key], in.Vout)
}
}
}
}
fmt.Printf("%v\n",spentTXOutputs)
for _,tx:=range block.Txs{
//若当前的txHash都没有被记录消费
spentArray,ok:=spentTXOutputs[hex.EncodeToString(tx.TxHash)]
//fmt.Printf("ok is %s",ok)
if ok==false{
for index,out := range tx.Vouts{
if out.UnLockWithAddress(address) {
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs, utxo)
}
}
} else{
//Vouts
for index,out := range tx.Vouts{
//判断是否花费
flag := false
//是否解锁
if out.UnLockWithAddress(address){
//判断是否被消费
for _,spentIndex := range spentArray{
if spentIndex==index{
flag = true
break
}
}
//遍历所有已记录花费,该outPut未花费
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs,utxo)
}
}
}
}
}
//终止遍历条件
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return unUTXOs
}
//去查找可用的output,即将被消费
func(blockchain *BlockChain) FindSpendableUTXOs(from string,amount int,txs []*Transaction)(int64,map[string][]int){
//1.获取所有UTXOs
utxos:=blockchain.UnUTXOs(from,txs)
spendAbleUTXO := make(map[string][]int)
//2.遍历utxos
var value int64
for _,utxo := range utxos{
value = value + utxo.Output.Value
hash := hex.EncodeToString(utxo.TxHash)
spendAbleUTXO[hash] = append(spendAbleUTXO[hash],utxo.Index)
if value>=int64(amount){
break
}
}
if value < int64(amount){
fmt.Printf("%s's fund isnt enough\n",from)
os.Exit(1)
}
return value,spendAbleUTXO
}
//挖掘新的区块
func (blockchain *BlockChain)MineNewBlock(from []string,to []string,amount []string,nodeId string){
//fmt.Println(from)
//fmt.Println(to)
//fmt.Println(amount)
//1.通过相关算法建立交易数组
//main.exe send -from "['liyuechun']" -to "['zhangqiang']" -amount "['2']"
utxoSet := &UTXOSet{blockchain}
var txs []*Transaction
var block *Block
//奖励from的第一个(先添加奖励余额,该余额可以被使用)
tx := NewCoinbaseTransaction(from[0])
txs = append(txs,tx)
//处理所有的交易
for index,_ := range from{
amountint,_ := strconv.Atoi(amount[index])
//可能有多比交易,之前的交易还未存储到数据库中,在新建交易时,需要考虑已有的未保存的交易,因此传入txs
tx := NewSimpleTransaction(from[index],to[index],int64(amountint),utxoSet,txs,nodeId)
txs = append(txs,tx)
}
//blockchain.AddBlockToBlockchain(txs)
blockchain.DB.View(func(tx *bolt.Tx) error {
b :=tx.Bucket([]byte(blockTableName))
if b!=nil{
hash:=b.Get([]byte("l"))
blockBytes := b.Get(hash)
block = DeSerialize(blockBytes)
}
return nil
})
//在建立新区块之前,要进行数字签名的验证
for _,tx:= range txs{
if blockchain.VerifyTransaction(tx,txs)==false{
log.Panic("签名失败!")
//os.Exit(1)
}
}
//2. 建立新的区块
block =NewBlock(txs,block.Height+1,block.Hash)
////3.存储到数据库
blockchain.DB.Update(func(tx *bolt.Tx) error {
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
b.Put(block.Hash,block.Serialize())
b.Put([]byte("l"),block.Hash)
blockchain.Tip = block.Hash
}
| func(blockchain *BlockChain)GetBalance(address string) int64{
utxos := blockchain.UnUTXOs(address,[]*Transaction{})
fmt.Println(utxos)
var amount int64
for _,utxo := range utxos{
amount += utxo.Output.Value
}
return amount
}
// 数字签名
func(bc *BlockChain)SignTransaction(tx *Transaction,private ecdsa.PrivateKey,txs []*Transaction){
if tx.IsCoinBaseTransaction(){
return
}
prevTxs := make(map[string]Transaction)
for _,vin := range tx.Vins{
prevTx,err := bc.FindTransaction(vin.TXHash,txs)
if err!=nil{
log.Panic(err)
}
prevTxs [hex.EncodeToString(prevTx.TxHash)] = prevTx
}
tx.Sign(private,prevTxs)
}
//找签名相关交易
func(bc *BlockChain) FindTransaction(txHash []byte,txs []*Transaction)(Transaction,error){
for _,tx:=range txs{
if bytes.Compare(tx.TxHash,txHash)==0{
return *tx,nil
}
}
bci := bc.Iterator()
var hashInt big.Int
for{
block := bci.Next()
for _,tx:=range block.Txs{
if bytes.Compare(tx.TxHash,txHash)==0{
return *tx,nil
}
}
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return Transaction{},errors.New("Transaction is not found")
}
func(bc *BlockChain) VerifyTransaction(tx *Transaction,txs []*Transaction) bool{
if tx.IsCoinBaseTransaction(){
return true
}
prevTxs := make(map[string]Transaction)
for _,vin := range tx.Vins{
fmt.Println("1111")
prevTx,err := bc.FindTransaction(vin.TXHash,txs)
if err!=nil{
log.Panic(err)
}
prevTxs [hex.EncodeToString(prevTx.TxHash)] = prevTx
}
return tx.Verify(prevTxs)
}
//[string]*TXoutputs
func(blc *BlockChain)FindUTXOMap()map[string]*TXOutputs{
blcIterator := blc.Iterator()
//存储已花费的utxo信息
spentUTXOsMap := make(map[string][]*TXInput)
utxoMap := make(map[string]*TXOutputs)
//1.spentUTXOsMap := make(map[string][]int)
for{
block := blcIterator.Next()
for i := len(block.Txs)-1;i>=0;i--{
txOutputs := &TXOutputs{[]*UTXO{}}
tx := block.Txs[i]
txHash := hex.EncodeToString(tx.TxHash)
//coinbase
//添加记录已花费的
if tx.IsCoinBaseTransaction()==false{
for _,txInput:=range tx.Vins{
txInputHash := hex.EncodeToString(txInput.TXHash)
spentUTXOsMap[txInputHash] = append(spentUTXOsMap[txInputHash],txInput)
//1.spentUTXOsMap[txInputHash] = append(spentUTXOsMap[txInputHash],txInput.Vout)
}
}
for index,out := range tx.Vouts{
txInputs := spentUTXOsMap[txHash]
if len(txInputs) > 0 {
//是否消费,默认没有
flag := false
for _,in := range txInputs{
outPublicKey:= out.Ripemd160Hash
inPublicKey := in.Pubkey
if bytes.Compare(outPublicKey,Ripemd160Hash(inPublicKey))==0 && index==in.Vout{
flag = true
break
}
}
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
txOutputs.UTXOS = append(txOutputs.UTXOS,utxo)
}
}else{
utxo := &UTXO{tx.TxHash,index,out}
txOutputs.UTXOS = append(txOutputs.UTXOS,utxo)
}
}
//设置键值对
utxoMap[txHash] = txOutputs
}
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return utxoMap
}
// 获取当前区块链的高度
func(bc *BlockChain) GetHeight() int64{
return bc.Iterator().Next().Height
}
// 获取区块链所有的区块哈希
func(bc *BlockChain) GetBlockHashes() [][]byte{
var blockHashes [][]byte
bcit := bc.Iterator()
for{
block:=bcit.Next()
blockHashes = append(blockHashes,block.Hash)
if block.isBreakLoop(){
break
}
}
return blockHashes
}
//判断是否是创世区块
func (block *Block)isBreakLoop() bool{
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
return true
}else{
return false
}
}
// 获取指定哈希的区块
func (bc *BlockChain) GetBlock(hash []byte) []byte{
var blockByte []byte
bc.DB.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(blockTableName))
if b!=nil{
blockByte = b.Get(hash)
}
return nil
})
return blockByte
}
//添加区块
func(bc *BlockChain) AddBlock(block *Block){
err:=bc.DB.Update(func(tx *bolt.Tx) error {
//1.获取数据表
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
//判断需要传入的区块是否存在
if b.Get(block.Hash)!=nil{
//已经存在,不需要添加
return nil
}
//不存在,添加到数据库
err:=b.Put(block.Hash,block.Serialize())
if err!=nil{
log.Printf("sync the block failed! %v\n",err)
}
}
blockHash := b.Get([]byte("l"))
latesBlock := b.Get(blockHash)
rawBlock := DeSerialize(latesBlock)
if rawBlock.Height<block.Height{
b.Put([]byte("l"),block.Hash)
bc.Tip = block.Hash
}
return nil
})
if err!=nil{
log.Printf("update the db when insert the new block failed!%v\n",err)
}
fmt.Printf("the new block is added!\n")
} | return nil
})
} | random_line_split |
BlockChain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
"time"
)
const dbName = "blockchain_%s.db"
const blockTableName = "blocks"
type BlockChain struct {
Tip []byte //最新区块的hash
DB *bolt.DB
}
func GetBlockChainObject(nodeID string) *BlockChain {
if !dbExsits(nodeID){
fmt.Println("创世区块不存在!")
os.Exit(1)
}
var Tip []byte
dbName := fmt.Sprintf(dbName,nodeID)
fmt.Println(dbName)
db, err := bolt.Open(dbName, 0600, nil)
if err != nil {
log.Fatal(err)
}
err = db.View(func(tx *bolt.Tx) error {
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
Tip = b.Get([]byte("l"))
}
return nil
})
if err!=nil{
log.Panic(err)
}
//fmt.Println("读取最新区块链成功!")
//读取区块链
return &BlockChain{Tip,db}
}
//1. 创建带有创世区块的区块链
func CreateBlockchainWithGenesisBlock(address string,nodeID string) *BlockChain{
//判断是否存在数据库
if(dbExsits(nodeID)){
fmt.Println("创世区块已存在!")
//在数据库中读取最新区块链 | if b!=nil{
// //blockHash := b.Get([]byte("l"))
// block := DeSerialize(b.Get(blc.Tip))
// height = block.Height+1
// preHash = block.Hash
// }
// return nil
// })
// if err!=nil{
// log.Panic(err)
// }
// // 创建新区块并添加数据库
//
// err = blc.DB.Update(func(tx *bolt.Tx) error{
// b := tx.Bucket([]byte(blockTableName))
// if b!=nil{
// newBlock := NewBlock(txs,height,preHash)
// newBlockByte := newBlock.Serialize()
// //添加区块信息值数据库
// err :=b.Put(newBlock.Hash,newBlockByte)
// if err!=nil{
// log.Panic(err)
// }
//
// //更新区块链的Tip以及数据库的l
// blc.Tip = newBlock.Hash
// b.Put([]byte("l"),newBlock.Hash)
// fmt.Println("挖矿成功!")
// }
//
// return nil
// })
// if err!=nil{
// log.Panic(err)
// }
//
//}
// 遍历输出所有区块的信息
func (blc *BlockChain) PrintChain(){
blockchainIterator := blc.Iterator()
for {
block := blockchainIterator.Next()
fmt.Printf("Height: %d\n",block.Height)
fmt.Printf("PrevBlockHash: %x\n",block.PrevBlockHash)
fmt.Printf("TimeStamp: %s\n",time.Unix(block.Timestamp,0).Format("2006-01-02 15:04:05"))
fmt.Printf("Hash: %x\n",block.Hash)
fmt.Printf("Nonce: %d\n",block.Nonce)
fmt.Println("Txs:")
for _,tx := range block.Txs{
fmt.Printf("%x\n",tx.TxHash)
fmt.Println("Vins:")
for _,in:=range tx.Vins{
fmt.Printf("%x\n",in.TXHash)
fmt.Printf("%d\n",in.Vout)
//fmt.Printf("%x\n",in.Signature)
fmt.Printf("%x\n",in.Pubkey)
}
fmt.Println("Vouts:")
for _,out:=range tx.Vouts{
fmt.Printf("%d\n",out.Value)
fmt.Printf("%v\n",out.Ripemd160Hash)
}
}
fmt.Println("----------------------------------------------------------------------------------")
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
}
// 判断数据库是否存在
func dbExsits(nodeID string) bool{
//生成不同节点的数据库文件
dbName := fmt.Sprintf(dbName,nodeID)
if _,err := os.Stat(dbName);os.IsNotExist(err){
return false
}
return true
}
//如果一个地址所对应的TXout未花费,那么这个就应该被添加到数组中
func (blockchain *BlockChain) UnUTXOs(address string,txs []*Transaction)[]*UTXO{
var unUTXOs []*UTXO
spentTXOutputs := make(map[string][]int)
//
//pubKeyHash := Base58Decode([]byte(address))
//ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
//fmt.Printf("转换后%v\n",ripemd160Hash)
// 处理未加入数据库中的交易
for _,tx := range txs {
if tx.IsCoinBaseTransaction() == false {
for _, in := range tx.Vins {
//是否解锁
pubKeyHash := Base58Decode([]byte(address))
ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
if in.UnLockRipemd160Hash(ripemd160Hash) {
key := hex.EncodeToString(in.TXHash)
spentTXOutputs[key] = append(spentTXOutputs[key], in.Vout)
}
}
}
}
fmt.Println(spentTXOutputs)
for _,tx:=range txs{
//若当前的txHash都没有被记录消费
spentArray,ok:=spentTXOutputs[hex.EncodeToString(tx.TxHash)]
if ok==false{
for index,out := range tx.Vouts{
if out.UnLockWithAddress(address) {
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs, utxo)
}
}
} else{
//Vouts
for index,out := range tx.Vouts{
//判断是否花费
flag := false
//是否解锁
if out.UnLockWithAddress(address){
//判断是否被消费
for _,spentIndex := range spentArray{
if spentIndex==index{
flag = true
break
}
}
//遍历所有已记录花费,该outPut未花费
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs,utxo)
}
}
}
}
}
//迭代数据库
blockIterator := blockchain.Iterator()
for{
block:=blockIterator.Next()
fmt.Println(block)
for _,tx:=range block.Txs {
//txHash
//Vins
if tx.IsCoinBaseTransaction() == false {
pubKeyHash := Base58Decode([]byte(address))
ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
for _, in := range tx.Vins {
//是否解锁
if in.UnLockRipemd160Hash(ripemd160Hash) {
key := hex.EncodeToString(in.TXHash)
spentTXOutputs[key] = append(spentTXOutputs[key], in.Vout)
}
}
}
}
fmt.Printf("%v\n",spentTXOutputs)
for _,tx:=range block.Txs{
//若当前的txHash都没有被记录消费
spentArray,ok:=spentTXOutputs[hex.EncodeToString(tx.TxHash)]
//fmt.Printf("ok is %s",ok)
if ok==false{
for index,out := range tx.Vouts{
if out.UnLockWithAddress(address) {
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs, utxo)
}
}
} else{
//Vouts
for index,out := range tx.Vouts{
//判断是否花费
flag := false
//是否解锁
if out.UnLockWithAddress(address){
//判断是否被消费
for _,spentIndex := range spentArray{
if spentIndex==index{
flag = true
break
}
}
//遍历所有已记录花费,该outPut未花费
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs,utxo)
}
}
}
}
}
//终止遍历条件
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return unUTXOs
}
//去查找可用的output,即将被消费
func(blockchain *BlockChain) FindSpendableUTXOs(from string,amount int,txs []*Transaction)(int64,map[string][]int){
//1.获取所有UTXOs
utxos:=blockchain.UnUTXOs(from,txs)
spendAbleUTXO := make(map[string][]int)
//2.遍历utxos
var value int64
for _,utxo := range utxos{
value = value + utxo.Output.Value
hash := hex.EncodeToString(utxo.TxHash)
spendAbleUTXO[hash] = append(spendAbleUTXO[hash],utxo.Index)
if value>=int64(amount){
break
}
}
if value < int64(amount){
fmt.Printf("%s's fund isnt enough\n",from)
os.Exit(1)
}
return value,spendAbleUTXO
}
//挖掘新的区块
func (blockchain *BlockChain)MineNewBlock(from []string,to []string,amount []string,nodeId string){
//fmt.Println(from)
//fmt.Println(to)
//fmt.Println(amount)
//1.通过相关算法建立交易数组
//main.exe send -from "['liyuechun']" -to "['zhangqiang']" -amount "['2']"
utxoSet := &UTXOSet{blockchain}
var txs []*Transaction
var block *Block
//奖励from的第一个(先添加奖励余额,该余额可以被使用)
tx := NewCoinbaseTransaction(from[0])
txs = append(txs,tx)
//处理所有的交易
for index,_ := range from{
amountint,_ := strconv.Atoi(amount[index])
//可能有多比交易,之前的交易还未存储到数据库中,在新建交易时,需要考虑已有的未保存的交易,因此传入txs
tx := NewSimpleTransaction(from[index],to[index],int64(amountint),utxoSet,txs,nodeId)
txs = append(txs,tx)
}
//blockchain.AddBlockToBlockchain(txs)
blockchain.DB.View(func(tx *bolt.Tx) error {
b :=tx.Bucket([]byte(blockTableName))
if b!=nil{
hash:=b.Get([]byte("l"))
blockBytes := b.Get(hash)
block = DeSerialize(blockBytes)
}
return nil
})
//在建立新区块之前,要进行数字签名的验证
for _,tx:= range txs{
if blockchain.VerifyTransaction(tx,txs)==false{
log.Panic("签名失败!")
//os.Exit(1)
}
}
//2. 建立新的区块
block =NewBlock(txs,block.Height+1,block.Hash)
////3.存储到数据库
blockchain.DB.Update(func(tx *bolt.Tx) error {
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
b.Put(block.Hash,block.Serialize())
b.Put([]byte("l"),block.Hash)
blockchain.Tip = block.Hash
}
return nil
})
}
func(blockchain *BlockChain)GetBalance(address string) int64{
utxos := blockchain.UnUTXOs(address,[]*Transaction{})
fmt.Println(utxos)
var amount int64
for _,utxo := range utxos{
amount += utxo.Output.Value
}
return amount
}
// 数字签名
func(bc *BlockChain)SignTransaction(tx *Transaction,private ecdsa.PrivateKey,txs []*Transaction){
if tx.IsCoinBaseTransaction(){
return
}
prevTxs := make(map[string]Transaction)
for _,vin := range tx.Vins{
prevTx,err := bc.FindTransaction(vin.TXHash,txs)
if err!=nil{
log.Panic(err)
}
prevTxs [hex.EncodeToString(prevTx.TxHash)] = prevTx
}
tx.Sign(private,prevTxs)
}
//找签名相关交易
func(bc *BlockChain) FindTransaction(txHash []byte,txs []*Transaction)(Transaction,error){
for _,tx:=range txs{
if bytes.Compare(tx.TxHash,txHash)==0{
return *tx,nil
}
}
bci := bc.Iterator()
var hashInt big.Int
for{
block := bci.Next()
for _,tx:=range block.Txs{
if bytes.Compare(tx.TxHash,txHash)==0{
return *tx,nil
}
}
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return Transaction{},errors.New("Transaction is not found")
}
func(bc *BlockChain) VerifyTransaction(tx *Transaction,txs []*Transaction) bool{
if tx.IsCoinBaseTransaction(){
return true
}
prevTxs := make(map[string]Transaction)
for _,vin := range tx.Vins{
fmt.Println("1111")
prevTx,err := bc.FindTransaction(vin.TXHash,txs)
if err!=nil{
log.Panic(err)
}
prevTxs [hex.EncodeToString(prevTx.TxHash)] = prevTx
}
return tx.Verify(prevTxs)
}
//[string]*TXoutputs
func(blc *BlockChain)FindUTXOMap()map[string]*TXOutputs{
blcIterator := blc.Iterator()
//存储已花费的utxo信息
spentUTXOsMap := make(map[string][]*TXInput)
utxoMap := make(map[string]*TXOutputs)
//1.spentUTXOsMap := make(map[string][]int)
for{
block := blcIterator.Next()
for i := len(block.Txs)-1;i>=0;i--{
txOutputs := &TXOutputs{[]*UTXO{}}
tx := block.Txs[i]
txHash := hex.EncodeToString(tx.TxHash)
//coinbase
//添加记录已花费的
if tx.IsCoinBaseTransaction()==false{
for _,txInput:=range tx.Vins{
txInputHash := hex.EncodeToString(txInput.TXHash)
spentUTXOsMap[txInputHash] = append(spentUTXOsMap[txInputHash],txInput)
//1.spentUTXOsMap[txInputHash] = append(spentUTXOsMap[txInputHash],txInput.Vout)
}
}
for index,out := range tx.Vouts{
txInputs := spentUTXOsMap[txHash]
if len(txInputs) > 0 {
//是否消费,默认没有
flag := false
for _,in := range txInputs{
outPublicKey:= out.Ripemd160Hash
inPublicKey := in.Pubkey
if bytes.Compare(outPublicKey,Ripemd160Hash(inPublicKey))==0 && index==in.Vout{
flag = true
break
}
}
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
txOutputs.UTXOS = append(txOutputs.UTXOS,utxo)
}
}else{
utxo := &UTXO{tx.TxHash,index,out}
txOutputs.UTXOS = append(txOutputs.UTXOS,utxo)
}
}
//设置键值对
utxoMap[txHash] = txOutputs
}
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return utxoMap
}
// 获取当前区块链的高度
func(bc *BlockChain) GetHeight() int64{
return bc.Iterator().Next().Height
}
// 获取区块链所有的区块哈希
func(bc *BlockChain) GetBlockHashes() [][]byte{
var blockHashes [][]byte
bcit := bc.Iterator()
for{
block:=bcit.Next()
blockHashes = append(blockHashes,block.Hash)
if block.isBreakLoop(){
break
}
}
return blockHashes
}
//判断是否是创世区块
func (block *Block)isBreakLoop() bool{
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
return true
}else{
return false
}
}
// 获取指定哈希的区块
func (bc *BlockChain) GetBlock(hash []byte) []byte{
var blockByte []byte
bc.DB.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(blockTableName))
if b!=nil{
blockByte = b.Get(hash)
}
return nil
})
return blockByte
}
//添加区块
func(bc *BlockChain) AddBlock(block *Block){
err:=bc.DB.Update(func(tx *bolt.Tx) error {
//1.获取数据表
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
//判断需要传入的区块是否存在
if b.Get(block.Hash)!=nil{
//已经存在,不需要添加
return nil
}
//不存在,添加到数据库
err:=b.Put(block.Hash,block.Serialize())
if err!=nil{
log.Printf("sync the block failed! %v\n",err)
}
}
blockHash := b.Get([]byte("l"))
latesBlock := b.Get(blockHash)
rawBlock := DeSerialize(latesBlock)
if rawBlock.Height<block.Height{
b.Put([]byte("l"),block.Hash)
bc.Tip = block.Hash
}
return nil
})
if err!=nil{
log.Printf("update the db when insert the new block failed!%v\n",err)
}
fmt.Printf("the new block is added!\n")
} |
os.Exit(1)
}
// 当数据库不存在时,创建创世区块链
fmt.Println("正在创建创世区块。。。")
dbName := fmt.Sprintf(dbName,nodeID)
db, err := bolt.Open(dbName, 0600, nil)
if err != nil {
log.Fatal(err)
}
//defer db.Close()
var genesisBlock *Block
err = db.Update(func(tx *bolt.Tx)error{
b,err := tx.CreateBucket([]byte(blockTableName))
if err!=nil{
log.Panic(err)
}
if b!=nil{
//创建一个coinbase transaction
txCoinbase := NewCoinbaseTransaction(address)
//创建创世区块
genesisBlock = CreateGenesisBlock([]*Transaction{txCoinbase})
//将创世区块放入数据库
err=b.Put(genesisBlock.Hash,genesisBlock.Serialize())
if err!=nil{
log.Panic(err)
}
//存储最新的区块的hash
err=b.Put([]byte("l"),genesisBlock.Hash)
if err!=nil{
log.Panic(err)
}
}
return nil
})
fmt.Println("创建创世区块成功!")
return &BlockChain{genesisBlock.Hash,db}
}
// 增加区块到区块链中
//func(blc *BlockChain) AddBlockToBlockchain(txs []*Transaction){
// var height int64
// var preHash []byte
// //获取新增区块的height和preHash
// fmt.Println("开始挖矿。。。")
// err:=blc.DB.View(func(tx *bolt.Tx)error{
// b := tx.Bucket([]byte(blockTableName))
// | identifier_body |
BlockChain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
"time"
)
const dbName = "blockchain_%s.db"
const blockTableName = "blocks"
type BlockChain struct {
Tip []byte //最新区块的hash
DB *bolt.DB
}
func GetBlockChainObject(nodeID string) *BlockChain {
if !dbExsits(nodeID){
fmt.Println("创世区块不存在!")
os.Exit(1)
}
var Tip []byte
dbName := fmt.Sprintf(dbName,nodeID)
fmt.Println(dbName)
db, err := bolt.Open(dbName, 0600, nil)
if err != nil {
log.Fatal(err)
}
err = db.View(func(tx *bolt.Tx) error {
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
Tip = b.Get([]byte("l"))
}
return nil
})
if err!=nil{
log.Panic(err)
}
//fmt.Println("读取最新区块链成功!")
//读取区块链
return &BlockChain{Tip,db}
}
//1. 创建带有创世区块的区块链
func CreateBlockchainWithGenesisBlock(address string,nodeID string) *BlockChain{
//判断是否存在数据库
if(dbExsits(nodeID)){
fmt.Println("创世区块已存在!")
//在数据库中读取最新区块链
os.Exit(1)
}
// 当数据库不存在时,创建创世区块链
fmt.Println("正在创建创世区块。。。")
dbName := fmt.Sprintf(dbName,nodeID)
db, err := bolt.Open(dbName, 0600, nil)
if err != nil {
log.Fatal(err)
}
//defer db.Close()
var genesisBlock *Block
err = db.Update(func(tx *bolt.Tx)error{
b,err := tx.CreateBucket([]byte(blockTableName))
if err!=nil{
log.Panic(err)
}
if b!=nil{
//创建一个coinbase transaction
txCoinbase := NewCoinbaseTransaction(address)
//创建创世区块
genesisBlock = CreateGenesisBlock([]*Transaction{txCoinbase})
//将创世区块放入数据库
err=b.Put(genesisBlock.Hash,genesisBlock.Serialize())
if err!=nil{
log.Panic(err)
}
//存储最新的区块的hash
err=b.Put([]byte("l"),genesisBlock.Hash)
if err!=nil{
log.Panic(err)
}
}
return nil
})
fmt.Println("创建创世区块成功!")
return &BlockChain{genesisBlock.Hash,db}
}
// 增加区块到区块链中
//func(blc *BlockChain) AddBlockToBlockchain(txs []*Transaction){
// var height int64
// var preHash []byte
// //获取新增区块的height和preHash
// fmt.Println("开始挖矿。。。")
// err:=blc.DB.View(func(tx *bolt.Tx)error{
// b := tx.Bucket([]byte(blockTableName))
// if b!=nil{
// //blockHash := b.Get([]byte("l"))
// block := DeSerialize(b.Get(blc.Tip))
// height = block.Height+1
// preHash = block.Hash
// }
// return nil
// })
// if err!=nil{
// log.Panic(err)
// }
// // 创建新区块并添加数据库
//
// err = blc.DB.Update(func(tx *bolt.Tx) error{
// b := tx.Bucket([]byte(blockTableName))
// if b!=nil{
// newBlock := NewBlock(txs,height,preHash)
// newBlockByte := newBlock.Serialize()
// //添加区块信息值数据库
// err :=b.Put(newBlock.Hash,newBlockByte)
// if err!=nil{
// log.Panic(err)
// }
//
// //更新区块链的Tip以及数据库的l
// blc.Tip = newBlock.Hash
// b.Put([]byte("l"),newBlock.Hash)
// fmt.Println("挖矿成功!")
// }
//
// return nil
// })
// if err!=nil{
// log.Panic(err)
// }
//
//}
// 遍历输出所有区块的信息
func (blc *BlockChain) PrintChain(){
blockchainIterator := blc.Iterator()
for {
block := blockchainIterator.Next()
fmt.Printf("Height: %d\n",block.Height)
fmt.Printf("PrevBlockHash: %x\n",block.PrevBlockHash)
fmt.Printf("TimeStamp: %s\n",time.Unix(block.Timestamp,0).Format("2006-01-02 15:04:05"))
fmt.Printf("Hash: %x\n",block.Hash)
fmt.Printf("Nonce: %d\n",block.Nonce)
fmt.Println("Txs:")
for _,tx := range block.Txs{
fmt.Printf("%x\n",tx.TxHash)
fmt.Println("Vins:")
for _,in:=range tx.Vins{
fmt.Printf("%x\n",in.TXHash)
fmt.Printf("%d\n",in.Vout)
//fmt.Printf("%x\n",in.Signature)
fmt.Printf("%x\n",in.Pubkey)
}
fmt.Println("Vouts:")
for _,out:=range tx.Vouts{
fmt.Printf("%d\n",out.Value)
fmt.Printf("%v\n",out.Ripemd160Hash)
}
}
fmt.Println("----------------------------------------------------------------------------------")
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
}
// 判断数据库是否存在
func dbExsits(nodeID string) bool{
//生成不同节点的数据库文件
dbName := fmt.Sprintf(dbName,nodeID)
if _,err := os.Stat(dbName);os.IsNotExist(err){
return false
}
return true
}
//如果一个地址所对应的TXout未花费,那么这个就应该被添加到数组中
func (blockchain *BlockChain) UnUTXOs(address string,txs []*Transaction)[]*UTXO{
var unUTXOs []*UTXO
spentTXOutputs := make(map[string][]int)
//
//pubKeyHash := Base58Decode([]byte(address))
//ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
//fmt.Printf("转换后%v\n",ripemd160Hash)
// 处理未加入数据库中的交易
for _,tx := range txs {
if tx.IsCoinBaseTransaction() == false {
for _, in := range tx.Vins {
//是否解锁
pubKeyHash := Base58Decode([]byte(address))
ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
if in.UnLockRipemd1 | ripemd160Hash) {
key := hex.EncodeToString(in.TXHash)
spentTXOutputs[key] = append(spentTXOutputs[key], in.Vout)
}
}
}
}
fmt.Println(spentTXOutputs)
for _,tx:=range txs{
//若当前的txHash都没有被记录消费
spentArray,ok:=spentTXOutputs[hex.EncodeToString(tx.TxHash)]
if ok==false{
for index,out := range tx.Vouts{
if out.UnLockWithAddress(address) {
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs, utxo)
}
}
} else{
//Vouts
for index,out := range tx.Vouts{
//判断是否花费
flag := false
//是否解锁
if out.UnLockWithAddress(address){
//判断是否被消费
for _,spentIndex := range spentArray{
if spentIndex==index{
flag = true
break
}
}
//遍历所有已记录花费,该outPut未花费
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs,utxo)
}
}
}
}
}
//迭代数据库
blockIterator := blockchain.Iterator()
for{
block:=blockIterator.Next()
fmt.Println(block)
for _,tx:=range block.Txs {
//txHash
//Vins
if tx.IsCoinBaseTransaction() == false {
pubKeyHash := Base58Decode([]byte(address))
ripemd160Hash := pubKeyHash[1:len(pubKeyHash)-4]
for _, in := range tx.Vins {
//是否解锁
if in.UnLockRipemd160Hash(ripemd160Hash) {
key := hex.EncodeToString(in.TXHash)
spentTXOutputs[key] = append(spentTXOutputs[key], in.Vout)
}
}
}
}
fmt.Printf("%v\n",spentTXOutputs)
for _,tx:=range block.Txs{
//若当前的txHash都没有被记录消费
spentArray,ok:=spentTXOutputs[hex.EncodeToString(tx.TxHash)]
//fmt.Printf("ok is %s",ok)
if ok==false{
for index,out := range tx.Vouts{
if out.UnLockWithAddress(address) {
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs, utxo)
}
}
} else{
//Vouts
for index,out := range tx.Vouts{
//判断是否花费
flag := false
//是否解锁
if out.UnLockWithAddress(address){
//判断是否被消费
for _,spentIndex := range spentArray{
if spentIndex==index{
flag = true
break
}
}
//遍历所有已记录花费,该outPut未花费
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
unUTXOs = append(unUTXOs,utxo)
}
}
}
}
}
//终止遍历条件
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return unUTXOs
}
//去查找可用的output,即将被消费
func(blockchain *BlockChain) FindSpendableUTXOs(from string,amount int,txs []*Transaction)(int64,map[string][]int){
//1.获取所有UTXOs
utxos:=blockchain.UnUTXOs(from,txs)
spendAbleUTXO := make(map[string][]int)
//2.遍历utxos
var value int64
for _,utxo := range utxos{
value = value + utxo.Output.Value
hash := hex.EncodeToString(utxo.TxHash)
spendAbleUTXO[hash] = append(spendAbleUTXO[hash],utxo.Index)
if value>=int64(amount){
break
}
}
if value < int64(amount){
fmt.Printf("%s's fund isnt enough\n",from)
os.Exit(1)
}
return value,spendAbleUTXO
}
//挖掘新的区块
func (blockchain *BlockChain)MineNewBlock(from []string,to []string,amount []string,nodeId string){
//fmt.Println(from)
//fmt.Println(to)
//fmt.Println(amount)
//1.通过相关算法建立交易数组
//main.exe send -from "['liyuechun']" -to "['zhangqiang']" -amount "['2']"
utxoSet := &UTXOSet{blockchain}
var txs []*Transaction
var block *Block
//奖励from的第一个(先添加奖励余额,该余额可以被使用)
tx := NewCoinbaseTransaction(from[0])
txs = append(txs,tx)
//处理所有的交易
for index,_ := range from{
amountint,_ := strconv.Atoi(amount[index])
//可能有多比交易,之前的交易还未存储到数据库中,在新建交易时,需要考虑已有的未保存的交易,因此传入txs
tx := NewSimpleTransaction(from[index],to[index],int64(amountint),utxoSet,txs,nodeId)
txs = append(txs,tx)
}
//blockchain.AddBlockToBlockchain(txs)
blockchain.DB.View(func(tx *bolt.Tx) error {
b :=tx.Bucket([]byte(blockTableName))
if b!=nil{
hash:=b.Get([]byte("l"))
blockBytes := b.Get(hash)
block = DeSerialize(blockBytes)
}
return nil
})
//在建立新区块之前,要进行数字签名的验证
for _,tx:= range txs{
if blockchain.VerifyTransaction(tx,txs)==false{
log.Panic("签名失败!")
//os.Exit(1)
}
}
//2. 建立新的区块
block =NewBlock(txs,block.Height+1,block.Hash)
////3.存储到数据库
blockchain.DB.Update(func(tx *bolt.Tx) error {
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
b.Put(block.Hash,block.Serialize())
b.Put([]byte("l"),block.Hash)
blockchain.Tip = block.Hash
}
return nil
})
}
func(blockchain *BlockChain)GetBalance(address string) int64{
utxos := blockchain.UnUTXOs(address,[]*Transaction{})
fmt.Println(utxos)
var amount int64
for _,utxo := range utxos{
amount += utxo.Output.Value
}
return amount
}
// 数字签名
func(bc *BlockChain)SignTransaction(tx *Transaction,private ecdsa.PrivateKey,txs []*Transaction){
if tx.IsCoinBaseTransaction(){
return
}
prevTxs := make(map[string]Transaction)
for _,vin := range tx.Vins{
prevTx,err := bc.FindTransaction(vin.TXHash,txs)
if err!=nil{
log.Panic(err)
}
prevTxs [hex.EncodeToString(prevTx.TxHash)] = prevTx
}
tx.Sign(private,prevTxs)
}
//找签名相关交易
func(bc *BlockChain) FindTransaction(txHash []byte,txs []*Transaction)(Transaction,error){
for _,tx:=range txs{
if bytes.Compare(tx.TxHash,txHash)==0{
return *tx,nil
}
}
bci := bc.Iterator()
var hashInt big.Int
for{
block := bci.Next()
for _,tx:=range block.Txs{
if bytes.Compare(tx.TxHash,txHash)==0{
return *tx,nil
}
}
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return Transaction{},errors.New("Transaction is not found")
}
func(bc *BlockChain) VerifyTransaction(tx *Transaction,txs []*Transaction) bool{
if tx.IsCoinBaseTransaction(){
return true
}
prevTxs := make(map[string]Transaction)
for _,vin := range tx.Vins{
fmt.Println("1111")
prevTx,err := bc.FindTransaction(vin.TXHash,txs)
if err!=nil{
log.Panic(err)
}
prevTxs [hex.EncodeToString(prevTx.TxHash)] = prevTx
}
return tx.Verify(prevTxs)
}
//[string]*TXoutputs
func(blc *BlockChain)FindUTXOMap()map[string]*TXOutputs{
blcIterator := blc.Iterator()
    // stores the spent UTXO information (tx hash -> inputs that spend it)
spentUTXOsMap := make(map[string][]*TXInput)
utxoMap := make(map[string]*TXOutputs)
//1.spentUTXOsMap := make(map[string][]int)
for{
block := blcIterator.Next()
for i := len(block.Txs)-1;i>=0;i--{
txOutputs := &TXOutputs{[]*UTXO{}}
tx := block.Txs[i]
txHash := hex.EncodeToString(tx.TxHash)
//coinbase
            // record the outputs spent by this transaction's inputs
if tx.IsCoinBaseTransaction()==false{
for _,txInput:=range tx.Vins{
txInputHash := hex.EncodeToString(txInput.TXHash)
spentUTXOsMap[txInputHash] = append(spentUTXOsMap[txInputHash],txInput)
//1.spentUTXOsMap[txInputHash] = append(spentUTXOsMap[txInputHash],txInput.Vout)
}
}
for index,out := range tx.Vouts{
txInputs := spentUTXOsMap[txHash]
if len(txInputs) > 0 {
                    // spent flag, defaults to not spent
flag := false
for _,in := range txInputs{
outPublicKey:= out.Ripemd160Hash
inPublicKey := in.Pubkey
if bytes.Compare(outPublicKey,Ripemd160Hash(inPublicKey))==0 && index==in.Vout{
flag = true
break
}
}
if flag == false{
utxo := &UTXO{tx.TxHash,index,out}
txOutputs.UTXOS = append(txOutputs.UTXOS,utxo)
}
}else{
utxo := &UTXO{tx.TxHash,index,out}
txOutputs.UTXOS = append(txOutputs.UTXOS,utxo)
}
}
            // store the key/value pair
utxoMap[txHash] = txOutputs
}
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return utxoMap
}
// get the current height of the blockchain
func(bc *BlockChain) GetHeight() int64{
return bc.Iterator().Next().Height
}
// get all block hashes in the blockchain
func(bc *BlockChain) GetBlockHashes() [][]byte{
var blockHashes [][]byte
bcit := bc.Iterator()
for{
block:=bcit.Next()
blockHashes = append(blockHashes,block.Hash)
if block.isBreakLoop(){
break
}
}
return blockHashes
}
// check whether this is the genesis block
func (block *Block)isBreakLoop() bool{
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if hashInt.Cmp(big.NewInt(0))==0{
return true
}else{
return false
}
}
// get the block with the given hash
func (bc *BlockChain) GetBlock(hash []byte) []byte{
var blockByte []byte
bc.DB.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(blockTableName))
if b!=nil{
blockByte = b.Get(hash)
}
return nil
})
return blockByte
}
// add a block to the chain
func(bc *BlockChain) AddBlock(block *Block){
err:=bc.DB.Update(func(tx *bolt.Tx) error {
        // 1. get the blocks bucket
b:=tx.Bucket([]byte(blockTableName))
if b!=nil{
            // check whether the incoming block already exists
            if b.Get(block.Hash)!=nil{
                // already exists, nothing to add
                return nil
            }
            // not present, add it to the database
err:=b.Put(block.Hash,block.Serialize())
if err!=nil{
log.Printf("sync the block failed! %v\n",err)
}
}
blockHash := b.Get([]byte("l"))
        latestBlock := b.Get(blockHash)
        rawBlock := DeSerialize(latestBlock)
if rawBlock.Height<block.Height{
b.Put([]byte("l"),block.Hash)
bc.Tip = block.Hash
}
return nil
})
if err!=nil{
log.Printf("update the db when insert the new block failed!%v\n",err)
}
fmt.Printf("the new block is added!\n")
} | 60Hash( | identifier_name |
processing.py | import csv
import numpy as np
from implementations import ridge_regression
from helper_functions import load_csv_data
from helper_functions import compute_error
from helper_functions import compute_mse
from helper_functions import compute_rmse
def load(train_file, test_file):
"""
Load dataset from the given path and build numpy array to form training and test data.
:param train_file: file name/ path for input training data
:param test_file: file name/ path for input testing data
:return: features, targets, and indexes for training and testing
"""
print('\nLoad the raw training and test set data...')
y_train, tx_train, ids_train = load_csv_data(train_file)
y_test, tx_test, ids_test = load_csv_data(test_file)
print('\n... finished.')
return y_train, tx_train, ids_train, y_test, tx_test, ids_test
def get_header(file):
"""
Get the header line from the given file
:param file: file name/ path
:return: dict object specifying the first header line from the file
"""
    with open(file, 'r') as read_file:
        reader = csv.DictReader(read_file)
        return reader.fieldnames
def analyze(tx):
"""
    Analyze data by replacing the null value, -999, with the median of the non-null values in the
    corresponding column. Also, handle outliers by clipping values to the upper and lower bounds
    (mean +- 2 * std of the feature distribution). Finally, record the columns that have zero
variance, which would be removed.
:param tx: raw training data
:return: the list of columns which will be deleted
"""
num_cols = tx.shape[1]
print('\nNumber of columns in the data matrix: ',num_cols)
columns_to_remove = []
print('Analysis of data:\n')
for col in range(num_cols):
current_col = tx[:, col]
if len(np.unique(current_col)) == 1:
print('The column with index ', col, ' is all the same, it will be removed.')
columns_to_remove.append(col)
else:
current_col[current_col == -999] = np.median(current_col[current_col != -999])
# Handling the outliers
std_current_col = np.std(current_col)
mean_current_col = np.mean(current_col)
lower_bound = mean_current_col - 2 * std_current_col
upper_bound = mean_current_col + 2 * std_current_col
current_col[current_col < lower_bound] = lower_bound
current_col[current_col > upper_bound] = upper_bound
            print('Null values in the ', col, ' indexed column are replaced with the median, and outliers are clipped.')
return columns_to_remove
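# Illustrative sketch (not called by the pipeline): reproduces the same imputation and
# clipping rule that analyze() applies, on a tiny hypothetical column, so the
# median fill and the mean +- 2 * std bounds are easy to verify by hand.
def _demo_analyze_clipping():
    col = np.array([1.0, 2.0, 3.0, -999.0, 100.0])
    # replace the -999 null marker with the median of the non-null values
    col[col == -999] = np.median(col[col != -999])
    # clip outliers to mean +- 2 * std, as analyze() does column by column
    lower = np.mean(col) - 2 * np.std(col)
    upper = np.mean(col) + 2 * np.std(col)
    col[col < lower] = lower
    col[col > upper] = upper
    return col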
def remove_columns(tx, header, columns_to_remove):
"""
Remove the columns recorded in the variable, col_to_remove, from training data tx and
header.
:param tx: an array of training data
:param header: header array
:param columns_to_remove: the list indicating which column to be removed
:return: modified training data, tx, and header
"""
print("\nRemove columns...")
num_removed = 0
for col in columns_to_remove:
tx = np.delete(tx, col - num_removed, 1)
header = np.delete(header, col - num_removed + 2)
num_removed += 1
print("\n... finished.")
return tx, header
def create_csv(output_file, y, tx, ids, header, is_test):
    """
    Write the given labels, feature rows, and ids to a csv file with the given header.
    :param output_file: name/ path of the output csv file
    :param y: known label data
    :param tx: an array of feature data
    :param ids: an array of data point ids
    :param header: list of header field names for the csv file
    :param is_test: if True, write '?' as the prediction placeholder instead of the label
    """
print('\nCreate new csv file named ' + str(output_file) + '...')
with open(output_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)
writer.writeheader()
for idx, y_row, tx_row in zip(ids, y, tx):
if is_test:
prediction = '?'
else:
prediction = 'b' if y_row == -1 else 's'
dictionary = {'Id': int(idx),'Prediction': prediction}
for index in range(len(tx_row)):
dictionary[header[index + 2]] = float(tx_row[index])
writer.writerow(dictionary)
print('\n... finished.')
def split_data(y, tx, ids, jet_num):
"""
Split the given dataset such that only the data points with a certain
jet number remains, note that jet number is a discrete valued feature. In
other words, filter the dataset using the jet number.
:param y: known label data
:param tx: an array of training data
:param ids: an array of index of data
:param jet_num: discrete integer value for some feature
:return: an numpy array of labels, training data, and index having specified the certain jet number
"""
mask = tx[:, 22] == jet_num
return y[mask], tx[mask], ids[mask]
def process_data(train_file, test_file):
"""
Create 4 new training dataset files and 4 new test dataset files.
First, split the initial data tests using the discrete valued feature jet number,
which can only take the values 0, 1, 2 and 3. Second, process the split data
sets by replacing null values and deleting zero variance features.
:param train_file: file name/ path for input training data
:param test_file: file name/ path for input testing data
"""
y_train, tx_train, ids_train, y_test, tx_test, ids_test = load(train_file, test_file)
header_train = get_header(train_file)
header_test = get_header(test_file)
print('\nData set will be split into four, each representing data with different jet numbers.')
for jet_num in range(4):
print('\nProcess training set with jet number = ' + str(jet_num) + '...')
y_train_jet, tx_train_jet, ids_train_jet = split_data(y_train, tx_train, ids_train, jet_num)
columns_to_remove = analyze(tx_train_jet)
tx_train_jet, header_train_jet = remove_columns(tx_train_jet, header_train, columns_to_remove)
create_csv('train_jet_' + str(jet_num) + '.csv', y_train_jet, tx_train_jet, ids_train_jet, header_train_jet, False)
print('\n... created train_jet_' + str(jet_num) + '.csv file.')
print('\nProcess test set with jet number = ' + str(jet_num) + '...')
y_test_jet, tx_test_jet, ids_test_jet = split_data(y_test, tx_test, ids_test, jet_num)
columns_to_remove = analyze(tx_test_jet)
tx_test_jet, header_test_jet = remove_columns(tx_test_jet, header_test, columns_to_remove)
create_csv('test_jet_' + str(jet_num) + '.csv', y_test_jet, tx_test_jet, ids_test_jet, header_test_jet, True)
print('\n... created test_jet_' + str(jet_num) + '.csv file.')
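# Minimal usage sketch, assuming the raw csv files are named 'train.csv' and
# 'test.csv' in the working directory (hypothetical paths): process_data() then
# writes train_jet_0.csv .. train_jet_3.csv and test_jet_0.csv .. test_jet_3.csv.
# if __name__ == '__main__':
#     process_data('train.csv', 'test.csv')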
def report_prediction_accuracy(y, tx, w_best, verbose=True):
"""
Report the percentage of correct predictions of a model applied on a set of labels.
:param y: labels
:param tx: training data
    :param w_best: optimized weight vector of the model
:return: accuracy of predictions on a dataset
"""
predictions = tx.dot(w_best)
predictions[predictions >= 0] = 1
predictions[predictions < 0] = -1
correct_percentage = np.sum(predictions == y) / float(len(predictions))
if verbose:
print('Percentage of correct predictions is: ', correct_percentage * 100, '%')
return correct_percentage
def build_k_indices(y, k_fold, seed):
"""
Randomly partitions the indices of the data set into k groups.
:param y: labels
:param k_fold: number of folds
:param seed: random generator seed
:return: an array of k sub-indices that are randomly partitioned
"""
num_rows = y.shape[0]
interval = int(num_rows / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_rows)
k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]
return np.array(k_indices)
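# Illustrative sketch (not called by the pipeline): with 10 labels and 5 folds,
# build_k_indices() returns a (5, 2) array of row indices, each row being one
# disjoint validation fold drawn from a seeded random permutation.
def _demo_k_indices():
    y = np.zeros(10)
    k_indices = build_k_indices(y, k_fold=5, seed=1)
    return k_indices.shape   # (5, 2)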
def cross_validation(y, augmented_tx, k_indices, k, lambda_, report_predictions=False):
"""
Perform cross_validation for a specific test set from the partitioned set.
:param y: label data
:param augmented_tx: augmented features
:param k_indices: An array of k sub-indices that are randomly partitioned
    :param k: index of the fold to use as the test set
:param lambda_: regularization parameters
:param report_predictions: report prediction or not
:return: root mean square of loss training error, prediction
"""
y_test = y[k_indices[k]]
y_train = np.delete(y, k_indices[k])
augmented_tx_test = augmented_tx[k_indices[k]]
augmented_tx_train = np.delete(augmented_tx, k_indices[k], axis = 0)
w, loss_train = ridge_regression(y_train, augmented_tx_train, lambda_)
pred = report_prediction_accuracy(y_test, augmented_tx_test, w, False)
return compute_rmse(loss_train), pred
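# Illustrative sketch of how cross_validation() is typically driven: sweep a few
# regularization strengths and average the per-fold RMSE and accuracy. The lambda
# grid and fold count below are hypothetical choices, not values from this project.
def _demo_lambda_sweep(y, augmented_tx, k_fold=4, seed=1):
    k_indices = build_k_indices(y, k_fold, seed)
    results = {}
    for lambda_ in (1e-5, 1e-3, 1e-1):
        rmses, accs = [], []
        for k in range(k_fold):
            rmse, acc = cross_validation(y, augmented_tx, k_indices, k, lambda_)
            rmses.append(rmse)
            accs.append(acc)
        results[lambda_] = (np.mean(rmses), np.mean(accs))
    return results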
def report_prediction_accuracy_logistic(y, tx, w_best, verbose=True):
"""
Report the percentage of correct predictions of a model that is applied
on a set of labels. This method specifically works for logistic regression
since the prediction assumes that labels are between 0 and 1.
:param y: labels
:param tx: training data
:param w_best: Optimized weight vector of the model
:return: the percentage of correct predictions of the model when it is applied on the given test set of labels
"""
predictions = tx.dot(w_best)
predictions[predictions > 0.5] = 1
predictions[predictions <= 0.5] = 0
correct_percentage = np.sum(predictions == y) / float(len(predictions))
if verbose:
print('Percentage of correct predictions is: ',correct_percentage * 100, '%')
return correct_percentage
def train_test_split(y, tx, ratio, seed=1):
"""
Split a given training data set to a test set and a training set,
    the sizes of the created sets are determined by the given ratio.
:param y: labels
:param tx: training data
:param ratio: ratio for splitting training and testing data
:param seed: random seed
:return: numpy array of training and testing data
"""
np.random.seed(seed)
permutation = np.random.permutation(len(y))
shuffled_tx = tx[permutation]
shuffled_y = y[permutation]
split_position = int(len(y) * ratio)
tx_training, tx_test = shuffled_tx[: split_position], shuffled_tx[split_position:]
y_training, y_test = shuffled_y[: split_position], shuffled_y[split_position:]
return y_training, tx_training, y_test, tx_test
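# Illustrative sketch (not called by the pipeline): with ratio=0.8 the first 80%
# of a shuffled copy of the data becomes the training split and the remainder the
# local test split; the seed only fixes the permutation.
def _demo_train_test_split():
    y = np.arange(10)
    tx = np.arange(20).reshape(10, 2)
    y_tr, tx_tr, y_te, tx_te = train_test_split(y, tx, ratio=0.8, seed=1)
    return len(y_tr), len(y_te)   # (8, 2)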
def standardize(x, mean_x=None, std_x=None):
"""
Standardize original data from the dataset.
:param x: data to standardize
:param mean_x: mean value of data given by the dataset
:param std_x: standard deviation of data given by the dataset
:return: standardized data
"""
if mean_x is None:
mean_x = np.mean(x,axis=0)
x = x - mean_x
if std_x is None:
std_x = np.std(x,axis=0)
x = x / std_x
return x, mean_x, std_x
def min_max_normalization(x, min_x = None, max_x = None):
"""
Normalize original data using the minimum and maximum value in the dataset
:param x: data to normalize
:param min_x: minimum value of data
:param max_x: maximum value of data
:return: normalized data
"""
if min_x is None:
min_x = np.min(x, axis=0)
if max_x is None:
max_x = np.max(x, axis=0)
return (x - (min_x)) / (max_x - min_x), min_x, max_x
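# Illustrative sketch (not called by the pipeline): contrasts the two scalers on
# the same hypothetical column; standardize() gives zero mean / unit variance,
# while min_max_normalization() maps the values into [0, 1].
def _demo_feature_scaling():
    x = np.array([[1.0], [2.0], [3.0], [4.0]])
    x_std, mean_x, std_x = standardize(x)
    x_minmax, min_x, max_x = min_max_normalization(x)
    return x_std.ravel(), x_minmax.ravel()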
def | (y):
"""
The labels in logistic regression are interpreted as probabilities,
so this method transfers the labels to the range [0, 1]
:param y: labels
:return: labels as probability
"""
y[y == -1] = 0
return y
| change_labels_logistic | identifier_name |
processing.py | import csv
import numpy as np
from implementations import ridge_regression
from helper_functions import load_csv_data
from helper_functions import compute_error
from helper_functions import compute_mse
from helper_functions import compute_rmse
def load(train_file, test_file):
"""
Load dataset from the given path and build numpy array to form training and test data.
:param train_file: file name/ path for input training data
:param test_file: file name/ path for input testing data
:return: features, targets, and indexes for training and testing
"""
print('\nLoad the raw training and test set data...')
y_train, tx_train, ids_train = load_csv_data(train_file)
y_test, tx_test, ids_test = load_csv_data(test_file)
print('\n... finished.')
return y_train, tx_train, ids_train, y_test, tx_test, ids_test
def get_header(file):
"""
Get the header line from the given file
:param file: file name/ path
:return: dict object specifying the first header line from the file
"""
    with open(file, 'r') as read_file:
        reader = csv.DictReader(read_file)
        return reader.fieldnames
def analyze(tx):
"""
    Analyze data by replacing the null value, -999, with the median of the non-null values in the
    corresponding column. Also, handle outliers by clipping values to the upper and lower bounds
    (mean +- 2 * std of the feature distribution). Finally, record the columns that have zero
variance, which would be removed.
:param tx: raw training data
:return: the list of columns which will be deleted
"""
num_cols = tx.shape[1]
print('\nNumber of columns in the data matrix: ',num_cols)
columns_to_remove = []
print('Analysis of data:\n')
for col in range(num_cols):
current_col = tx[:, col]
if len(np.unique(current_col)) == 1:
print('The column with index ', col, ' is all the same, it will be removed.')
columns_to_remove.append(col)
else:
current_col[current_col == -999] = np.median(current_col[current_col != -999])
# Handling the outliers
std_current_col = np.std(current_col)
mean_current_col = np.mean(current_col)
lower_bound = mean_current_col - 2 * std_current_col
upper_bound = mean_current_col + 2 * std_current_col
current_col[current_col < lower_bound] = lower_bound
current_col[current_col > upper_bound] = upper_bound
            print('Null values in the ', col, ' indexed column are replaced with the median, and outliers are clipped.')
return columns_to_remove
def remove_columns(tx, header, columns_to_remove):
"""
Remove the columns recorded in the variable, col_to_remove, from training data tx and
header.
:param tx: an array of training data
:param header: header array
:param columns_to_remove: the list indicating which column to be removed
:return: modified training data, tx, and header
"""
print("\nRemove columns...")
num_removed = 0
for col in columns_to_remove:
tx = np.delete(tx, col - num_removed, 1)
header = np.delete(header, col - num_removed + 2)
num_removed += 1
print("\n... finished.")
return tx, header
def create_csv(output_file, y, tx, ids, header, is_test):
    """
    Write the given labels, feature rows, and ids to a csv file with the given header.
    :param output_file: name/ path of the output csv file
    :param y: known label data
    :param tx: an array of feature data
    :param ids: an array of data point ids
    :param header: list of header field names for the csv file
    :param is_test: if True, write '?' as the prediction placeholder instead of the label
    """
print('\nCreate new csv file named ' + str(output_file) + '...')
with open(output_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)
writer.writeheader()
for idx, y_row, tx_row in zip(ids, y, tx):
if is_test:
prediction = '?'
else:
prediction = 'b' if y_row == -1 else 's'
dictionary = {'Id': int(idx),'Prediction': prediction}
for index in range(len(tx_row)):
dictionary[header[index + 2]] = float(tx_row[index])
writer.writerow(dictionary)
print('\n... finished.')
def split_data(y, tx, ids, jet_num):
"""
Split the given dataset such that only the data points with a certain
jet number remains, note that jet number is a discrete valued feature. In
other words, filter the dataset using the jet number.
:param y: known label data
:param tx: an array of training data
:param ids: an array of index of data
:param jet_num: discrete integer value for some feature
:return: an numpy array of labels, training data, and index having specified the certain jet number
"""
mask = tx[:, 22] == jet_num
return y[mask], tx[mask], ids[mask]
def process_data(train_file, test_file):
"""
Create 4 new training dataset files and 4 new test dataset files.
First, split the initial data tests using the discrete valued feature jet number,
which can only take the values 0, 1, 2 and 3. Second, process the split data
sets by replacing null values and deleting zero variance features.
:param train_file: file name/ path for input training data
:param test_file: file name/ path for input testing data
"""
y_train, tx_train, ids_train, y_test, tx_test, ids_test = load(train_file, test_file)
header_train = get_header(train_file)
header_test = get_header(test_file)
print('\nData set will be split into four, each representing data with different jet numbers.')
for jet_num in range(4):
print('\nProcess training set with jet number = ' + str(jet_num) + '...')
y_train_jet, tx_train_jet, ids_train_jet = split_data(y_train, tx_train, ids_train, jet_num)
columns_to_remove = analyze(tx_train_jet)
tx_train_jet, header_train_jet = remove_columns(tx_train_jet, header_train, columns_to_remove)
create_csv('train_jet_' + str(jet_num) + '.csv', y_train_jet, tx_train_jet, ids_train_jet, header_train_jet, False)
print('\n... created train_jet_' + str(jet_num) + '.csv file.')
print('\nProcess test set with jet number = ' + str(jet_num) + '...')
y_test_jet, tx_test_jet, ids_test_jet = split_data(y_test, tx_test, ids_test, jet_num)
columns_to_remove = analyze(tx_test_jet)
tx_test_jet, header_test_jet = remove_columns(tx_test_jet, header_test, columns_to_remove)
create_csv('test_jet_' + str(jet_num) + '.csv', y_test_jet, tx_test_jet, ids_test_jet, header_test_jet, True)
print('\n... created test_jet_' + str(jet_num) + '.csv file.')
def report_prediction_accuracy(y, tx, w_best, verbose=True):
"""
Report the percentage of correct predictions of a model applied on a set of labels.
:param y: labels
:param tx: training data
    :param w_best: optimized weight vector of the model
:return: accuracy of predictions on a dataset
"""
predictions = tx.dot(w_best)
predictions[predictions >= 0] = 1
predictions[predictions < 0] = -1
correct_percentage = np.sum(predictions == y) / float(len(predictions))
if verbose:
print('Percentage of correct predictions is: ', correct_percentage * 100, '%')
return correct_percentage
def build_k_indices(y, k_fold, seed):
"""
Randomly partitions the indices of the data set into k groups.
:param y: labels
:param k_fold: number of folds
:param seed: random generator seed
:return: an array of k sub-indices that are randomly partitioned
"""
num_rows = y.shape[0]
interval = int(num_rows / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_rows)
k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]
return np.array(k_indices)
def cross_validation(y, augmented_tx, k_indices, k, lambda_, report_predictions=False):
|
def report_prediction_accuracy_logistic(y, tx, w_best, verbose=True):
"""
Report the percentage of correct predictions of a model that is applied
on a set of labels. This method specifically works for logistic regression
since the prediction assumes that labels are between 0 and 1.
:param y: labels
:param tx: training data
:param w_best: Optimized weight vector of the model
:return: the percentage of correct predictions of the model when it is applied on the given test set of labels
"""
predictions = tx.dot(w_best)
predictions[predictions > 0.5] = 1
predictions[predictions <= 0.5] = 0
correct_percentage = np.sum(predictions == y) / float(len(predictions))
if verbose:
print('Percentage of correct predictions is: ',correct_percentage * 100, '%')
return correct_percentage
def train_test_split(y, tx, ratio, seed=1):
"""
Split a given training data set to a test set and a training set,
    the sizes of the created sets are determined by the given ratio.
:param y: labels
:param tx: training data
:param ratio: ratio for splitting training and testing data
:param seed: random seed
:return: numpy array of training and testing data
"""
np.random.seed(seed)
permutation = np.random.permutation(len(y))
shuffled_tx = tx[permutation]
shuffled_y = y[permutation]
split_position = int(len(y) * ratio)
tx_training, tx_test = shuffled_tx[: split_position], shuffled_tx[split_position:]
y_training, y_test = shuffled_y[: split_position], shuffled_y[split_position:]
return y_training, tx_training, y_test, tx_test
def standardize(x, mean_x=None, std_x=None):
"""
Standardize original data from the dataset.
:param x: data to standardize
:param mean_x: mean value of data given by the dataset
:param std_x: standard deviation of data given by the dataset
:return: standardized data
"""
if mean_x is None:
mean_x = np.mean(x,axis=0)
x = x - mean_x
if std_x is None:
std_x = np.std(x,axis=0)
x = x / std_x
return x, mean_x, std_x
def min_max_normalization(x, min_x = None, max_x = None):
"""
Normalize original data using the minimum and maximum value in the dataset
:param x: data to normalize
:param min_x: minimum value of data
:param max_x: maximum value of data
:return: normalized data
"""
if min_x is None:
min_x = np.min(x, axis=0)
if max_x is None:
max_x = np.max(x, axis=0)
return (x - (min_x)) / (max_x - min_x), min_x, max_x
def change_labels_logistic(y):
"""
The labels in logistic regression are interpreted as probabilities,
so this method transfers the labels to the range [0, 1]
:param y: labels
:return: labels as probability
"""
y[y == -1] = 0
return y
| """
Perform cross_validation for a specific test set from the partitioned set.
:param y: label data
:param augmented_tx: augmented features
:param k_indices: An array of k sub-indices that are randomly partitioned
    :param k: index of the fold to use as the test set
:param lambda_: regularization parameters
:param report_predictions: report prediction or not
:return: root mean square of loss training error, prediction
"""
y_test = y[k_indices[k]]
y_train = np.delete(y, k_indices[k])
augmented_tx_test = augmented_tx[k_indices[k]]
augmented_tx_train = np.delete(augmented_tx, k_indices[k], axis = 0)
w, loss_train = ridge_regression(y_train, augmented_tx_train, lambda_)
pred = report_prediction_accuracy(y_test, augmented_tx_test, w, False)
return compute_rmse(loss_train), pred | identifier_body |
processing.py | import csv
import numpy as np
from implementations import ridge_regression
from helper_functions import load_csv_data
from helper_functions import compute_error
from helper_functions import compute_mse
from helper_functions import compute_rmse
def load(train_file, test_file):
"""
Load dataset from the given path and build numpy array to form training and test data.
:param train_file: file name/ path for input training data
:param test_file: file name/ path for input testing data
:return: features, targets, and indexes for training and testing
"""
print('\nLoad the raw training and test set data...')
y_train, tx_train, ids_train = load_csv_data(train_file)
y_test, tx_test, ids_test = load_csv_data(test_file)
print('\n... finished.')
return y_train, tx_train, ids_train, y_test, tx_test, ids_test
def get_header(file):
"""
Get the header line from the given file
:param file: file name/ path
:return: dict object specifying the first header line from the file
"""
    with open(file, 'r') as read_file:
        reader = csv.DictReader(read_file)
        return reader.fieldnames
def analyze(tx):
"""
    Analyze data by replacing the null value, -999, with the median of the non-null values in the
    corresponding column. Also, handle outliers by clipping values to the upper and lower bounds
    (mean +- 2 * std of the feature distribution). Finally, record the columns that have zero
variance, which would be removed.
:param tx: raw training data
:return: the list of columns which will be deleted
"""
num_cols = tx.shape[1]
print('\nNumber of columns in the data matrix: ',num_cols)
columns_to_remove = []
print('Analysis of data:\n')
for col in range(num_cols):
current_col = tx[:, col]
if len(np.unique(current_col)) == 1:
print('The column with index ', col, ' is all the same, it will be removed.')
columns_to_remove.append(col)
else:
current_col[current_col == -999] = np.median(current_col[current_col != -999])
# Handling the outliers
std_current_col = np.std(current_col)
mean_current_col = np.mean(current_col)
lower_bound = mean_current_col - 2 * std_current_col
upper_bound = mean_current_col + 2 * std_current_col
current_col[current_col < lower_bound] = lower_bound
current_col[current_col > upper_bound] = upper_bound
            print('Null values in the ', col, ' indexed column are replaced with the median, and outliers are clipped.')
return columns_to_remove
def remove_columns(tx, header, columns_to_remove):
"""
Remove the columns recorded in the variable, col_to_remove, from training data tx and
header.
:param tx: an array of training data
:param header: header array
:param columns_to_remove: the list indicating which column to be removed
:return: modified training data, tx, and header
"""
print("\nRemove columns...")
num_removed = 0
for col in columns_to_remove:
|
print("\n... finished.")
return tx, header
def create_csv(output_file, y, tx, ids, header, is_test):
    """
    Write the given labels, feature rows, and ids to a csv file with the given header.
    :param output_file: name/ path of the output csv file
    :param y: known label data
    :param tx: an array of feature data
    :param ids: an array of data point ids
    :param header: list of header field names for the csv file
    :param is_test: if True, write '?' as the prediction placeholder instead of the label
    """
print('\nCreate new csv file named ' + str(output_file) + '...')
with open(output_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)
writer.writeheader()
for idx, y_row, tx_row in zip(ids, y, tx):
if is_test:
prediction = '?'
else:
prediction = 'b' if y_row == -1 else 's'
dictionary = {'Id': int(idx),'Prediction': prediction}
for index in range(len(tx_row)):
dictionary[header[index + 2]] = float(tx_row[index])
writer.writerow(dictionary)
print('\n... finished.')
def split_data(y, tx, ids, jet_num):
"""
Split the given dataset such that only the data points with a certain
jet number remains, note that jet number is a discrete valued feature. In
other words, filter the dataset using the jet number.
:param y: known label data
:param tx: an array of training data
:param ids: an array of index of data
:param jet_num: discrete integer value for some feature
:return: an numpy array of labels, training data, and index having specified the certain jet number
"""
mask = tx[:, 22] == jet_num
return y[mask], tx[mask], ids[mask]
def process_data(train_file, test_file):
"""
Create 4 new training dataset files and 4 new test dataset files.
First, split the initial data tests using the discrete valued feature jet number,
which can only take the values 0, 1, 2 and 3. Second, process the split data
sets by replacing null values and deleting zero variance features.
:param train_file: file name/ path for input training data
:param test_file: file name/ path for input testing data
"""
y_train, tx_train, ids_train, y_test, tx_test, ids_test = load(train_file, test_file)
header_train = get_header(train_file)
header_test = get_header(test_file)
print('\nData set will be split into four, each representing data with different jet numbers.')
for jet_num in range(4):
print('\nProcess training set with jet number = ' + str(jet_num) + '...')
y_train_jet, tx_train_jet, ids_train_jet = split_data(y_train, tx_train, ids_train, jet_num)
columns_to_remove = analyze(tx_train_jet)
tx_train_jet, header_train_jet = remove_columns(tx_train_jet, header_train, columns_to_remove)
create_csv('train_jet_' + str(jet_num) + '.csv', y_train_jet, tx_train_jet, ids_train_jet, header_train_jet, False)
print('\n... created train_jet_' + str(jet_num) + '.csv file.')
print('\nProcess test set with jet number = ' + str(jet_num) + '...')
y_test_jet, tx_test_jet, ids_test_jet = split_data(y_test, tx_test, ids_test, jet_num)
columns_to_remove = analyze(tx_test_jet)
tx_test_jet, header_test_jet = remove_columns(tx_test_jet, header_test, columns_to_remove)
create_csv('test_jet_' + str(jet_num) + '.csv', y_test_jet, tx_test_jet, ids_test_jet, header_test_jet, True)
print('\n... created test_jet_' + str(jet_num) + '.csv file.')
def report_prediction_accuracy(y, tx, w_best, verbose=True):
"""
Report the percentage of correct predictions of a model applied on a set of labels.
:param y: labels
:param tx: training data
    :param w_best: optimized weight vector of the model
:return: accuracy of predictions on a dataset
"""
predictions = tx.dot(w_best)
predictions[predictions >= 0] = 1
predictions[predictions < 0] = -1
correct_percentage = np.sum(predictions == y) / float(len(predictions))
if verbose:
print('Percentage of correct predictions is: ', correct_percentage * 100, '%')
return correct_percentage
def build_k_indices(y, k_fold, seed):
"""
Randomly partitions the indices of the data set into k groups.
:param y: labels
:param k_fold: number of folds
:param seed: random generator seed
:return: an array of k sub-indices that are randomly partitioned
"""
num_rows = y.shape[0]
interval = int(num_rows / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_rows)
k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]
return np.array(k_indices)
def cross_validation(y, augmented_tx, k_indices, k, lambda_, report_predictions=False):
"""
Perform cross_validation for a specific test set from the partitioned set.
:param y: label data
:param augmented_tx: augmented features
:param k_indices: An array of k sub-indices that are randomly partitioned
    :param k: index of the fold to use as the test set
:param lambda_: regularization parameters
:param report_predictions: report prediction or not
:return: root mean square of loss training error, prediction
"""
y_test = y[k_indices[k]]
y_train = np.delete(y, k_indices[k])
augmented_tx_test = augmented_tx[k_indices[k]]
augmented_tx_train = np.delete(augmented_tx, k_indices[k], axis = 0)
w, loss_train = ridge_regression(y_train, augmented_tx_train, lambda_)
pred = report_prediction_accuracy(y_test, augmented_tx_test, w, False)
return compute_rmse(loss_train), pred
def report_prediction_accuracy_logistic(y, tx, w_best, verbose=True):
"""
Report the percentage of correct predictions of a model that is applied
on a set of labels. This method specifically works for logistic regression
since the prediction assumes that labels are between 0 and 1.
:param y: labels
:param tx: training data
:param w_best: Optimized weight vector of the model
:return: the percentage of correct predictions of the model when it is applied on the given test set of labels
"""
predictions = tx.dot(w_best)
predictions[predictions > 0.5] = 1
predictions[predictions <= 0.5] = 0
correct_percentage = np.sum(predictions == y) / float(len(predictions))
if verbose:
print('Percentage of correct predictions is: ',correct_percentage * 100, '%')
return correct_percentage
def train_test_split(y, tx, ratio, seed=1):
"""
Split a given training data set to a test set and a training set,
    the sizes of the created sets are determined by the given ratio.
:param y: labels
:param tx: training data
:param ratio: ratio for splitting training and testing data
:param seed: random seed
:return: numpy array of training and testing data
"""
np.random.seed(seed)
permutation = np.random.permutation(len(y))
shuffled_tx = tx[permutation]
shuffled_y = y[permutation]
split_position = int(len(y) * ratio)
tx_training, tx_test = shuffled_tx[: split_position], shuffled_tx[split_position:]
y_training, y_test = shuffled_y[: split_position], shuffled_y[split_position:]
return y_training, tx_training, y_test, tx_test
def standardize(x, mean_x=None, std_x=None):
"""
Standardize original data from the dataset.
:param x: data to standardize
:param mean_x: mean value of data given by the dataset
:param std_x: standard deviation of data given by the dataset
:return: standardized data
"""
if mean_x is None:
mean_x = np.mean(x,axis=0)
x = x - mean_x
if std_x is None:
std_x = np.std(x,axis=0)
x = x / std_x
return x, mean_x, std_x
def min_max_normalization(x, min_x = None, max_x = None):
"""
Normalize original data using the minimum and maximum value in the dataset
:param x: data to normalize
:param min_x: minimum value of data
:param max_x: maximum value of data
:return: normalized data
"""
if min_x is None:
min_x = np.min(x, axis=0)
if max_x is None:
max_x = np.max(x, axis=0)
return (x - (min_x)) / (max_x - min_x), min_x, max_x
def change_labels_logistic(y):
"""
The labels in logistic regression are interpreted as probabilities,
so this method transfers the labels to the range [0, 1]
:param y: labels
:return: labels as probability
"""
y[y == -1] = 0
return y
| tx = np.delete(tx, col - num_removed, 1)
header = np.delete(header, col - num_removed + 2)
num_removed += 1 | conditional_block |
processing.py | import csv
import numpy as np
from implementations import ridge_regression
from helper_functions import load_csv_data
from helper_functions import compute_error
from helper_functions import compute_mse
from helper_functions import compute_rmse
def load(train_file, test_file):
"""
Load dataset from the given path and build numpy array to form training and test data.
:param train_file: file name/ path for input training data
:param test_file: file name/ path for input testing data
:return: features, targets, and indexes for training and testing
"""
print('\nLoad the raw training and test set data...')
y_train, tx_train, ids_train = load_csv_data(train_file)
y_test, tx_test, ids_test = load_csv_data(test_file)
print('\n... finished.')
return y_train, tx_train, ids_train, y_test, tx_test, ids_test
def get_header(file):
"""
Get the header line from the given file
:param file: file name/ path
:return: dict object specifying the first header line from the file
"""
    with open(file, 'r') as read_file:
        reader = csv.DictReader(read_file)
        return reader.fieldnames
def analyze(tx):
"""
    Analyze data by replacing the null value, -999, with the median of the non-null values in the
    corresponding column. Also, handle outliers by clipping values to the upper and lower bounds
    (mean +- 2 * std of the feature distribution). Finally, record the columns that have zero
variance, which would be removed.
:param tx: raw training data
:return: the list of columns which will be deleted
"""
num_cols = tx.shape[1]
print('\nNumber of columns in the data matrix: ',num_cols)
columns_to_remove = []
print('Analysis of data:\n')
for col in range(num_cols):
current_col = tx[:, col]
if len(np.unique(current_col)) == 1:
print('The column with index ', col, ' is all the same, it will be removed.')
columns_to_remove.append(col)
else:
current_col[current_col == -999] = np.median(current_col[current_col != -999])
# Handling the outliers
std_current_col = np.std(current_col)
mean_current_col = np.mean(current_col)
lower_bound = mean_current_col - 2 * std_current_col
upper_bound = mean_current_col + 2 * std_current_col
current_col[current_col < lower_bound] = lower_bound
current_col[current_col > upper_bound] = upper_bound
            print('Null values in the ', col, ' indexed column are replaced with the median, and outliers are clipped.')
return columns_to_remove
def remove_columns(tx, header, columns_to_remove):
"""
Remove the columns recorded in the variable, col_to_remove, from training data tx and
header.
:param tx: an array of training data
:param header: header array
:param columns_to_remove: the list indicating which column to be removed
:return: modified training data, tx, and header
"""
print("\nRemove columns...")
num_removed = 0
for col in columns_to_remove:
tx = np.delete(tx, col - num_removed, 1)
header = np.delete(header, col - num_removed + 2)
num_removed += 1
print("\n... finished.")
return tx, header
def create_csv(output_file, y, tx, ids, header, is_test):
    """
    Write the given labels, feature rows, and ids to a csv file with the given header.
    :param output_file: name/ path of the output csv file
    :param y: known label data
    :param tx: an array of feature data
    :param ids: an array of data point ids
    :param header: list of header field names for the csv file
    :param is_test: if True, write '?' as the prediction placeholder instead of the label
    """
print('\nCreate new csv file named ' + str(output_file) + '...')
with open(output_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)
writer.writeheader()
for idx, y_row, tx_row in zip(ids, y, tx):
if is_test:
prediction = '?'
else:
prediction = 'b' if y_row == -1 else 's'
dictionary = {'Id': int(idx),'Prediction': prediction} |
def split_data(y, tx, ids, jet_num):
"""
Split the given dataset such that only the data points with a certain
jet number remains, note that jet number is a discrete valued feature. In
other words, filter the dataset using the jet number.
:param y: known label data
:param tx: an array of training data
:param ids: an array of index of data
:param jet_num: discrete integer value for some feature
:return: an numpy array of labels, training data, and index having specified the certain jet number
"""
mask = tx[:, 22] == jet_num
return y[mask], tx[mask], ids[mask]
def process_data(train_file, test_file):
"""
Create 4 new training dataset files and 4 new test dataset files.
First, split the initial data tests using the discrete valued feature jet number,
which can only take the values 0, 1, 2 and 3. Second, process the split data
sets by replacing null values and deleting zero variance features.
:param train_file: file name/ path for input training data
:param test_file: file name/ path for input testing data
"""
y_train, tx_train, ids_train, y_test, tx_test, ids_test = load(train_file, test_file)
header_train = get_header(train_file)
header_test = get_header(test_file)
print('\nData set will be split into four, each representing data with different jet numbers.')
for jet_num in range(4):
print('\nProcess training set with jet number = ' + str(jet_num) + '...')
y_train_jet, tx_train_jet, ids_train_jet = split_data(y_train, tx_train, ids_train, jet_num)
columns_to_remove = analyze(tx_train_jet)
tx_train_jet, header_train_jet = remove_columns(tx_train_jet, header_train, columns_to_remove)
create_csv('train_jet_' + str(jet_num) + '.csv', y_train_jet, tx_train_jet, ids_train_jet, header_train_jet, False)
print('\n... created train_jet_' + str(jet_num) + '.csv file.')
print('\nProcess test set with jet number = ' + str(jet_num) + '...')
y_test_jet, tx_test_jet, ids_test_jet = split_data(y_test, tx_test, ids_test, jet_num)
columns_to_remove = analyze(tx_test_jet)
tx_test_jet, header_test_jet = remove_columns(tx_test_jet, header_test, columns_to_remove)
create_csv('test_jet_' + str(jet_num) + '.csv', y_test_jet, tx_test_jet, ids_test_jet, header_test_jet, True)
print('\n... created test_jet_' + str(jet_num) + '.csv file.')
def report_prediction_accuracy(y, tx, w_best, verbose=True):
"""
Report the percentage of correct predictions of a model applied on a set of labels.
:param y: labels
:param tx: training data
    :param w_best: optimized weight vector of the model
:return: accuracy of predictions on a dataset
"""
predictions = tx.dot(w_best)
predictions[predictions >= 0] = 1
predictions[predictions < 0] = -1
correct_percentage = np.sum(predictions == y) / float(len(predictions))
if verbose:
print('Percentage of correct predictions is: ', correct_percentage * 100, '%')
return correct_percentage
def build_k_indices(y, k_fold, seed):
"""
Randomly partitions the indices of the data set into k groups.
:param y: labels
:param k_fold: number of folds
:param seed: random generator seed
:return: an array of k sub-indices that are randomly partitioned
"""
num_rows = y.shape[0]
interval = int(num_rows / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_rows)
k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]
return np.array(k_indices)
def cross_validation(y, augmented_tx, k_indices, k, lambda_, report_predictions=False):
"""
Perform cross_validation for a specific test set from the partitioned set.
:param y: label data
:param augmented_tx: augmented features
:param k_indices: An array of k sub-indices that are randomly partitioned
    :param k: index of the fold to use as the test set
:param lambda_: regularization parameters
:param report_predictions: report prediction or not
:return: root mean square of loss training error, prediction
"""
y_test = y[k_indices[k]]
y_train = np.delete(y, k_indices[k])
augmented_tx_test = augmented_tx[k_indices[k]]
augmented_tx_train = np.delete(augmented_tx, k_indices[k], axis = 0)
w, loss_train = ridge_regression(y_train, augmented_tx_train, lambda_)
pred = report_prediction_accuracy(y_test, augmented_tx_test, w, False)
return compute_rmse(loss_train), pred
def report_prediction_accuracy_logistic(y, tx, w_best, verbose=True):
"""
Report the percentage of correct predictions of a model that is applied
on a set of labels. This method specifically works for logistic regression
since the prediction assumes that labels are between 0 and 1.
:param y: labels
:param tx: training data
:param w_best: Optimized weight vector of the model
:return: the percentage of correct predictions of the model when it is applied on the given test set of labels
"""
predictions = tx.dot(w_best)
predictions[predictions > 0.5] = 1
predictions[predictions <= 0.5] = 0
correct_percentage = np.sum(predictions == y) / float(len(predictions))
if verbose:
print('Percentage of correct predictions is: ',correct_percentage * 100, '%')
return correct_percentage
def train_test_split(y, tx, ratio, seed=1):
"""
Split a given training data set to a test set and a training set,
    the sizes of the created sets are determined by the given ratio.
:param y: labels
:param tx: training data
:param ratio: ratio for splitting training and testing data
:param seed: random seed
:return: numpy array of training and testing data
"""
np.random.seed(seed)
permutation = np.random.permutation(len(y))
shuffled_tx = tx[permutation]
shuffled_y = y[permutation]
split_position = int(len(y) * ratio)
tx_training, tx_test = shuffled_tx[: split_position], shuffled_tx[split_position:]
y_training, y_test = shuffled_y[: split_position], shuffled_y[split_position:]
return y_training, tx_training, y_test, tx_test
def standardize(x, mean_x=None, std_x=None):
"""
Standardize original data from the dataset.
:param x: data to standardize
:param mean_x: mean value of data given by the dataset
:param std_x: standard deviation of data given by the dataset
:return: standardized data
"""
if mean_x is None:
mean_x = np.mean(x,axis=0)
x = x - mean_x
if std_x is None:
std_x = np.std(x,axis=0)
x = x / std_x
return x, mean_x, std_x
def min_max_normalization(x, min_x = None, max_x = None):
"""
Normalize original data using the minimum and maximum value in the dataset
:param x: data to normalize
:param min_x: minimum value of data
:param max_x: maximum value of data
:return: normalized data
"""
if min_x is None:
min_x = np.min(x, axis=0)
if max_x is None:
max_x = np.max(x, axis=0)
return (x - (min_x)) / (max_x - min_x), min_x, max_x
def change_labels_logistic(y):
"""
The labels in logistic regression are interpreted as probabilities,
so this method transfers the labels to the range [0, 1]
:param y: labels
:return: labels as probability
"""
y[y == -1] = 0
return y | for index in range(len(tx_row)):
dictionary[header[index + 2]] = float(tx_row[index])
writer.writerow(dictionary)
print('\n... finished.')
| random_line_split |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's documentation for more information.
//! A successful handshake returns a `NoiseStream` which is defined in the
//! [stream] module.
//!
//! [stream]: network::noise::stream
use crate::noise::stream::NoiseStream;
use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libra_config::config::NetworkPeerInfo;
use libra_crypto::{noise, x25519};
use libra_types::PeerId;
use netcore::transport::ConnectionOrigin;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
time,
};
/// In a mutually authenticated network, a client message is accompanied with a timestamp.
/// This is in order to prevent replay attacks, where the attacker does not know the client's static key,
/// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations.
///
/// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing,
/// effectively considering it as a stateful counter.
///
/// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
            &timestamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond value is a 16-byte value in rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
/// Noise handshake authentication mode.
pub enum HandshakeAuthMode {
/// In `Mutual` mode, both sides will authenticate each other with their
/// `trusted_peers` set. We also include replay attack mitigation in this mode.
///
/// For example, in the Libra validator network, validator peers will only
/// allow connections from other validator peers. They will use this mode to
/// check that inbound connections authenticate to a network public key
/// actually contained in the current validator set.
Mutual {
// Only use anti replay protection in mutual-auth scenarios. In theory,
// this is applicable everywhere; however, we would need to spend some
// time making this more sophisticated so it garbage collects old
// timestamps and doesn't use unbounded space. These are not problems in
// mutual-auth scenarios because we have a bounded set of trusted peers
// that rarely changes.
anti_replay_timestamps: RwLock<AntiReplayTimestamps>,
trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>,
},
/// In `ServerOnly` mode, the dialer authenticates the server. However, the
/// server does not care who connects to them and will allow inbound connections
/// from any peer.
ServerOnly,
}
impl HandshakeAuthMode {
pub fn mutual(trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>) -> Self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps: RwLock::new(AntiReplayTimestamps::default()),
trusted_peers,
}
}
fn anti_replay_timestamps(&self) -> Option<&RwLock<AntiReplayTimestamps>> {
match &self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps,
..
} => Some(&anti_replay_timestamps),
HandshakeAuthMode::ServerOnly => None,
}
}
fn trusted_peers(&self) -> Option<&RwLock<HashMap<PeerId, NetworkPeerInfo>>> {
match &self {
HandshakeAuthMode::Mutual { trusted_peers, .. } => Some(&trusted_peers),
HandshakeAuthMode::ServerOnly => None,
}
}
}
// Noise Upgrader
// --------------
// Noise by default is not aware of the upper or lower protocol layers,
// so we need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long the noise messages we send and receive are,
//   in order to pass them to the noise implementation
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseUpgrader {
/// Config for executing Noise handshakes. Includes our static private key.
noise_config: noise::NoiseConfig,
/// Handshake authentication can be either mutual or server-only authentication.
auth_mode: HandshakeAuthMode,
}
impl NoiseUpgrader {
/// Create a new NoiseConfig with the provided keypair and authentication mode.
pub fn new(key: x25519::PrivateKey, auth_mode: HandshakeAuthMode) -> Self {
Self {
noise_config: noise::NoiseConfig::new(key),
auth_mode,
}
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(philiphayes): rework socket-bench-server so we can remove this function
#[allow(dead_code)]
pub async fn upgrade<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
remote_public_key: Option<x25519::PublicKey>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.upgrade_outbound(socket, remote_public_key).await?
}
ConnectionOrigin::Inbound => self.upgrade_inbound(socket).await?,
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
/// Perform an outbound protocol upgrade on this connection.
///
/// This runs the "client" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. In mutual auth
/// scenarios, we will also include an anti replay attack counter in the
/// Noise handshake payload. Currently this counter is always a millisecond-
/// granularity unix epoch timestamp.
pub async fn upgrade_outbound<TSocket>(
&self,
mut socket: TSocket,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
        // in mutually authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = match self.auth_mode {
HandshakeAuthMode::Mutual { .. } => {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
}
HandshakeAuthMode::ServerOnly => None,
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.noise_config
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.noise_config
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
/// Perform an inbound protocol upgrade on this connection.
///
/// This runs the "server" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. If the configuration
/// requires mutual authentication, we will only allow connections from peers
/// that successfully authenticate to a public key in our `trusted_peers` set.
/// In addition, we will expect the client to include an anti replay attack
/// counter in the Noise handshake payload in mutual auth scenarios.
pub async fn upgrade_inbound<TSocket>(
&self,
mut socket: TSocket,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.noise_config
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// if mutual auth mode, verify the remote pubkey is in our set of trusted peers
if let Some(trusted_peers) = self.auth_mode.trusted_peers() {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if mutual auth mode, verify this handshake is not a replay
if let Some(anti_replay_timestamps) = self.auth_mode.anti_replay_timestamps() {
            // check that the payload received is the client timestamp (in milliseconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = rand::rngs::OsRng;
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
let session = self
.noise_config
.respond_to_client(&mut rng, handshake_state, None, &mut server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// send the response
socket.write_all(&server_response).await?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
}
//
// Tests
// -----
//
#[cfg(test)]
mod test {
use super::*;
use crate::common::NetworkPublicKeys;
use futures::{executor::block_on, future::join};
use libra_crypto::{test_utils::TEST_SEED, traits::Uniform as _};
use memsocket::MemorySocket;
use rand::SeedableRng as _;
use std::{
io,
sync::{Arc, RwLock},
};
/// helper to setup two testing peers
fn build_peers(
is_mutual_auth: bool,
) -> (
(NoiseUpgrader, x25519::PublicKey),
(NoiseUpgrader, x25519::PublicKey),
) {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let client_private = x25519::PrivateKey::generate(&mut rng);
let client_public = client_private.public_key();
let server_private = x25519::PrivateKey::generate(&mut rng);
let server_public = server_private.public_key();
let (client_auth, server_auth) = if is_mutual_auth {
let client_id = PeerId::random();
let client_keys = NetworkPublicKeys {
identity_public_key: client_public,
};
let server_id = PeerId::random();
let server_keys = NetworkPublicKeys {
identity_public_key: server_public,
};
let trusted_peers = Arc::new(RwLock::new( | let server_auth = HandshakeAuthMode::mutual(trusted_peers);
(client_auth, server_auth)
} else {
(HandshakeAuthMode::ServerOnly, HandshakeAuthMode::ServerOnly)
};
let client = NoiseUpgrader::new(client_private, client_auth);
let server = NoiseUpgrader::new(server_private, server_auth);
((client, client_public), (server, server_public))
}
/// helper to perform a noise handshake with two peers
fn perform_handshake(
client: NoiseUpgrader,
server: NoiseUpgrader,
server_public_key: x25519::PublicKey,
) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> {
// create an in-memory socket for testing
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
// perform the handshake
let (client_session, server_session) = block_on(join(
client.upgrade_outbound(dialer_socket, server_public_key),
server.upgrade_inbound(listener_socket),
));
Ok((client_session?, server_session?))
}
fn test_handshake_success(is_mutual_auth: bool) {
// perform handshake with two testing peers
let ((client, client_public), (server, server_public)) = build_peers(is_mutual_auth);
let (client, server) = perform_handshake(client, server, server_public).unwrap();
assert_eq!(client.get_remote_static(), server_public);
assert_eq!(server.get_remote_static(), client_public);
}
#[test]
fn test_handshake_server_only_auth() {
test_handshake_success(false /* is_mutual_auth */);
}
#[test]
fn test_handshake_mutual_auth() {
test_handshake_success(true /* is_mutual_auth */);
}
} | vec![(client_id, client_keys), (server_id, server_keys)]
.into_iter()
.collect(),
));
let client_auth = HandshakeAuthMode::mutual(trusted_peers.clone()); | random_line_split |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's documentation for more information.
//! A successful handshake returns a `NoiseStream` which is defined in the
//! [stream] module.
//!
//! [stream]: network::noise::stream
use crate::noise::stream::NoiseStream;
use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libra_config::config::NetworkPeerInfo;
use libra_crypto::{noise, x25519};
use libra_types::PeerId;
use netcore::transport::ConnectionOrigin;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
time,
};
/// In a mutually authenticated network, a client message is accompanied with a timestamp.
/// This is in order to prevent replay attacks, where the attacker does not know the client's static key,
/// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations.
///
/// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing,
/// effectively considering it as a stateful counter.
///
/// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
×tamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond value is a 16-byte value in rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
/// Noise handshake authentication mode.
pub enum HandshakeAuthMode {
/// In `Mutual` mode, both sides will authenticate each other with their
/// `trusted_peers` set. We also include replay attack mitigation in this mode.
///
/// For example, in the Libra validator network, validator peers will only
/// allow connections from other validator peers. They will use this mode to
/// check that inbound connections authenticate to a network public key
/// actually contained in the current validator set.
Mutual {
// Only use anti replay protection in mutual-auth scenarios. In theory,
// this is applicable everywhere; however, we would need to spend some
// time making this more sophisticated so it garbage collects old
// timestamps and doesn't use unbounded space. These are not problems in
// mutual-auth scenarios because we have a bounded set of trusted peers
// that rarely changes.
anti_replay_timestamps: RwLock<AntiReplayTimestamps>,
trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>,
},
/// In `ServerOnly` mode, the dialer authenticates the server. However, the
/// server does not care who connects to them and will allow inbound connections
/// from any peer.
ServerOnly,
}
impl HandshakeAuthMode {
pub fn mutual(trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>) -> Self |
fn anti_replay_timestamps(&self) -> Option<&RwLock<AntiReplayTimestamps>> {
match &self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps,
..
} => Some(&anti_replay_timestamps),
HandshakeAuthMode::ServerOnly => None,
}
}
fn trusted_peers(&self) -> Option<&RwLock<HashMap<PeerId, NetworkPeerInfo>>> {
match &self {
HandshakeAuthMode::Mutual { trusted_peers, .. } => Some(&trusted_peers),
HandshakeAuthMode::ServerOnly => None,
}
}
}
// Noise Upgrader
// --------------
// Noise by default is not aware of the above or lower protocol layers,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementaiton
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseUpgrader {
/// Config for executing Noise handshakes. Includes our static private key.
noise_config: noise::NoiseConfig,
/// Handshake authentication can be either mutual or server-only authentication.
auth_mode: HandshakeAuthMode,
}
impl NoiseUpgrader {
/// Create a new NoiseConfig with the provided keypair and authentication mode.
pub fn new(key: x25519::PrivateKey, auth_mode: HandshakeAuthMode) -> Self {
Self {
noise_config: noise::NoiseConfig::new(key),
auth_mode,
}
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(philiphayes): rework socket-bench-server so we can remove this function
#[allow(dead_code)]
pub async fn upgrade<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
remote_public_key: Option<x25519::PublicKey>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.upgrade_outbound(socket, remote_public_key).await?
}
ConnectionOrigin::Inbound => self.upgrade_inbound(socket).await?,
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
/// Perform an outbound protocol upgrade on this connection.
///
/// This runs the "client" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. In mutual auth
/// scenarios, we will also include an anti replay attack counter in the
/// Noise handshake payload. Currently this counter is always a millisecond-
/// granularity unix epoch timestamp.
pub async fn upgrade_outbound<TSocket>(
&self,
mut socket: TSocket,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutual authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = match self.auth_mode {
HandshakeAuthMode::Mutual { .. } => {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
}
HandshakeAuthMode::ServerOnly => None,
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.noise_config
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.noise_config
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
/// Perform an inbound protocol upgrade on this connection.
///
/// This runs the "server" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. If the configuration
/// requires mutual authentication, we will only allow connections from peers
/// that successfully authenticate to a public key in our `trusted_peers` set.
/// In addition, we will expect the client to include an anti replay attack
/// counter in the Noise handshake payload in mutual auth scenarios.
pub async fn upgrade_inbound<TSocket>(
&self,
mut socket: TSocket,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.noise_config
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// if mutual auth mode, verify the remote pubkey is in our set of trusted peers
if let Some(trusted_peers) = self.auth_mode.trusted_peers() {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if mutual auth mode, verify this handshake is not a replay
if let Some(anti_replay_timestamps) = self.auth_mode.anti_replay_timestamps() {
// check that the payload received as the client timestamp (in seconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = rand::rngs::OsRng;
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
let session = self
.noise_config
.respond_to_client(&mut rng, handshake_state, None, &mut server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// send the response
socket.write_all(&server_response).await?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
}
//
// Tests
// -----
//
#[cfg(test)]
mod test {
use super::*;
use crate::common::NetworkPublicKeys;
use futures::{executor::block_on, future::join};
use libra_crypto::{test_utils::TEST_SEED, traits::Uniform as _};
use memsocket::MemorySocket;
use rand::SeedableRng as _;
use std::{
io,
sync::{Arc, RwLock},
};
/// helper to setup two testing peers
fn build_peers(
is_mutual_auth: bool,
) -> (
(NoiseUpgrader, x25519::PublicKey),
(NoiseUpgrader, x25519::PublicKey),
) {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let client_private = x25519::PrivateKey::generate(&mut rng);
let client_public = client_private.public_key();
let server_private = x25519::PrivateKey::generate(&mut rng);
let server_public = server_private.public_key();
let (client_auth, server_auth) = if is_mutual_auth {
let client_id = PeerId::random();
let client_keys = NetworkPublicKeys {
identity_public_key: client_public,
};
let server_id = PeerId::random();
let server_keys = NetworkPublicKeys {
identity_public_key: server_public,
};
let trusted_peers = Arc::new(RwLock::new(
vec![(client_id, client_keys), (server_id, server_keys)]
.into_iter()
.collect(),
));
let client_auth = HandshakeAuthMode::mutual(trusted_peers.clone());
let server_auth = HandshakeAuthMode::mutual(trusted_peers);
(client_auth, server_auth)
} else {
(HandshakeAuthMode::ServerOnly, HandshakeAuthMode::ServerOnly)
};
let client = NoiseUpgrader::new(client_private, client_auth);
let server = NoiseUpgrader::new(server_private, server_auth);
((client, client_public), (server, server_public))
}
/// helper to perform a noise handshake with two peers
fn perform_handshake(
client: NoiseUpgrader,
server: NoiseUpgrader,
server_public_key: x25519::PublicKey,
) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> {
// create an in-memory socket for testing
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
// perform the handshake
let (client_session, server_session) = block_on(join(
client.upgrade_outbound(dialer_socket, server_public_key),
server.upgrade_inbound(listener_socket),
));
Ok((client_session?, server_session?))
}
fn test_handshake_success(is_mutual_auth: bool) {
// perform handshake with two testing peers
let ((client, client_public), (server, server_public)) = build_peers(is_mutual_auth);
let (client, server) = perform_handshake(client, server, server_public).unwrap();
assert_eq!(client.get_remote_static(), server_public);
assert_eq!(server.get_remote_static(), client_public);
}
#[test]
fn test_handshake_server_only_auth() {
test_handshake_success(false /* is_mutual_auth */);
}
#[test]
fn test_handshake_mutual_auth() {
test_handshake_success(true /* is_mutual_auth */);
}
}
| {
HandshakeAuthMode::Mutual {
anti_replay_timestamps: RwLock::new(AntiReplayTimestamps::default()),
trusted_peers,
}
} | identifier_body |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's documentation for more information.
//! A successful handshake returns a `NoiseStream` which is defined in the
//! [stream] module.
//!
//! [stream]: network::noise::stream
use crate::noise::stream::NoiseStream;
use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libra_config::config::NetworkPeerInfo;
use libra_crypto::{noise, x25519};
use libra_types::PeerId;
use netcore::transport::ConnectionOrigin;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
time,
};
/// In a mutually authenticated network, a client message is accompanied with a timestamp.
/// This is in order to prevent replay attacks, where the attacker does not know the client's static key,
/// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations.
///
/// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing,
/// effectively considering it as a stateful counter.
///
/// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
×tamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond value is a 16-byte value in rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
/// Noise handshake authentication mode.
pub enum HandshakeAuthMode {
/// In `Mutual` mode, both sides will authenticate each other with their
/// `trusted_peers` set. We also include replay attack mitigation in this mode.
///
/// For example, in the Libra validator network, validator peers will only
/// allow connections from other validator peers. They will use this mode to
/// check that inbound connections authenticate to a network public key
/// actually contained in the current validator set.
Mutual {
// Only use anti replay protection in mutual-auth scenarios. In theory,
// this is applicable everywhere; however, we would need to spend some
// time making this more sophisticated so it garbage collects old
// timestamps and doesn't use unbounded space. These are not problems in
// mutual-auth scenarios because we have a bounded set of trusted peers
// that rarely changes.
anti_replay_timestamps: RwLock<AntiReplayTimestamps>,
trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>,
},
/// In `ServerOnly` mode, the dialer authenticates the server. However, the
/// server does not care who connects to them and will allow inbound connections
/// from any peer.
ServerOnly,
}
impl HandshakeAuthMode {
pub fn mutual(trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>) -> Self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps: RwLock::new(AntiReplayTimestamps::default()),
trusted_peers,
}
}
fn anti_replay_timestamps(&self) -> Option<&RwLock<AntiReplayTimestamps>> {
match &self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps,
..
} => Some(&anti_replay_timestamps),
HandshakeAuthMode::ServerOnly => None,
}
}
fn trusted_peers(&self) -> Option<&RwLock<HashMap<PeerId, NetworkPeerInfo>>> {
match &self {
HandshakeAuthMode::Mutual { trusted_peers, .. } => Some(&trusted_peers),
HandshakeAuthMode::ServerOnly => None,
}
}
}
// Noise Upgrader
// --------------
// Noise by default is not aware of the above or lower protocol layers,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementaiton
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseUpgrader {
/// Config for executing Noise handshakes. Includes our static private key.
noise_config: noise::NoiseConfig,
/// Handshake authentication can be either mutual or server-only authentication.
auth_mode: HandshakeAuthMode,
}
impl NoiseUpgrader {
/// Create a new NoiseConfig with the provided keypair and authentication mode.
pub fn new(key: x25519::PrivateKey, auth_mode: HandshakeAuthMode) -> Self {
Self {
noise_config: noise::NoiseConfig::new(key),
auth_mode,
}
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(philiphayes): rework socket-bench-server so we can remove this function
#[allow(dead_code)]
pub async fn | <TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
remote_public_key: Option<x25519::PublicKey>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.upgrade_outbound(socket, remote_public_key).await?
}
ConnectionOrigin::Inbound => self.upgrade_inbound(socket).await?,
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
/// Perform an outbound protocol upgrade on this connection.
///
/// This runs the "client" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. In mutual auth
/// scenarios, we will also include an anti replay attack counter in the
/// Noise handshake payload. Currently this counter is always a millisecond-
/// granularity unix epoch timestamp.
pub async fn upgrade_outbound<TSocket>(
&self,
mut socket: TSocket,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutual authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = match self.auth_mode {
HandshakeAuthMode::Mutual { .. } => {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
}
HandshakeAuthMode::ServerOnly => None,
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.noise_config
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.noise_config
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
/// Perform an inbound protocol upgrade on this connection.
///
/// This runs the "server" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. If the configuration
/// requires mutual authentication, we will only allow connections from peers
/// that successfully authenticate to a public key in our `trusted_peers` set.
/// In addition, we will expect the client to include an anti replay attack
/// counter in the Noise handshake payload in mutual auth scenarios.
pub async fn upgrade_inbound<TSocket>(
&self,
mut socket: TSocket,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.noise_config
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// if mutual auth mode, verify the remote pubkey is in our set of trusted peers
if let Some(trusted_peers) = self.auth_mode.trusted_peers() {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if mutual auth mode, verify this handshake is not a replay
if let Some(anti_replay_timestamps) = self.auth_mode.anti_replay_timestamps() {
// check that the payload received as the client timestamp (in seconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = rand::rngs::OsRng;
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
let session = self
.noise_config
.respond_to_client(&mut rng, handshake_state, None, &mut server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// send the response
socket.write_all(&server_response).await?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
}
//
// Tests
// -----
//
#[cfg(test)]
mod test {
use super::*;
use crate::common::NetworkPublicKeys;
use futures::{executor::block_on, future::join};
use libra_crypto::{test_utils::TEST_SEED, traits::Uniform as _};
use memsocket::MemorySocket;
use rand::SeedableRng as _;
use std::{
io,
sync::{Arc, RwLock},
};
/// helper to setup two testing peers
fn build_peers(
is_mutual_auth: bool,
) -> (
(NoiseUpgrader, x25519::PublicKey),
(NoiseUpgrader, x25519::PublicKey),
) {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let client_private = x25519::PrivateKey::generate(&mut rng);
let client_public = client_private.public_key();
let server_private = x25519::PrivateKey::generate(&mut rng);
let server_public = server_private.public_key();
let (client_auth, server_auth) = if is_mutual_auth {
let client_id = PeerId::random();
let client_keys = NetworkPublicKeys {
identity_public_key: client_public,
};
let server_id = PeerId::random();
let server_keys = NetworkPublicKeys {
identity_public_key: server_public,
};
let trusted_peers = Arc::new(RwLock::new(
vec![(client_id, client_keys), (server_id, server_keys)]
.into_iter()
.collect(),
));
let client_auth = HandshakeAuthMode::mutual(trusted_peers.clone());
let server_auth = HandshakeAuthMode::mutual(trusted_peers);
(client_auth, server_auth)
} else {
(HandshakeAuthMode::ServerOnly, HandshakeAuthMode::ServerOnly)
};
let client = NoiseUpgrader::new(client_private, client_auth);
let server = NoiseUpgrader::new(server_private, server_auth);
((client, client_public), (server, server_public))
}
/// helper to perform a noise handshake with two peers
fn perform_handshake(
client: NoiseUpgrader,
server: NoiseUpgrader,
server_public_key: x25519::PublicKey,
) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> {
// create an in-memory socket for testing
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
// perform the handshake
let (client_session, server_session) = block_on(join(
client.upgrade_outbound(dialer_socket, server_public_key),
server.upgrade_inbound(listener_socket),
));
Ok((client_session?, server_session?))
}
fn test_handshake_success(is_mutual_auth: bool) {
// perform handshake with two testing peers
let ((client, client_public), (server, server_public)) = build_peers(is_mutual_auth);
let (client, server) = perform_handshake(client, server, server_public).unwrap();
assert_eq!(client.get_remote_static(), server_public);
assert_eq!(server.get_remote_static(), client_public);
}
#[test]
fn test_handshake_server_only_auth() {
test_handshake_success(false /* is_mutual_auth */);
}
#[test]
fn test_handshake_mutual_auth() {
test_handshake_success(true /* is_mutual_auth */);
}
}
| upgrade | identifier_name |
lib.rs | // This crate is a library
#![crate_type = "lib"]
// This crate is named "pixel"
#![crate_name = "pixel"]
// Use +nightly to overpass this
#![feature(test)]
#[cfg(test)]
mod tests;
extern crate rand;
use std::ops::Not;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use rand::Rng;
#[derive(Debug, Copy, Clone, PartialEq)]
/// A Color is represented here by three colors, each of them are u8.
pub struct Color {
/// Red
r: u8,
/// Green
g: u8,
/// Blue
b: u8
}
/// Implements some functions for the struct Color
impl Color {
/// Constructor
pub fn | (red: u8, green: u8, blue:u8) -> Color {
return Color {r : red, g : green, b : blue};
}
/// Conctructor with random values for each color
pub fn new_random() -> Color {
let mut r = rand::thread_rng();
return Color {
r : r.gen::<u8>(),
g : r.gen::<u8>(),
b : r.gen::<u8>()
}
}
/// Default constructor
pub fn empty_color() -> Color {
return Color {r : 0, g : 0, b : 0};
}
/// Red's getter
pub fn red(&self) -> u8 {
return self.r;
}
/// Green's getter
pub fn green(&self) -> u8 {
return self.g;
}
/// Blue's getter
pub fn blue(&self) -> u8 {
return self.b;
}
/// toString() to display a Color
pub fn display(&self) {
println!("r : {}, g : {}, b : {}", self.r, self.g, self.b);
}
/// Equals to determine if the two Color in parameters are equals.
/// Return true if self and other and equals
/// (the r, g and b of self are equals to the r, g and b of other)
///
/// # Arguments
/// * `self` - a Color to be compared
/// * `other` - a second Color to compare the first one
///
/// # Return
/// * `bool` - corresponding to the equality (or not) of the two arguments
pub fn eq(&self, other: Color) -> bool {
if(self.red() == other.red())
&& (self.green() == other.green())
&& (self.blue() == other.blue() ) {
return true;
}
return false;
}
/// Transform a RGB pixel (Color) to a grayscale pixel (between 0 and 255).
/// Use an intermediate u32 var to calculate the average without u8 overflow.
///
/// # Arguments
/// * `self` - a Color to be converted
///
/// # Return
/// * `u8` - an integer corresponding to the converted Color
///
/// # Example
/// If a Color(30, 28, 255) is passed as a parameter
/// the function will return 104.
pub fn grayscale(&self) -> u8 {
let average: u32 = (self.r as u32 + self.g as u32 + self.b as u32)/3;
return average as u8;
}
}
/// Impl block to implement the not() function
impl Not for Color {
type Output = Color;
/// Revert a pixel's color with !color
///
/// #Arguments
/// * `self` - a Color to be reverted
///
/// #Return
/// * `Self` - a Color reverted
///
/// #Example
/// If a Color(100, 50, 75) is passed as a parametr
/// the function will return a Color(155, 205, 180).
fn not(self) -> Self::Output {
let mut c = self;
c.r = 255 - c.r;
c.g = 255 - c.g;
c.b = 255 - c.b;
return c;
}
}
#[derive(Debug)]
/// An image is defined with a width, a height and a pixels.
pub struct Image {
/// A width is an u32
width: u32,
/// A height is an u32
height: u32,
/// A pixels is a Vec<Color>
pixels: Vec<Color> // 2D array dynamic
}
/// Used to call every Image's functions
impl Image {
/// Constructor
pub fn new(width:u32, height:u32, pixels:Vec<Color>) -> Image {
return Image {width : width, height : height, pixels : pixels};
}
/// Width's getter
pub fn width(&self) -> u32 {
return self.width;
}
/// Height's getter
pub fn height(&self) -> u32 {
return self.height;
}
/// Pixels getter
pub fn pixels(&self) -> &Vec<Color> {
return &self.pixels;
}
/// Equals()
pub fn eq(&self, other: Image) -> bool {
if self.height != other.height {
return false;
}
if self.width != other.width {
return false;
}
if self.pixels != other.pixels {
return false;
}
return true;
}
/// Create a new Image from a .ppm File
/// # Arguments
/// * filename: &Path - The path corresponding to the file to be read.
///
/// # Return
/// * Option<Image> - The Image created through the file read. It is Optionnal
/// to handle the case where a problem occurs during the reading of the file.
pub fn new_with_file(filename: &Path) -> Option<Image> {
let mut width: u32 = 0;
let mut height: u32 = 0;
let mut pixels: Vec<Color> = Vec::new();
let file = File::open(filename).expect("Unable to open the File");
let buf_reader = BufReader::new(file);
for (i, line) in buf_reader.lines().enumerate().by_ref() {
// Treatment for the first line, if not P3 it's not a RGB picture => exit.
if i == 0 {
if &line.unwrap() != "P3" {
return None;
}
// The second line is the dimensions of the picture.
} else if i == 1 {
let list_num: Vec<u32> = get_number32_from_string(&line.unwrap());
width = list_num[0];
height = list_num[1];
} else {
// If the line begin with # it's a commentary
// Or line 2 (the max size of a color), we ignore both.
let s: &String = &line.unwrap();
if (s.chars().next().unwrap() != '#') || (i != 2) {
let colors = get_number8_from_string(&s);
if colors.len() == 3 {
let c = Color::new(colors[0], colors[1], colors[2]);
pixels.push(c);
}
}
}
}
return Some(Image::new(width, height, pixels));
}
/// Invert the Colors of an Image using c.not()
/// to invert each color of a pixel
///
/// # Arguments
/// * image: Image - the image to be inverted
/// # Return
/// * Image - the image inverted
pub fn invert(image: &Image) -> Image {
let mut inv: Vec<Color> = Vec::new();
for c in &image.pixels {
inv.push(c.not());
}
return Image::new(image.width, image.height, inv);
}
/// Write the image passed as parameter in a file.
/// # Arguments
/// * image:Image - the image to write in the file
/// * path:&Path - the path where the file will be saved plus it's name.
///
/// # Containts
/// * The first line is the type of picture : P3 is for the RGB system color
/// * The second line is the size of the picture (in pixels).
/// Two integers define the file's width and height.
/// * The third line is the max value of each color (255).
/// * The rest of the file is the colors. There is (width * height) lines
/// of three values (RGB) for each pixel.
pub fn save_file_from_image(image: &Image, path: &Path) -> std::io::Result<()> {
let mut file = File::create(path).expect("Unable to create the file");
file.write_all(b"P3\n").expect("Unable to write P3.");
file.write_fmt(format_args!("{} {}\n", image.width(), image.height()))
.expect("Unable to write width and height.");
file.write_all(b"255\n").expect("Unable to write max value for Colors.");
for c in &image.pixels {
file.write_fmt(format_args!("{} {} {} \n", c.red(), c.green(), c.blue()))
.expect("Unable to write colors.");
}
Ok(())
}
/// Return a grayscale Image from a RGB Image.
/// Each pixel of the grayscale Image is the sum of the RGB pixel / 3.
///
/// # Arguments
/// * image:Image - The RGB Image to be converted
/// # Return
/// * Image - The grayscale Image converted
pub fn grayscale(image: &Image) -> Image {
let mut gray: Vec<Color> = Vec::new();
for i in &image.pixels {
let c: u8 = Color::grayscale(i);
gray.push(Color::new(c, c, c));
}
return Image::new(image.width, image.height, gray);
}
}
/// Transform a String with numbers in it into
/// a Vector<u32> for the picture size.
/// # Example :
/// * "1 23 45" passed as parameters will return Vec{1, 23, 45}.
/// * "1 23 azer //& &é45" passed as parameters will return Vec{1, 23, 45}
fn get_number32_from_string(line: &String) -> Vec<u32> {
let mut list_num: Vec<u32> = Vec::new();
let mut n = String::new();
for c in line.chars() {
if c == ' ' || c == '\0' {
if !n.is_empty() {
list_num.push(n.parse().unwrap());
n.clear();
}
} else if c.is_digit(10){
n.push(c);
}
}
// Add if a numer is at the end of the line
if !n.is_empty() {
list_num.push(n.parse().unwrap());
}
return list_num;
}
/// Transform a String with numbers in it into
/// a Vector<u8> for the colors.
/// # Example :
/// * "1 23 45" passed as parameters will return Vec{1, 23, 45}.
/// * "1 23 azer //& &é45" passed as parameters will return Vec{1, 23, 45}
fn get_number8_from_string(line: &String) -> Vec<u8> {
let mut list_num: Vec<u8> = Vec::new();
let mut n = String::new();
for c in line.chars() {
if c == ' ' || c == '\0' {
if !n.is_empty() {
list_num.push(n.parse().unwrap());
n.clear();
}
} else if c.is_digit(10){
n.push(c);
}
}
// Add if a numer is at the end of the line
if !n.is_empty() {
list_num.push(n.parse().unwrap());
}
return list_num;
}
| new | identifier_name |
lib.rs | // This crate is a library
#![crate_type = "lib"]
// This crate is named "pixel"
#![crate_name = "pixel"]
// Use +nightly to overpass this
#![feature(test)]
#[cfg(test)]
mod tests;
extern crate rand;
use std::ops::Not;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use rand::Rng;
#[derive(Debug, Copy, Clone, PartialEq)]
/// A Color is represented here by three colors, each of them are u8.
pub struct Color {
/// Red
r: u8,
/// Green
g: u8,
/// Blue
b: u8
}
/// Implements some functions for the struct Color
impl Color {
/// Constructor
pub fn new(red: u8, green: u8, blue:u8) -> Color {
return Color {r : red, g : green, b : blue};
}
/// Conctructor with random values for each color
pub fn new_random() -> Color {
let mut r = rand::thread_rng();
return Color {
r : r.gen::<u8>(),
g : r.gen::<u8>(),
b : r.gen::<u8>()
}
}
/// Default constructor
pub fn empty_color() -> Color {
return Color {r : 0, g : 0, b : 0};
}
/// Red's getter
pub fn red(&self) -> u8 {
return self.r;
}
/// Green's getter
pub fn green(&self) -> u8 {
return self.g;
}
/// Blue's getter
pub fn blue(&self) -> u8 {
return self.b;
}
/// toString() to display a Color
pub fn display(&self) {
println!("r : {}, g : {}, b : {}", self.r, self.g, self.b);
}
/// Equals to determine if the two Color in parameters are equals.
/// Return true if self and other and equals
/// (the r, g and b of self are equals to the r, g and b of other)
///
/// # Arguments
/// * `self` - a Color to be compared
/// * `other` - a second Color to compare the first one
///
/// # Return
/// * `bool` - corresponding to the equality (or not) of the two arguments
pub fn eq(&self, other: Color) -> bool {
if(self.red() == other.red())
&& (self.green() == other.green())
&& (self.blue() == other.blue() ) {
return true;
}
return false;
}
/// Transform a RGB pixel (Color) to a grayscale pixel (between 0 and 255).
/// Use an intermediate u32 var to calculate the average without u8 overflow.
///
/// # Arguments
/// * `self` - a Color to be converted
///
/// # Return
/// * `u8` - an integer corresponding to the converted Color
///
/// # Example
/// If a Color(30, 28, 255) is passed as a parameter
/// the function will return 104.
pub fn grayscale(&self) -> u8 {
let average: u32 = (self.r as u32 + self.g as u32 + self.b as u32)/3;
return average as u8;
}
}
/// Impl block to implement the not() function
impl Not for Color {
type Output = Color;
/// Revert a pixel's color with !color
///
/// #Arguments
/// * `self` - a Color to be reverted
///
/// #Return
/// * `Self` - a Color reverted
///
/// #Example
/// If a Color(100, 50, 75) is passed as a parametr
/// the function will return a Color(155, 205, 180).
fn not(self) -> Self::Output {
let mut c = self;
c.r = 255 - c.r;
c.g = 255 - c.g;
c.b = 255 - c.b;
return c;
}
}
#[derive(Debug)]
/// An image is defined with a width, a height and a pixels.
pub struct Image {
/// A width is an u32
width: u32,
/// A height is an u32
height: u32,
/// A pixels is a Vec<Color>
pixels: Vec<Color> // 2D array dynamic
}
/// Used to call every Image's functions
impl Image {
/// Constructor
pub fn new(width:u32, height:u32, pixels:Vec<Color>) -> Image |
/// Width's getter
pub fn width(&self) -> u32 {
return self.width;
}
/// Height's getter
pub fn height(&self) -> u32 {
return self.height;
}
/// Pixels getter
pub fn pixels(&self) -> &Vec<Color> {
return &self.pixels;
}
/// Equals()
pub fn eq(&self, other: Image) -> bool {
if self.height != other.height {
return false;
}
if self.width != other.width {
return false;
}
if self.pixels != other.pixels {
return false;
}
return true;
}
/// Create a new Image from a .ppm File
/// # Arguments
/// * filename: &Path - The path corresponding to the file to be read.
///
/// # Return
/// * Option<Image> - The Image created through the file read. It is Optionnal
/// to handle the case where a problem occurs during the reading of the file.
pub fn new_with_file(filename: &Path) -> Option<Image> {
let mut width: u32 = 0;
let mut height: u32 = 0;
let mut pixels: Vec<Color> = Vec::new();
let file = File::open(filename).expect("Unable to open the File");
let buf_reader = BufReader::new(file);
for (i, line) in buf_reader.lines().enumerate().by_ref() {
// Treatment for the first line, if not P3 it's not a RGB picture => exit.
if i == 0 {
if &line.unwrap() != "P3" {
return None;
}
// The second line is the dimensions of the picture.
} else if i == 1 {
let list_num: Vec<u32> = get_number32_from_string(&line.unwrap());
width = list_num[0];
height = list_num[1];
} else {
// If the line begin with # it's a commentary
// Or line 2 (the max size of a color), we ignore both.
let s: &String = &line.unwrap();
if (s.chars().next().unwrap() != '#') || (i != 2) {
let colors = get_number8_from_string(&s);
if colors.len() == 3 {
let c = Color::new(colors[0], colors[1], colors[2]);
pixels.push(c);
}
}
}
}
return Some(Image::new(width, height, pixels));
}
/// Invert the Colors of an Image using c.not()
/// to invert each color of a pixel
///
/// # Arguments
/// * image: Image - the image to be inverted
/// # Return
/// * Image - the image inverted
pub fn invert(image: &Image) -> Image {
let mut inv: Vec<Color> = Vec::new();
for c in &image.pixels {
inv.push(c.not());
}
return Image::new(image.width, image.height, inv);
}
/// Write the image passed as parameter in a file.
/// # Arguments
/// * image:Image - the image to write in the file
/// * path:&Path - the path where the file will be saved plus it's name.
///
/// # Containts
/// * The first line is the type of picture : P3 is for the RGB system color
/// * The second line is the size of the picture (in pixels).
/// Two integers define the file's width and height.
/// * The third line is the max value of each color (255).
/// * The rest of the file is the colors. There is (width * height) lines
/// of three values (RGB) for each pixel.
pub fn save_file_from_image(image: &Image, path: &Path) -> std::io::Result<()> {
let mut file = File::create(path).expect("Unable to create the file");
file.write_all(b"P3\n").expect("Unable to write P3.");
file.write_fmt(format_args!("{} {}\n", image.width(), image.height()))
.expect("Unable to write width and height.");
file.write_all(b"255\n").expect("Unable to write max value for Colors.");
for c in &image.pixels {
file.write_fmt(format_args!("{} {} {} \n", c.red(), c.green(), c.blue()))
.expect("Unable to write colors.");
}
Ok(())
}
/// Return a grayscale Image from a RGB Image.
/// Each pixel of the grayscale Image is the sum of the RGB pixel / 3.
///
/// # Arguments
/// * image:Image - The RGB Image to be converted
/// # Return
/// * Image - The grayscale Image converted
pub fn grayscale(image: &Image) -> Image {
let mut gray: Vec<Color> = Vec::new();
for i in &image.pixels {
let c: u8 = Color::grayscale(i);
gray.push(Color::new(c, c, c));
}
return Image::new(image.width, image.height, gray);
}
}
/// Transform a String with numbers in it into
/// a Vector<u32> for the picture size.
/// # Example :
/// * "1 23 45" passed as parameters will return Vec{1, 23, 45}.
/// * "1 23 azer //& &é45" passed as parameters will return Vec{1, 23, 45}
fn get_number32_from_string(line: &String) -> Vec<u32> {
let mut list_num: Vec<u32> = Vec::new();
let mut n = String::new();
for c in line.chars() {
if c == ' ' || c == '\0' {
if !n.is_empty() {
list_num.push(n.parse().unwrap());
n.clear();
}
} else if c.is_digit(10){
n.push(c);
}
}
// Add if a numer is at the end of the line
if !n.is_empty() {
list_num.push(n.parse().unwrap());
}
return list_num;
}
/// Transform a String with numbers in it into
/// a Vector<u8> for the colors.
/// # Example :
/// * "1 23 45" passed as parameters will return Vec{1, 23, 45}.
/// * "1 23 azer //& &é45" passed as parameters will return Vec{1, 23, 45}
fn get_number8_from_string(line: &String) -> Vec<u8> {
let mut list_num: Vec<u8> = Vec::new();
let mut n = String::new();
for c in line.chars() {
if c == ' ' || c == '\0' {
if !n.is_empty() {
list_num.push(n.parse().unwrap());
n.clear();
}
} else if c.is_digit(10){
n.push(c);
}
}
// Add if a numer is at the end of the line
if !n.is_empty() {
list_num.push(n.parse().unwrap());
}
return list_num;
}
| {
return Image {width : width, height : height, pixels : pixels};
} | identifier_body |
lib.rs | // This crate is a library
#![crate_type = "lib"]
// This crate is named "pixel"
#![crate_name = "pixel"]
// Use +nightly to overpass this
#![feature(test)]
#[cfg(test)]
mod tests;
extern crate rand;
use std::ops::Not;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use rand::Rng;
#[derive(Debug, Copy, Clone, PartialEq)]
/// A Color is represented here by three colors, each of them are u8.
pub struct Color {
/// Red
r: u8,
/// Green
g: u8,
/// Blue
b: u8
}
/// Implements some functions for the struct Color
impl Color {
/// Constructor
pub fn new(red: u8, green: u8, blue:u8) -> Color {
return Color {r : red, g : green, b : blue};
}
/// Conctructor with random values for each color
pub fn new_random() -> Color {
let mut r = rand::thread_rng();
return Color {
r : r.gen::<u8>(),
g : r.gen::<u8>(),
b : r.gen::<u8>()
}
}
/// Default constructor
pub fn empty_color() -> Color {
return Color {r : 0, g : 0, b : 0};
}
/// Red's getter
pub fn red(&self) -> u8 {
return self.r;
}
/// Green's getter
pub fn green(&self) -> u8 {
return self.g; | pub fn blue(&self) -> u8 {
return self.b;
}
/// toString() to display a Color
pub fn display(&self) {
println!("r : {}, g : {}, b : {}", self.r, self.g, self.b);
}
/// Equals to determine if the two Color in parameters are equals.
/// Return true if self and other and equals
/// (the r, g and b of self are equals to the r, g and b of other)
///
/// # Arguments
/// * `self` - a Color to be compared
/// * `other` - a second Color to compare the first one
///
/// # Return
/// * `bool` - corresponding to the equality (or not) of the two arguments
pub fn eq(&self, other: Color) -> bool {
if(self.red() == other.red())
&& (self.green() == other.green())
&& (self.blue() == other.blue() ) {
return true;
}
return false;
}
/// Transform a RGB pixel (Color) to a grayscale pixel (between 0 and 255).
/// Use an intermediate u32 var to calculate the average without u8 overflow.
///
/// # Arguments
/// * `self` - a Color to be converted
///
/// # Return
/// * `u8` - an integer corresponding to the converted Color
///
/// # Example
/// If a Color(30, 28, 255) is passed as a parameter
/// the function will return 104.
pub fn grayscale(&self) -> u8 {
let average: u32 = (self.r as u32 + self.g as u32 + self.b as u32)/3;
return average as u8;
}
}
/// Impl block to implement the not() function
impl Not for Color {
type Output = Color;
/// Revert a pixel's color with !color
///
/// #Arguments
/// * `self` - a Color to be reverted
///
/// #Return
/// * `Self` - a Color reverted
///
/// #Example
/// If a Color(100, 50, 75) is passed as a parametr
/// the function will return a Color(155, 205, 180).
fn not(self) -> Self::Output {
let mut c = self;
c.r = 255 - c.r;
c.g = 255 - c.g;
c.b = 255 - c.b;
return c;
}
}
#[derive(Debug)]
/// An image is defined with a width, a height and a pixels.
pub struct Image {
/// A width is an u32
width: u32,
/// A height is an u32
height: u32,
/// A pixels is a Vec<Color>
pixels: Vec<Color> // 2D array dynamic
}
/// Used to call every Image's functions
impl Image {
/// Constructor
pub fn new(width:u32, height:u32, pixels:Vec<Color>) -> Image {
return Image {width : width, height : height, pixels : pixels};
}
/// Width's getter
pub fn width(&self) -> u32 {
return self.width;
}
/// Height's getter
pub fn height(&self) -> u32 {
return self.height;
}
/// Pixels getter
pub fn pixels(&self) -> &Vec<Color> {
return &self.pixels;
}
/// Equals()
pub fn eq(&self, other: Image) -> bool {
if self.height != other.height {
return false;
}
if self.width != other.width {
return false;
}
if self.pixels != other.pixels {
return false;
}
return true;
}
/// Create a new Image from a .ppm File
/// # Arguments
/// * filename: &Path - The path corresponding to the file to be read.
///
/// # Return
/// * Option<Image> - The Image created through the file read. It is Optionnal
/// to handle the case where a problem occurs during the reading of the file.
pub fn new_with_file(filename: &Path) -> Option<Image> {
let mut width: u32 = 0;
let mut height: u32 = 0;
let mut pixels: Vec<Color> = Vec::new();
let file = File::open(filename).expect("Unable to open the File");
let buf_reader = BufReader::new(file);
for (i, line) in buf_reader.lines().enumerate().by_ref() {
// Treatment for the first line, if not P3 it's not a RGB picture => exit.
if i == 0 {
if &line.unwrap() != "P3" {
return None;
}
// The second line is the dimensions of the picture.
} else if i == 1 {
let list_num: Vec<u32> = get_number32_from_string(&line.unwrap());
width = list_num[0];
height = list_num[1];
} else {
// If the line begin with # it's a commentary
// Or line 2 (the max size of a color), we ignore both.
let s: &String = &line.unwrap();
if (s.chars().next().unwrap() != '#') || (i != 2) {
let colors = get_number8_from_string(&s);
if colors.len() == 3 {
let c = Color::new(colors[0], colors[1], colors[2]);
pixels.push(c);
}
}
}
}
return Some(Image::new(width, height, pixels));
}
/// Invert the Colors of an Image using c.not()
/// to invert each color of a pixel
///
/// # Arguments
/// * image: Image - the image to be inverted
/// # Return
/// * Image - the image inverted
pub fn invert(image: &Image) -> Image {
let mut inv: Vec<Color> = Vec::new();
for c in &image.pixels {
inv.push(c.not());
}
return Image::new(image.width, image.height, inv);
}
/// Write the image passed as parameter in a file.
/// # Arguments
/// * image:Image - the image to write in the file
/// * path:&Path - the path where the file will be saved plus it's name.
///
/// # Containts
/// * The first line is the type of picture : P3 is for the RGB system color
/// * The second line is the size of the picture (in pixels).
/// Two integers define the file's width and height.
/// * The third line is the max value of each color (255).
/// * The rest of the file is the colors. There is (width * height) lines
/// of three values (RGB) for each pixel.
pub fn save_file_from_image(image: &Image, path: &Path) -> std::io::Result<()> {
let mut file = File::create(path).expect("Unable to create the file");
file.write_all(b"P3\n").expect("Unable to write P3.");
file.write_fmt(format_args!("{} {}\n", image.width(), image.height()))
.expect("Unable to write width and height.");
file.write_all(b"255\n").expect("Unable to write max value for Colors.");
for c in &image.pixels {
file.write_fmt(format_args!("{} {} {} \n", c.red(), c.green(), c.blue()))
.expect("Unable to write colors.");
}
Ok(())
}
/// Return a grayscale Image from a RGB Image.
/// Each pixel of the grayscale Image is the sum of the RGB pixel / 3.
///
/// # Arguments
/// * image:Image - The RGB Image to be converted
/// # Return
/// * Image - The grayscale Image converted
pub fn grayscale(image: &Image) -> Image {
let mut gray: Vec<Color> = Vec::new();
for i in &image.pixels {
let c: u8 = Color::grayscale(i);
gray.push(Color::new(c, c, c));
}
return Image::new(image.width, image.height, gray);
}
}
/// Transform a String with numbers in it into
/// a Vector<u32> for the picture size.
/// # Example :
/// * "1 23 45" passed as parameters will return Vec{1, 23, 45}.
/// * "1 23 azer //& &é45" passed as parameters will return Vec{1, 23, 45}
fn get_number32_from_string(line: &String) -> Vec<u32> {
let mut list_num: Vec<u32> = Vec::new();
let mut n = String::new();
for c in line.chars() {
if c == ' ' || c == '\0' {
if !n.is_empty() {
list_num.push(n.parse().unwrap());
n.clear();
}
} else if c.is_digit(10){
n.push(c);
}
}
// Add if a numer is at the end of the line
if !n.is_empty() {
list_num.push(n.parse().unwrap());
}
return list_num;
}
/// Transform a String with numbers in it into
/// a Vector<u8> for the colors.
/// # Example :
/// * "1 23 45" passed as parameters will return Vec{1, 23, 45}.
/// * "1 23 azer //& &é45" passed as parameters will return Vec{1, 23, 45}
fn get_number8_from_string(line: &String) -> Vec<u8> {
let mut list_num: Vec<u8> = Vec::new();
let mut n = String::new();
for c in line.chars() {
if c == ' ' || c == '\0' {
if !n.is_empty() {
list_num.push(n.parse().unwrap());
n.clear();
}
} else if c.is_digit(10){
n.push(c);
}
}
// Push the final number if the line ends with a digit
if !n.is_empty() {
list_num.push(n.parse().unwrap());
}
return list_num;
} | }
/// Blue's getter | random_line_split |
main.py | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from platform import system
from os import makedirs
from os.path import isdir, join, isfile
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Builder, Default,
DefaultEnvironment)
env = DefaultEnvironment()
platform = env.PioPlatform()
# Overrides the default upload.maximum_size value, which is used to calculate how much of the memory is used up.
if "size" in env.BoardConfig().get("build") and "application_type" in env.GetProjectOptions(as_dict=True):
ApplicationType = env.GetProjectOptions(as_dict=True).get("application_type")
ProgramSize = env.BoardConfig().get("build.size.full")
if "boot" == ApplicationType:
# use the bootloader linker
ProgramSize = env.BoardConfig().get("build.size.boot")
elif "app" == ApplicationType:
ProgramSize = env.BoardConfig().get("build.size.app")
env.BoardConfig().update("upload.maximum_size", ProgramSize)
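# Illustrative sketch (assumed configuration, not part of this script): the branch above expects the
# board manifest to define "build.size.full/boot/app" and the project to set "application_type", e.g.:
#
#   [env:my_board]
#   board = my_board
#   application_type = app   ; or "boot" to size the image against the bootloader region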
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
GDB="arm-none-eabi-gdb",
OBJCOPY="arm-none-eabi-objcopy",
OBJDUMP="arm-none-eabi-objdump",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rc"],
SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',
PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Bin Output -> $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Hex Output -> $TARGET"),
suffix=".hex"
),
MergeHex=Builder(
action=env.VerboseAction(" ".join([
join(platform.get_package_dir("tool-sreccat") or "",
"srec_cat"),
"$SOFTDEVICEHEX",
"-intel",
"$SOURCES",
"-intel",
"-o",
"$TARGET",
"-intel",
"--line-length=44"
]), "Building $TARGET"),
suffix=".hex"
),
ObjectDump=Builder(
action=env.VerboseAction(" ".join([
"$OBJDUMP",
"-D",
| "$TARGET"
]), "disassembler Output -> $TARGET"),
suffix=".dis"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
target_firm_elf = None
target_firm_hex = None
object_dump_dis = None
merged_softdevice_hex = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_firm_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
target_firm_hex = join("$BUILD_DIR", "${PROGNAME}.hex")
target_firm = join("$BUILD_DIR", "${PROGNAME}.bin")
else:
target_firm_elf = env.BuildProgram()
if "SOFTDEVICEHEX" in env:
merged_softdevice_hex = env.MergeHex(join("$BUILD_DIR", "${PROGNAME}"), env.ElfToHex(join("$BUILD_DIR", "user_${PROGNAME}"), target_firm_elf))
target_firm_hex = join("$BUILD_DIR", "user_${PROGNAME}.hex")
else :
target_firm_hex = env.ElfToHex(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
object_dump_dis = env.ObjectDump(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
#
# Target: Upload firmware (the .bin file by default)
#
if env.subst("$UPLOAD_PROTOCOL") == "teensy-gui" and not isfile( join( platform.get_package_dir("tool-teensy") or "", "teensy_post_compile.exe" if system() == "Windows" else "teensy_post_compile") ):
env.Replace(UPLOAD_PROTOCOL="teensy-cli")
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
debug_tools = env.BoardConfig().get("debug.tools", {})
upload_source = target_firm
if upload_protocol.startswith("jlink"):
def _jlink_cmd_script(env, source):
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [
"h",
"loadbin %s, %s" % (source, env.BoardConfig().get(
"upload.offset_address", "0x0")),
"r",
"q"
]
with open(script_path, "w") as fp:
fp.write("\n".join(commands))
return script_path
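# For reference (sketch): with the default offset this helper writes an upload.jlink script of the form
#   h
#   loadbin $BUILD_DIR/firmware.bin, 0x0
#   r
#   q
# which the J-Link Commander invocation below consumes via -CommanderScript.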
env.Replace(
__jlink_cmd_script=_jlink_cmd_script,
UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
UPLOADERFLAGS=[
"-device", env.BoardConfig().get("debug", {}).get("jlink_device"),
"-speed", "4000",
"-if", ("jtag" if upload_protocol == "jlink-jtag" else "swd"),
"-autoconnect", "1"
],
UPLOADCMD='$UPLOADER $UPLOADERFLAGS -CommanderScript "${__jlink_cmd_script(__env__, SOURCE)}"'
)
AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
#elif upload_protocol in debug_tools:
# env.Replace(
# UPLOADER="openocd",
# UPLOADERFLAGS=["-s", platform.get_package_dir("tool-openocd") or ""] +
# debug_tools.get(upload_protocol).get("server").get("arguments", []) + [
# "-c",
# "program {$SOURCE} verify reset %s; shutdown;" %
# env.BoardConfig().get("upload.offset_address", "")
# ],
# UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
#
# if not env.BoardConfig().get("upload").get("offset_address"):
# upload_source = target_firm_elf
#
# AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
# custom upload tool
elif upload_protocol == "custom":
AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
elif upload_protocol == "nrfjprog":
env.Replace( ERASEFLAGS=["--eraseall", "-f", "NRF52"],
ERASECMD="nrfjprog_bin $ERASEFLAGS",
UPLOADER="nrfjprog_bin",
UPLOADERFLAGS=[
"--chiperase",
"--reset"
],
PARTIAL_UPLOADERFLAGS=[
"--sectoranduicrerase",
"--reset"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS --program $SOURCE",
PARTIAL_UPLOADCMD="$UPLOADER $PARTIAL_UPLOADERFLAGS --program $SOURCE")
AlwaysBuild(env.Alias("erase", None, env.VerboseAction("$ERASECMD", "Erasing...")))
if merged_softdevice_hex :
AlwaysBuild(env.Alias("upload_softdevice", env.get('SOFTDEVICEHEX'), [env.VerboseAction("$PARTIAL_UPLOADCMD", "Uploading $SOURCE")]))
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$PARTIAL_UPLOADCMD", "Uploading $SOURCE")]))
AlwaysBuild(env.Alias("upload_merged", merged_softdevice_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
else :
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
elif upload_protocol == "teensy-cli":
env.Replace(
REBOOTER="teensy_reboot",
UPLOADER="teensy_loader_cli",
UPLOADERFLAGS=[
"-mmcu=$BOARD_MCU",
"-w", # wait for device to appear
"-s", # soft reboot if device not online
"-v" # verbose output
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $SOURCES"
)
AlwaysBuild(env.Alias("upload", target_firm_hex, [ env.VerboseAction("$REBOOTER -s", "Rebooting..."), env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE") ]))
elif upload_protocol == "teensy-gui":
env.Replace(
UPLOADER="teensy_post_compile",
UPLOADERFLAGS=[
"-file=${PROGNAME}", '-path="$BUILD_DIR"',
"-tools=%s" % (platform.get_package_dir("tool-teensy") or ""),
"-board=%s" % env.BoardConfig().id.upper(),
"-reboot"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS"
)
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
else:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("nobuild", target_firm))
#target_buildprog = env.Alias("buildprog", target_firm)
#
# Target: Print binary size
#
target_size = env.Alias("size", target_firm_elf, env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Default targets
#
Default([
env.Alias("buildprog", target_firm),
env.Alias("hex", target_firm_hex),
env.Alias("dumpDis", object_dump_dis),
target_size
]) | "$SOURCES",
">",
| random_line_split |
main.py | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from platform import system
from os import makedirs
from os.path import isdir, join, isfile
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Builder, Default,
DefaultEnvironment)
env = DefaultEnvironment()
platform = env.PioPlatform()
# Overrides the default upload.maximum_size value, which is used to calculate how much of the memory is used up.
if "size" in env.BoardConfig().get("build") and "application_type" in env.GetProjectOptions(as_dict=True):
ApplicationType = env.GetProjectOptions(as_dict=True).get("application_type")
ProgramSize = env.BoardConfig().get("build.size.full")
if "boot" == ApplicationType:
# use the bootloader linker
ProgramSize = env.BoardConfig().get("build.size.boot")
elif "app" == ApplicationType:
ProgramSize = env.BoardConfig().get("build.size.app")
env.BoardConfig().update("upload.maximum_size", ProgramSize)
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
GDB="arm-none-eabi-gdb",
OBJCOPY="arm-none-eabi-objcopy",
OBJDUMP="arm-none-eabi-objdump",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rc"],
SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',
PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Bin Output -> $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Hex Output -> $TARGET"),
suffix=".hex"
),
MergeHex=Builder(
action=env.VerboseAction(" ".join([
join(platform.get_package_dir("tool-sreccat") or "",
"srec_cat"),
"$SOFTDEVICEHEX",
"-intel",
"$SOURCES",
"-intel",
"-o",
"$TARGET",
"-intel",
"--line-length=44"
]), "Building $TARGET"),
suffix=".hex"
),
ObjectDump=Builder(
action=env.VerboseAction(" ".join([
"$OBJDUMP",
"-D",
"$SOURCES",
">",
"$TARGET"
]), "disassembler Output -> $TARGET"),
suffix=".dis"
)
)
)
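# For reference (sketch): when SOFTDEVICEHEX is set, the MergeHex action above expands to roughly
#   srec_cat <softdevice>.hex -intel $BUILD_DIR/user_firmware.hex -intel -o $BUILD_DIR/firmware.hex -intel --line-length=44
# i.e. the softdevice and the application hex are concatenated into a single Intel-hex image.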
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
target_firm_elf = None
target_firm_hex = None
object_dump_dis = None
merged_softdevice_hex = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_firm_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
target_firm_hex = join("$BUILD_DIR", "${PROGNAME}.hex")
target_firm = join("$BUILD_DIR", "${PROGNAME}.bin")
else:
target_firm_elf = env.BuildProgram()
if "SOFTDEVICEHEX" in env:
merged_softdevice_hex = env.MergeHex(join("$BUILD_DIR", "${PROGNAME}"), env.ElfToHex(join("$BUILD_DIR", "user_${PROGNAME}"), target_firm_elf))
target_firm_hex = join("$BUILD_DIR", "user_${PROGNAME}.hex")
else :
target_firm_hex = env.ElfToHex(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
object_dump_dis = env.ObjectDump(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
#
# Target: Upload firmware (the .bin file by default)
#
if env.subst("$UPLOAD_PROTOCOL") == "teensy-gui" and not isfile( join( platform.get_package_dir("tool-teensy") or "", "teensy_post_compile.exe" if system() == "Windows" else "teensy_post_compile") ):
env.Replace(UPLOAD_PROTOCOL="teensy-cli")
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
debug_tools = env.BoardConfig().get("debug.tools", {})
upload_source = target_firm
if upload_protocol.startswith("jlink"):
def | (env, source):
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [
"h",
"loadbin %s, %s" % (source, env.BoardConfig().get(
"upload.offset_address", "0x0")),
"r",
"q"
]
with open(script_path, "w") as fp:
fp.write("\n".join(commands))
return script_path
env.Replace(
__jlink_cmd_script=_jlink_cmd_script,
UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
UPLOADERFLAGS=[
"-device", env.BoardConfig().get("debug", {}).get("jlink_device"),
"-speed", "4000",
"-if", ("jtag" if upload_protocol == "jlink-jtag" else "swd"),
"-autoconnect", "1"
],
UPLOADCMD='$UPLOADER $UPLOADERFLAGS -CommanderScript "${__jlink_cmd_script(__env__, SOURCE)}"'
)
AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
#elif upload_protocol in debug_tools:
# env.Replace(
# UPLOADER="openocd",
# UPLOADERFLAGS=["-s", platform.get_package_dir("tool-openocd") or ""] +
# debug_tools.get(upload_protocol).get("server").get("arguments", []) + [
# "-c",
# "program {$SOURCE} verify reset %s; shutdown;" %
# env.BoardConfig().get("upload.offset_address", "")
# ],
# UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
#
# if not env.BoardConfig().get("upload").get("offset_address"):
# upload_source = target_firm_elf
#
# AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
# custom upload tool
elif upload_protocol == "custom":
AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
elif upload_protocol == "nrfjprog":
env.Replace( ERASEFLAGS=["--eraseall", "-f", "NRF52"],
ERASECMD="nrfjprog_bin $ERASEFLAGS",
UPLOADER="nrfjprog_bin",
UPLOADERFLAGS=[
"--chiperase",
"--reset"
],
PARTIAL_UPLOADERFLAGS=[
"--sectoranduicrerase",
"--reset"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS --program $SOURCE",
PARTIAL_UPLOADCMD="$UPLOADER $PARTIAL_UPLOADERFLAGS --program $SOURCE")
AlwaysBuild(env.Alias("erase", None, env.VerboseAction("$ERASECMD", "Erasing...")))
if merged_softdevice_hex :
AlwaysBuild(env.Alias("upload_softdevice", env.get('SOFTDEVICEHEX'), [env.VerboseAction("$PARTIAL_UPLOADCMD", "Uploading $SOURCE")]))
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$PARTIAL_UPLOADCMD", "Uploading $SOURCE")]))
AlwaysBuild(env.Alias("upload_merged", merged_softdevice_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
else :
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
elif upload_protocol == "teensy-cli":
env.Replace(
REBOOTER="teensy_reboot",
UPLOADER="teensy_loader_cli",
UPLOADERFLAGS=[
"-mmcu=$BOARD_MCU",
"-w", # wait for device to appear
"-s", # soft reboot if device not online
"-v" # verbose output
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $SOURCES"
)
AlwaysBuild(env.Alias("upload", target_firm_hex, [ env.VerboseAction("$REBOOTER -s", "Rebooting..."), env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE") ]))
elif upload_protocol == "teensy-gui":
env.Replace(
UPLOADER="teensy_post_compile",
UPLOADERFLAGS=[
"-file=${PROGNAME}", '-path="$BUILD_DIR"',
"-tools=%s" % (platform.get_package_dir("tool-teensy") or ""),
"-board=%s" % env.BoardConfig().id.upper(),
"-reboot"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS"
)
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
else:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("nobuild", target_firm))
#target_buildprog = env.Alias("buildprog", target_firm)
#
# Target: Print binary size
#
target_size = env.Alias("size", target_firm_elf, env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Default targets
#
Default([
env.Alias("buildprog", target_firm),
env.Alias("hex", target_firm_hex),
env.Alias("dumpDis", object_dump_dis),
target_size
]) | _jlink_cmd_script | identifier_name |
main.py | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from platform import system
from os import makedirs
from os.path import isdir, join, isfile
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Builder, Default,
DefaultEnvironment)
env = DefaultEnvironment()
platform = env.PioPlatform()
# Overrides the default upload.maximum_size value, which is used to calculate how much of the memory is used up.
if "size" in env.BoardConfig().get("build") and "application_type" in env.GetProjectOptions(as_dict=True):
ApplicationType = env.GetProjectOptions(as_dict=True).get("application_type")
ProgramSize = env.BoardConfig().get("build.size.full")
if "boot" == ApplicationType:
# use the bootloader linker
ProgramSize = env.BoardConfig().get("build.size.boot")
elif "app" == ApplicationType:
ProgramSize = env.BoardConfig().get("build.size.app")
env.BoardConfig().update("upload.maximum_size", ProgramSize)
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
GDB="arm-none-eabi-gdb",
OBJCOPY="arm-none-eabi-objcopy",
OBJDUMP="arm-none-eabi-objdump",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rc"],
SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',
PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Bin Output -> $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Hex Output -> $TARGET"),
suffix=".hex"
),
MergeHex=Builder(
action=env.VerboseAction(" ".join([
join(platform.get_package_dir("tool-sreccat") or "",
"srec_cat"),
"$SOFTDEVICEHEX",
"-intel",
"$SOURCES",
"-intel",
"-o",
"$TARGET",
"-intel",
"--line-length=44"
]), "Building $TARGET"),
suffix=".hex"
),
ObjectDump=Builder(
action=env.VerboseAction(" ".join([
"$OBJDUMP",
"-D",
"$SOURCES",
">",
"$TARGET"
]), "disassembler Output -> $TARGET"),
suffix=".dis"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
target_firm_elf = None
target_firm_hex = None
object_dump_dis = None
merged_softdevice_hex = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_firm_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
target_firm_hex = join("$BUILD_DIR", "${PROGNAME}.hex")
target_firm = join("$BUILD_DIR", "${PROGNAME}.bin")
else:
target_firm_elf = env.BuildProgram()
if "SOFTDEVICEHEX" in env:
merged_softdevice_hex = env.MergeHex(join("$BUILD_DIR", "${PROGNAME}"), env.ElfToHex(join("$BUILD_DIR", "user_${PROGNAME}"), target_firm_elf))
target_firm_hex = join("$BUILD_DIR", "user_${PROGNAME}.hex")
else :
target_firm_hex = env.ElfToHex(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
object_dump_dis = env.ObjectDump(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
#
# Target: Upload firmware (the .bin file by default)
#
if env.subst("$UPLOAD_PROTOCOL") == "teensy-gui" and not isfile( join( platform.get_package_dir("tool-teensy") or "", "teensy_post_compile.exe" if system() == "Windows" else "teensy_post_compile") ):
env.Replace(UPLOAD_PROTOCOL="teensy-cli")
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
debug_tools = env.BoardConfig().get("debug.tools", {})
upload_source = target_firm
if upload_protocol.startswith("jlink"):
def _jlink_cmd_script(env, source):
|
env.Replace(
__jlink_cmd_script=_jlink_cmd_script,
UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
UPLOADERFLAGS=[
"-device", env.BoardConfig().get("debug", {}).get("jlink_device"),
"-speed", "4000",
"-if", ("jtag" if upload_protocol == "jlink-jtag" else "swd"),
"-autoconnect", "1"
],
UPLOADCMD='$UPLOADER $UPLOADERFLAGS -CommanderScript "${__jlink_cmd_script(__env__, SOURCE)}"'
)
AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
#elif upload_protocol in debug_tools:
# env.Replace(
# UPLOADER="openocd",
# UPLOADERFLAGS=["-s", platform.get_package_dir("tool-openocd") or ""] +
# debug_tools.get(upload_protocol).get("server").get("arguments", []) + [
# "-c",
# "program {$SOURCE} verify reset %s; shutdown;" %
# env.BoardConfig().get("upload.offset_address", "")
# ],
# UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
#
# if not env.BoardConfig().get("upload").get("offset_address"):
# upload_source = target_firm_elf
#
# AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
# custom upload tool
elif upload_protocol == "custom":
AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
elif upload_protocol == "nrfjprog":
env.Replace( ERASEFLAGS=["--eraseall", "-f", "NRF52"],
ERASECMD="nrfjprog_bin $ERASEFLAGS",
UPLOADER="nrfjprog_bin",
UPLOADERFLAGS=[
"--chiperase",
"--reset"
],
PARTIAL_UPLOADERFLAGS=[
"--sectoranduicrerase",
"--reset"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS --program $SOURCE",
PARTIAL_UPLOADCMD="$UPLOADER $PARTIAL_UPLOADERFLAGS --program $SOURCE")
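# For reference (sketch): with PROGNAME=firmware these templates expand to roughly
#   nrfjprog_bin --eraseall -f NRF52                                  (the "erase" alias)
#   nrfjprog_bin --chiperase --reset --program firmware.hex           (full upload / upload_merged)
#   nrfjprog_bin --sectoranduicrerase --reset --program firmware.hex  (partial upload of the user image)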
AlwaysBuild(env.Alias("erase", None, env.VerboseAction("$ERASECMD", "Erasing...")))
if merged_softdevice_hex :
AlwaysBuild(env.Alias("upload_softdevice", env.get('SOFTDEVICEHEX'), [env.VerboseAction("$PARTIAL_UPLOADCMD", "Uploading $SOURCE")]))
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$PARTIAL_UPLOADCMD", "Uploading $SOURCE")]))
AlwaysBuild(env.Alias("upload_merged", merged_softdevice_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
else :
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
elif upload_protocol == "teensy-cli":
env.Replace(
REBOOTER="teensy_reboot",
UPLOADER="teensy_loader_cli",
UPLOADERFLAGS=[
"-mmcu=$BOARD_MCU",
"-w", # wait for device to appear
"-s", # soft reboot if device not online
"-v" # verbose output
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $SOURCES"
)
AlwaysBuild(env.Alias("upload", target_firm_hex, [ env.VerboseAction("$REBOOTER -s", "Rebooting..."), env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE") ]))
elif upload_protocol == "teensy-gui":
env.Replace(
UPLOADER="teensy_post_compile",
UPLOADERFLAGS=[
"-file=${PROGNAME}", '-path="$BUILD_DIR"',
"-tools=%s" % (platform.get_package_dir("tool-teensy") or ""),
"-board=%s" % env.BoardConfig().id.upper(),
"-reboot"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS"
)
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
else:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("nobuild", target_firm))
#target_buildprog = env.Alias("buildprog", target_firm)
#
# Target: Print binary size
#
target_size = env.Alias("size", target_firm_elf, env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Default targets
#
Default([
env.Alias("buildprog", target_firm),
env.Alias("hex", target_firm_hex),
env.Alias("dumpDis", object_dump_dis),
target_size
]) | build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [
"h",
"loadbin %s, %s" % (source, env.BoardConfig().get(
"upload.offset_address", "0x0")),
"r",
"q"
]
with open(script_path, "w") as fp:
fp.write("\n".join(commands))
return script_path | identifier_body |
main.py | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from platform import system
from os import makedirs
from os.path import isdir, join, isfile
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Builder, Default,
DefaultEnvironment)
env = DefaultEnvironment()
platform = env.PioPlatform()
# Overrides the default upload.maximum_size value, which is used to calculate how much of the memory is used up.
if "size" in env.BoardConfig().get("build") and "application_type" in env.GetProjectOptions(as_dict=True):
ApplicationType = env.GetProjectOptions(as_dict=True).get("application_type")
ProgramSize = env.BoardConfig().get("build.size.full")
if "boot" == ApplicationType:
# use the bootloader linker
ProgramSize = env.BoardConfig().get("build.size.boot")
elif "app" == ApplicationType:
ProgramSize = env.BoardConfig().get("build.size.app")
env.BoardConfig().update("upload.maximum_size", ProgramSize)
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
GDB="arm-none-eabi-gdb",
OBJCOPY="arm-none-eabi-objcopy",
OBJDUMP="arm-none-eabi-objdump",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rc"],
SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',
PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
|
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Bin Output -> $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Hex Output -> $TARGET"),
suffix=".hex"
),
MergeHex=Builder(
action=env.VerboseAction(" ".join([
join(platform.get_package_dir("tool-sreccat") or "",
"srec_cat"),
"$SOFTDEVICEHEX",
"-intel",
"$SOURCES",
"-intel",
"-o",
"$TARGET",
"-intel",
"--line-length=44"
]), "Building $TARGET"),
suffix=".hex"
),
ObjectDump=Builder(
action=env.VerboseAction(" ".join([
"$OBJDUMP",
"-D",
"$SOURCES",
">",
"$TARGET"
]), "disassembler Output -> $TARGET"),
suffix=".dis"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
target_firm_elf = None
target_firm_hex = None
object_dump_dis = None
merged_softdevice_hex = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_firm_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
target_firm_hex = join("$BUILD_DIR", "${PROGNAME}.hex")
target_firm = join("$BUILD_DIR", "${PROGNAME}.bin")
else:
target_firm_elf = env.BuildProgram()
if "SOFTDEVICEHEX" in env:
merged_softdevice_hex = env.MergeHex(join("$BUILD_DIR", "${PROGNAME}"), env.ElfToHex(join("$BUILD_DIR", "user_${PROGNAME}"), target_firm_elf))
target_firm_hex = join("$BUILD_DIR", "user_${PROGNAME}.hex")
else :
target_firm_hex = env.ElfToHex(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
object_dump_dis = env.ObjectDump(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_firm_elf)
#
# Target: Upload firmware (the .bin file by default)
#
if env.subst("$UPLOAD_PROTOCOL") == "teensy-gui" and not isfile( join( platform.get_package_dir("tool-teensy") or "", "teensy_post_compile.exe" if system() == "Windows" else "teensy_post_compile") ):
env.Replace(UPLOAD_PROTOCOL="teensy-cli")
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
debug_tools = env.BoardConfig().get("debug.tools", {})
upload_source = target_firm
if upload_protocol.startswith("jlink"):
def _jlink_cmd_script(env, source):
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [
"h",
"loadbin %s, %s" % (source, env.BoardConfig().get(
"upload.offset_address", "0x0")),
"r",
"q"
]
with open(script_path, "w") as fp:
fp.write("\n".join(commands))
return script_path
env.Replace(
__jlink_cmd_script=_jlink_cmd_script,
UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
UPLOADERFLAGS=[
"-device", env.BoardConfig().get("debug", {}).get("jlink_device"),
"-speed", "4000",
"-if", ("jtag" if upload_protocol == "jlink-jtag" else "swd"),
"-autoconnect", "1"
],
UPLOADCMD='$UPLOADER $UPLOADERFLAGS -CommanderScript "${__jlink_cmd_script(__env__, SOURCE)}"'
)
AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
#elif upload_protocol in debug_tools:
# env.Replace(
# UPLOADER="openocd",
# UPLOADERFLAGS=["-s", platform.get_package_dir("tool-openocd") or ""] +
# debug_tools.get(upload_protocol).get("server").get("arguments", []) + [
# "-c",
# "program {$SOURCE} verify reset %s; shutdown;" %
# env.BoardConfig().get("upload.offset_address", "")
# ],
# UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
#
# if not env.BoardConfig().get("upload").get("offset_address"):
# upload_source = target_firm_elf
#
# AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
# custom upload tool
elif upload_protocol == "custom":
AlwaysBuild(env.Alias("upload", upload_source, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
elif upload_protocol == "nrfjprog":
env.Replace( ERASEFLAGS=["--eraseall", "-f", "NRF52"],
ERASECMD="nrfjprog_bin $ERASEFLAGS",
UPLOADER="nrfjprog_bin",
UPLOADERFLAGS=[
"--chiperase",
"--reset"
],
PARTIAL_UPLOADERFLAGS=[
"--sectoranduicrerase",
"--reset"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS --program $SOURCE",
PARTIAL_UPLOADCMD="$UPLOADER $PARTIAL_UPLOADERFLAGS --program $SOURCE")
AlwaysBuild(env.Alias("erase", None, env.VerboseAction("$ERASECMD", "Erasing...")))
if merged_softdevice_hex :
AlwaysBuild(env.Alias("upload_softdevice", env.get('SOFTDEVICEHEX'), [env.VerboseAction("$PARTIAL_UPLOADCMD", "Uploading $SOURCE")]))
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$PARTIAL_UPLOADCMD", "Uploading $SOURCE")]))
AlwaysBuild(env.Alias("upload_merged", merged_softdevice_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
else :
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
elif upload_protocol == "teensy-cli":
env.Replace(
REBOOTER="teensy_reboot",
UPLOADER="teensy_loader_cli",
UPLOADERFLAGS=[
"-mmcu=$BOARD_MCU",
"-w", # wait for device to appear
"-s", # soft reboot if device not online
"-v" # verbose output
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $SOURCES"
)
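# For reference (sketch): with an assumed BOARD_MCU such as "mk66fx1m0" the command expands to roughly
#   teensy_loader_cli -mmcu=mk66fx1m0 -w -s -v firmware.hex
# and is run after "teensy_reboot -s" has soft-rebooted the board.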
AlwaysBuild(env.Alias("upload", target_firm_hex, [ env.VerboseAction("$REBOOTER -s", "Rebooting..."), env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE") ]))
elif upload_protocol == "teensy-gui":
env.Replace(
UPLOADER="teensy_post_compile",
UPLOADERFLAGS=[
"-file=${PROGNAME}", '-path="$BUILD_DIR"',
"-tools=%s" % (platform.get_package_dir("tool-teensy") or ""),
"-board=%s" % env.BoardConfig().id.upper(),
"-reboot"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS"
)
AlwaysBuild(env.Alias("upload", target_firm_hex, [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]))
else:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("nobuild", target_firm))
#target_buildprog = env.Alias("buildprog", target_firm)
#
# Target: Print binary size
#
target_size = env.Alias("size", target_firm_elf, env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Default targets
#
Default([
env.Alias("buildprog", target_firm),
env.Alias("hex", target_firm_hex),
env.Alias("dumpDis", object_dump_dis),
target_size
]) | env.Replace(PROGNAME="firmware") | conditional_block |
wxController.go | package frontEndControllers
import (
"encoding/json"
"errors"
goXorm "github.com/go-xorm/xorm"
"github.com/kataras/iris"
"qpgame/common/services"
"qpgame/common/utils"
"qpgame/config"
"qpgame/models"
"qpgame/models/beans"
"qpgame/models/xorm"
"qpgame/ramcache"
"strconv"
"time"
)
type WxController struct {
platform string
ctx iris.Context
}
type WxService struct {
platform string
loginFrom string
ip string
engine *goXorm.Engine
}
var WxAppId = ""
var WxAppSecret = ""
const WxAuthUrl string = "https://api.weixin.qq.com/sns/oauth2/access_token"
const WxUserInfoUrl string = "https://api.weixin.qq.com/sns/userinfo"
// Constructor
func NewWxController(ctx iris.Context) *WxController {
obj := new(WxController)
obj.platform = ctx.Params().Get("platform")
obj.ctx = ctx
for k, v := range config.PlatformCPs {
if k == obj.platform {
wxSet := v.(map[string]interface{})["wx"].(config.WxSet)
WxAppId = wxSet.AppId
WxAppSecret = wxSet.AppSecret
}
}
return obj
}
// Fetch the access_token
func wxGetAccessToken(sCode string) (map[string]string, bool) {
params := map[string]string{
"appid": WxAppId,
"secret": WxAppSecret,
"code": sCode,
"grant_type": "authorization_code",
}
reqUrl := utils.BuildUrl(WxAuthUrl, params)
respData := utils.ReqGet(reqUrl, 10*time.Second)
result := make(map[string]string)
var ok bool
json.Unmarshal(respData, &result)
if _, exist := result["access_token"]; exist {
ok = true
} else {
ok = false
}
return result, ok
}
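// Illustrative note (sketch): a successful token response from the endpoint above is JSON of roughly
// the form {"access_token": "...", "openid": "...", ...}; on failure WeChat returns
// {"errcode": ..., "errmsg": "..."} instead, which is what the caller inspects before logging in.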
func wxGetUserInfo(accessToken, openId string) (userInfo map[string]string, ok bool) {
params := map[string]string{
"access_token": accessToken,
"openid": openId,
}
reqUrl := utils.BuildUrl(WxUserInfoUrl, params)
respData := utils.ReqGet(reqUrl, 10*time.Second)
userInfo = make(map[string]string)
json.Unmarshal(respData, &userInfo)
if _, exist := userInfo["openid"]; exist {
ok = true
} else {
ok = false
}
return
}
func (service WxService) createNewUser(userBean xorm.Users, iNow int) (xorm.Users, error) {
session := service.engine.NewSession()
createErr := session.Begin()
defer session.Close()
_, createErr = session.InsertOne(&userBean)
if createErr != nil {
session.Rollback()
return userBean, createErr
}
iUserId := userBean.Id
_, createErr = session.Insert(xorm.Accounts{UserId: iUserId, Updated: iNow})
if createErr != nil {
session.Rollback()
return userBean, createErr
}
token, _ := utils.GenerateToken(&userBean)
userBean.Token = token
userBean.TokenCreated = iNow
userBean.LastLoginTime = iNow
// After a successful login, cache the token locally
sTokenTime := strconv.Itoa(userBean.TokenCreated)
_, createErr = session.ID(iUserId).Update(userBean)
if createErr != nil {
session.Rollback()
return userBean, createErr
}
loginLog := xorm.UserLoginLogs{UserId: iUserId, Ip: service.ip, LoginTime: iNow, LoginFrom: service.loginFrom}
_, createErr = session.InsertOne(loginLog)
if createErr != nil {
session.Rollback()
//utils.ResFaiJSON(&ctx, createErr.Error(), "绑定微信失败", config.NOTGETDATA)
return userBean, createErr
}
err := services.PromotionAward(service.platform, session, userBean.ParentId)
if err != nil {
return userBean, err
}
var innerMsg string
innerMsg, err = services.ActivityAward(service.platform, session, 1, userBean.Id, service.ip)
if err != nil {
session.Rollback()
return userBean, errors.New(innerMsg + "; " + err.Error())
}
createErr = session.Commit()
ut, _ := ramcache.UserNameAndToken.Load(service.platform)
utMap := ut.(map[string][]string)
sUserId := strconv.Itoa(iUserId)
utMap[userBean.UserName] = []string{sUserId, token, sTokenTime, "1"}
utils.UpdateUserIdCard(service.platform, iUserId, map[string]interface{}{
"Username": userBean.UserName,
"Token": userBean.Token,
"TokenCreated": sTokenTime, // 注意,这里要用字符串,否则会提示登录过期
"WxOpenId": userBean.WxOpenId,
})
return userBean, nil
}
// Check whether the user has already bound a WeChat account
func (service WxService) checkIsBind(openId string) (userBean xorm.Users, bind bool) {
woiIdx, _ := ramcache.WxOpenIdIndex.Load(service.plat | dxMap := woiIdx.(map[string]beans.WxOpenId)
var wxOpenId beans.WxOpenId
wxOpenId, bind = woiIdxMap[openId]
if bind {
userId := wxOpenId.UserId
uic, _ := ramcache.UserIdCard.Load(service.platform)
uicMap := uic.(map[int]beans.UserProfile)
userProfile := uicMap[userId]
userBean = xorm.Users{
Id: userId,
Phone: userProfile.Phone,
UserName: userProfile.Username,
}
}
return
}
func (service WxService) doLogin(userBean xorm.Users) (xorm.Users, error) {
token, _ := utils.GenerateToken(&userBean)
now := utils.GetNowTime()
var userUpdateBean = xorm.Users{
Token: token,
TokenCreated: now,
LastLoginTime: now,
}
var respUserBean xorm.Users
// Begin the transaction
session := service.engine.NewSession()
err := session.Begin()
defer session.Close()
_, err = session.ID(userBean.Id).Update(userUpdateBean)
if err != nil {
session.Rollback()
return respUserBean, err
}
loginLog := xorm.UserLoginLogs{UserId: userBean.Id, Ip: service.ip, LoginTime: now, LoginFrom: service.loginFrom}
_, err = session.InsertOne(loginLog)
if err != nil {
session.Rollback()
return respUserBean, err
}
err = session.Commit()
if err != nil {
return respUserBean, err
}
userBean.Password = ""
userBean.Token = userUpdateBean.Token
userBean.TokenCreated = userUpdateBean.TokenCreated
userBean.LastLoginTime = userUpdateBean.LastLoginTime
utils.UpdateUserIdCard(service.platform, userBean.Id, map[string]interface{}{
"Token": userBean.Token,
"TokenCreated": strconv.Itoa(now), // 注意,这里要用字符串,否则会提示登录过期
})
return userBean, nil
}
/**
* @api {post} api/v1/wxLogin WeChat login
* @apiDescription
* <span style="color:lightcoral;">API owner: aTian</span><br/><br/>
* WeChat login<br>
* Business flow: log in with a WeChat account; if the account has not been bound to WeChat yet, create the account first and then log in.<br/>
* @apiVersion 1.0.0
* @apiName WxLogin
* @apiGroup user
* @apiPermission iOS and Android clients
* @apiParam (client request parameters) {string} code The code obtained from WeChat, used for WeChat OAuth2.0 authorized login
* @apiParam (client request parameters) {string} parent_id User id of the parent agent
* @apiParam (client request parameters) {string} login_from Login source: IOS or Android
*
* @apiError (failure response) {int} code Error code
* @apiError (failure response) {string} clientMsg Message shown to the client
* @apiError (failure response) {string} internalMsg Internal error code
* @apiError (failure response) {float} timeConsumed Server-side time spent
*
* @apiErrorExample {json} Failure response
* {
* "code": 204,
* "internalMsg": "",
* "clientMsg ": 0,
* "timeConsumed": 0
* }
*
* @apiSuccess (response) {int} code 200
* @apiSuccess (response) {string} clientMsg Message shown to the client
* @apiSuccess (response) {string} internalMsg Internal message
* @apiSuccess (response) {json} data Response payload
* @apiSuccess (response) {float} timeConsumed Server-side time spent
*
* @apiSuccessExample {json} Success response
* {
* "clientMsg": "登录成功",
* "code": 200,
* "data": {
* "Id": 207,
* "Phone": "",
* "Password": "",
* "UserName": "15592054945OlocX",
* "Name": "mj1958",
* "Email": "",
* "Created": 1559205494,
* "Birthday": "",
* "MobileType": 1,
* "Sex": 1,
* "Path": "",
* "VipLevel": 1,
* "Qq": "",
* "Wechat": "",
* "Status": 1,
* "ProxyStatus": 0,
* "UserType": 0,
* "Token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1NTk0NjQ2OTUsInBob25lIjoiIiwic3ViIjoyMDcsInVzZXJOYW1lIjoiMTU1OTIwNTQ5NDVPbG9jWCJ9.h9R-y7HtGNgrU3KdE08vdFUH3zJhZ1IzK3q0JPXTSL4",
* "RegIp": "",
* "UniqueCode": "",
* "TokenCreated": 1559205494,
* "SafePassword": "",
* "UserGroupId": "",
* "ParentId": 0,
* "LastLoginTime": 1559205494,
* "LastPlatformId": 0,
* "GroupSize": 0
* },
* "internalMsg": "",
* "timeConsumed": 8123478
* }
*/
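// Example request (sketch; parameter names as documented above, values are placeholders):
//   POST /api/v1/wxLogin
//   Content-Type: application/x-www-form-urlencoded
//
//   code=<code returned by the WeChat SDK>&parent_id=0&login_from=Android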
func (controller *WxController) WxLogin() {
ctx := controller.ctx
if !utils.RequiredParamPost(&ctx, []string{"code", "login_from"}) {
return
}
sCode := ctx.FormValue("code")
sLoginFrom := ctx.FormValue("login_from")
sParentId := ctx.FormValue("parent_id")
iParentId, transErr := strconv.Atoi(sParentId)
if transErr != nil {
iParentId = 0
}
tokenInfo, tokenOk := wxGetAccessToken(sCode)
if tokenOk {
platform := controller.platform
engine := models.MyEngine[platform]
var service = &WxService{
engine: engine,
platform: platform,
}
sOpenId := tokenInfo["openid"]
userBean, bind := service.checkIsBind(sOpenId)
if bind {
service.ip = utils.GetIp(ctx.Request())
service.loginFrom = sLoginFrom
userBean, logErr := service.doLogin(userBean)
if logErr == nil {
user := new(xorm.Users)
engine.ID(userBean.Id).Get(user)
utils.ResSuccJSON(&ctx, "", "登录成功", config.SUCCESSRES, user)
return
} else {
utils.ResFaiJSON(&ctx, logErr.Error(), "微信登录失败", config.NOTGETDATA)
return
}
} else {
iNow := utils.GetNowTime()
sAccessToken := tokenInfo["access_token"]
sOpenId := tokenInfo["openid"]
userInfo, getUserInfoOk := wxGetUserInfo(sAccessToken, sOpenId)
if getUserInfoOk {
var iMobileType = 1
if sLoginFrom == "IOS" {
iMobileType = 2
}
var newUserBean = xorm.DefaultUser()
newUserBean.UserName = "wx" + utils.RandString(5, 2)
newUserBean.Password = ""
newUserBean.Name = userInfo["nickname"]
newUserBean.Phone = ""
newUserBean.ParentId = iParentId
newUserBean.MobileType = iMobileType
newUserBean.WxOpenId = sOpenId
service.ip = utils.GetIp(ctx.Request())
service.loginFrom = sLoginFrom
var createErr error
newUserBean, createErr = service.createNewUser(newUserBean, iNow)
if createErr != nil {
utils.ResFaiJSON(&ctx, createErr.Error(), "绑定微信失败", config.NOTGETDATA)
return
} else {
user := new(xorm.Users)
engine.ID(newUserBean.Id).Get(user)
utils.ResSuccJSON(&ctx, "", "登录成功", config.SUCCESSRES, user)
return
}
} else {
utils.ResFaiJSON(&ctx, "1905301502", "绑定微信失败", config.NOTGETDATA)
return
}
}
} else {
var internalMsg = "1905301706"
var clientMsg = "微信授权失败"
if _, fieldExist := tokenInfo["errcode"]; fieldExist {
internalMsg = tokenInfo["errmsg"]
}
utils.ResFaiJSON(&ctx, internalMsg, clientMsg, config.NOTGETDATA)
return
}
}
| form)
woiI | identifier_name |
wxController.go | package frontEndControllers
import (
"encoding/json"
"errors"
goXorm "github.com/go-xorm/xorm"
"github.com/kataras/iris"
"qpgame/common/services"
"qpgame/common/utils"
"qpgame/config"
"qpgame/models"
"qpgame/models/beans"
"qpgame/models/xorm"
"qpgame/ramcache"
"strconv"
"time"
)
type WxController struct {
platform string
ctx iris.Context
}
type WxService struct {
platform string
loginFrom string
ip string
engine *goXorm.Engine
}
var WxAppId = ""
var WxAppSecret = ""
const WxAuthUrl string = "https://api.weixin.qq.com/sns/oauth2/access_token"
const WxUserInfoUrl string = "https://api.weixin.qq.com/sns/userinfo"
// Constructor
func NewWxController(ctx iris.Context) *WxController {
obj := new(WxController)
obj.platform = ctx.Params().Get("platform")
obj.ctx = ctx
for k, v := range config.PlatformCPs {
if k == obj.platform {
wxSet := v.(map[string]interface{})["wx"].(config.WxSet)
WxAppId = wxSet.AppId
WxAppSecret = wxSet.AppSecret
}
}
return obj
}
// Fetch the access_token
func wxGetAccessToken(sCode string) (map[string]string, bool) {
params := map[string]string{
"appid": WxAppId,
"secret": WxAppSecret,
"code": sCode,
"grant_type": "authorization_code",
}
reqUrl := utils.BuildUrl(WxAuthUrl, params)
respData := utils.ReqGet(reqUrl, 10*time.Second)
result := make(map[string]string)
var ok bool
json.Unmarshal(respData, &result)
if _, exist := result["access_token"]; exist {
ok = true
} else {
ok = false
}
return result, ok
}
func wxGetUserInfo(accessToken, openId string) (userInfo map[string]string, ok bool) {
params := map[string]string{
"access_token": accessToken,
"openid": openId,
}
reqUrl := utils.BuildUrl(WxUserInfoUrl, params)
respData := utils.ReqGet(reqUrl, 10*time.Second)
userInfo = make(map[string]string)
json.Unmarshal(respData, &userInfo)
if _, exist := userInfo["openid"]; exist {
ok = true
} else {
ok = false
}
return
}
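// Illustrative note (sketch): the userinfo response is JSON of roughly the form
// {"openid": "...", "nickname": "...", ...}; only "openid" (used for the bind lookup) and
// "nickname" (used as the new account's display name) are consumed in this file.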
func (service WxService) createNewUser(userBean xorm.Users, iNow int) (xorm.Users, error) {
session := service.engine.NewSession()
createErr := session.Begin()
defer session.Close()
_, createErr = session.InsertOne(&userBean)
if createErr != nil {
session.Rollback()
return userBean, createErr
}
iUserId := userBean.Id
_, createErr = session.Insert(xorm.Accounts{UserId: iUserId, Updated: iNow})
if createErr != nil {
session.Rollback()
return userBean, createErr
}
token, _ := utils.GenerateToken(&userBean)
userBean.Token = token
userBean.TokenCreated = iNow
userBean.LastLoginTime = iNow
// After a successful login, cache the token locally
sTokenTime := strconv.Itoa(userBean.TokenCreated)
_, createErr = session.ID(iUserId).Update(userBean)
if createErr != nil {
session.Rollback()
return userBean, createErr
}
loginLog := xorm.UserLoginLogs{UserId: iUserId, Ip: service.ip, LoginTime: iNow, LoginFrom: service.loginFrom}
_, createErr = session.InsertOne(loginLog)
if createErr != nil {
session.Rollback()
//utils.ResFaiJSON(&ctx, createErr.Error(), "绑定微信失败", config.NOTGETDATA)
return userBean, createErr
}
err := services.PromotionAward(service.platform, session, userBean.ParentId)
if err != nil {
return userBean, err
}
var innerMsg string
innerMsg, err = services.ActivityAward(service.platform, session, 1, userBean.Id, service.ip)
if err != nil {
session.Rollback()
return userBean, errors.New(innerMsg + "; " + err.Error())
}
createErr = session.Commit()
ut, _ := ramcache.UserNameAndToken.Load(service.platform)
utMap := ut.(map[string][]string)
sUserId := strconv.Itoa(iUserId)
utMap[userBean.UserName] = []string{sUserId, token, sTokenTime, "1"}
utils.UpdateUserIdCard(service.platform, iUserId, map[string]interface{}{
"Username": userBean.UserName,
"Token": userBean.Token,
"TokenCreated": sTokenTime, // 注意,这里要用字符串,否则会提示登录过期
"WxOpenId": userBean.WxOpenId,
})
return userBean, nil
}
// Check whether the user has already bound a WeChat account
func (service WxService) checkIsBind(openId string) (userBean xorm.Users, bind bool) {
woiIdx, _ := ramcache.WxOpenIdIndex.Load(service.platform)
woiIdxMap := woiIdx.(map[string]beans.WxOpenId)
var | Bean)
now := utils.GetNowTime()
var userUpdateBean = xorm.Users{
Token: token,
TokenCreated: now,
LastLoginTime: now,
}
var respUserBean xorm.Users
// Begin the transaction
session := service.engine.NewSession()
err := session.Begin()
defer session.Close()
_, err = session.ID(userBean.Id).Update(userUpdateBean)
if err != nil {
session.Rollback()
return respUserBean, err
}
loginLog := xorm.UserLoginLogs{UserId: userBean.Id, Ip: service.ip, LoginTime: now, LoginFrom: service.loginFrom}
_, err = session.InsertOne(loginLog)
if err != nil {
session.Rollback()
return respUserBean, err
}
err = session.Commit()
if err != nil {
return respUserBean, err
}
userBean.Password = ""
userBean.Token = userUpdateBean.Token
userBean.TokenCreated = userUpdateBean.TokenCreated
userBean.LastLoginTime = userUpdateBean.LastLoginTime
utils.UpdateUserIdCard(service.platform, userBean.Id, map[string]interface{}{
"Token": userBean.Token,
"TokenCreated": strconv.Itoa(now), // 注意,这里要用字符串,否则会提示登录过期
})
return userBean, nil
}
/**
* @api {post} api/v1/wxLogin WeChat login
* @apiDescription
* <span style="color:lightcoral;">API owner: aTian</span><br/><br/>
* WeChat login<br>
* Business flow: log in with a WeChat account; if the account has not been bound to WeChat yet, create the account first and then log in.<br/>
* @apiVersion 1.0.0
* @apiName WxLogin
* @apiGroup user
* @apiPermission iOS and Android clients
* @apiParam (client request parameters) {string} code The code obtained from WeChat, used for WeChat OAuth2.0 authorized login
* @apiParam (client request parameters) {string} parent_id User id of the parent agent
* @apiParam (client request parameters) {string} login_from Login source: IOS or Android
*
* @apiError (failure response) {int} code Error code
* @apiError (failure response) {string} clientMsg Message shown to the client
* @apiError (failure response) {string} internalMsg Internal error code
* @apiError (failure response) {float} timeConsumed Server-side time spent
*
* @apiErrorExample {json} Failure response
* {
* "code": 204,
* "internalMsg": "",
* "clientMsg ": 0,
* "timeConsumed": 0
* }
*
* @apiSuccess (response) {int} code 200
* @apiSuccess (response) {string} clientMsg Message shown to the client
* @apiSuccess (response) {string} internalMsg Internal message
* @apiSuccess (response) {json} data Response payload
* @apiSuccess (response) {float} timeConsumed Server-side time spent
*
* @apiSuccessExample {json} Success response
* {
* "clientMsg": "登录成功",
* "code": 200,
* "data": {
* "Id": 207,
* "Phone": "",
* "Password": "",
* "UserName": "15592054945OlocX",
* "Name": "mj1958",
* "Email": "",
* "Created": 1559205494,
* "Birthday": "",
* "MobileType": 1,
* "Sex": 1,
* "Path": "",
* "VipLevel": 1,
* "Qq": "",
* "Wechat": "",
* "Status": 1,
* "ProxyStatus": 0,
* "UserType": 0,
* "Token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1NTk0NjQ2OTUsInBob25lIjoiIiwic3ViIjoyMDcsInVzZXJOYW1lIjoiMTU1OTIwNTQ5NDVPbG9jWCJ9.h9R-y7HtGNgrU3KdE08vdFUH3zJhZ1IzK3q0JPXTSL4",
* "RegIp": "",
* "UniqueCode": "",
* "TokenCreated": 1559205494,
* "SafePassword": "",
* "UserGroupId": "",
* "ParentId": 0,
* "LastLoginTime": 1559205494,
* "LastPlatformId": 0,
* "GroupSize": 0
* },
* "internalMsg": "",
* "timeConsumed": 8123478
* }
*/
func (controller *WxController) WxLogin() {
ctx := controller.ctx
if !utils.RequiredParamPost(&ctx, []string{"code", "login_from"}) {
return
}
sCode := ctx.FormValue("code")
sLoginFrom := ctx.FormValue("login_from")
sParentId := ctx.FormValue("parent_id")
iParentId, transErr := strconv.Atoi(sParentId)
if transErr != nil {
iParentId = 0
}
tokenInfo, tokenOk := wxGetAccessToken(sCode)
if tokenOk {
platform := controller.platform
engine := models.MyEngine[platform]
var service = &WxService{
engine: engine,
platform: platform,
}
sOpenId := tokenInfo["openid"]
userBean, bind := service.checkIsBind(sOpenId)
if bind {
service.ip = utils.GetIp(ctx.Request())
service.loginFrom = sLoginFrom
userBean, logErr := service.doLogin(userBean)
if logErr == nil {
user := new(xorm.Users)
engine.ID(userBean.Id).Get(user)
utils.ResSuccJSON(&ctx, "", "登录成功", config.SUCCESSRES, user)
return
} else {
utils.ResFaiJSON(&ctx, logErr.Error(), "微信登录失败", config.NOTGETDATA)
return
}
} else {
iNow := utils.GetNowTime()
sAccessToken := tokenInfo["access_token"]
sOpenId := tokenInfo["openid"]
userInfo, getUserInfoOk := wxGetUserInfo(sAccessToken, sOpenId)
if getUserInfoOk {
var iMobileType = 1
if sLoginFrom == "IOS" {
iMobileType = 2
}
var newUserBean = xorm.DefaultUser()
newUserBean.UserName = "wx" + utils.RandString(5, 2)
newUserBean.Password = ""
newUserBean.Name = userInfo["nickname"]
newUserBean.Phone = ""
newUserBean.ParentId = iParentId
newUserBean.MobileType = iMobileType
newUserBean.WxOpenId = sOpenId
service.ip = utils.GetIp(ctx.Request())
service.loginFrom = sLoginFrom
var createErr error
newUserBean, createErr = service.createNewUser(newUserBean, iNow)
if createErr != nil {
utils.ResFaiJSON(&ctx, createErr.Error(), "绑定微信失败", config.NOTGETDATA)
return
} else {
user := new(xorm.Users)
engine.ID(newUserBean.Id).Get(user)
utils.ResSuccJSON(&ctx, "", "登录成功", config.SUCCESSRES, user)
return
}
} else {
utils.ResFaiJSON(&ctx, "1905301502", "绑定微信失败", config.NOTGETDATA)
return
}
}
} else {
var internalMsg = "1905301706"
var clientMsg = "微信授权失败"
if _, fieldExist := tokenInfo["errcode"]; fieldExist {
internalMsg = tokenInfo["errmsg"]
}
utils.ResFaiJSON(&ctx, internalMsg, clientMsg, config.NOTGETDATA)
return
}
}
| wxOpenId beans.WxOpenId
wxOpenId, bind = woiIdxMap[openId]
if bind {
userId := wxOpenId.UserId
uic, _ := ramcache.UserIdCard.Load(service.platform)
uicMap := uic.(map[int]beans.UserProfile)
userProfile := uicMap[userId]
userBean = xorm.Users{
Id: userId,
Phone: userProfile.Phone,
UserName: userProfile.Username,
}
}
return
}
func (service WxService) doLogin(userBean xorm.Users) (xorm.Users, error) {
token, _ := utils.GenerateToken(&user | identifier_body |
wxController.go | package frontEndControllers
import (
"encoding/json"
"errors"
goXorm "github.com/go-xorm/xorm"
"github.com/kataras/iris"
"qpgame/common/services"
"qpgame/common/utils"
"qpgame/config"
"qpgame/models"
"qpgame/models/beans"
"qpgame/models/xorm"
"qpgame/ramcache"
"strconv"
"time"
)
type WxController struct {
platform string
ctx iris.Context
}
type WxService struct {
platform string
loginFrom string
ip string
engine *goXorm.Engine
}
var WxAppId = ""
var WxAppSecret = ""
const WxAuthUrl string = "https://api.weixin.qq.com/sns/oauth2/access_token"
const WxUserInfoUrl string = "https://api.weixin.qq.com/sns/userinfo"
// Constructor
func NewWxController(ctx iris.Context) *WxController {
obj := new(WxController)
obj.platform = ctx.Params().Get("platform")
obj.ctx = ctx
for k, v := range config.PlatformCPs {
if k == obj.platform {
wxSet := v.(map[string]interface{})["wx"].(config.WxSet)
WxAppId = wxSet.AppId
WxAppSecret = wxSet.AppSecret
}
}
return obj
}
// Fetch the access_token
func wxGetAccessToken(sCode string) (map[string]string, bool) {
params := map[string]string{
"appid": WxAppId,
"secret": WxAppSecret,
"code": sCode,
"grant_type": "authorization_code",
}
reqUrl := utils.BuildUrl(WxAuthUrl, params)
respData := utils.ReqGet(reqUrl, 10*time.Second)
result := make(map[string]string)
var ok bool
json.Unmarshal(respData, &result)
if _, exist := result["access_token"]; exist {
ok = true
} else {
ok = false
}
return result, ok
}
func wxGetUserInfo(accessToken, openId string) (userInfo map[string]string, ok bool) {
params := map[string]string{
"access_token": accessToken,
"openid": openId,
}
reqUrl := utils.BuildUrl(WxUserInfoUrl, params)
respData := utils.ReqGet(reqUrl, 10*time.Second)
userInfo = make(map[string]string)
json.Unmarshal(respData, &userInfo)
if _, exist := userInfo["openid"]; exist {
ok = true
} else {
ok = false
}
return
}
func (service WxService) createNewUser(userBean xorm.Users, iNow int) (xorm.Users, error) {
session := service.engine.NewSession()
createErr := session.Begin()
defer session.Close()
_, createErr = session.InsertOne(&userBean)
if createErr != nil {
session.Rollback()
return userBean, createErr
}
iUserId := userBean.Id
_, createErr = session.Insert(xorm.Accounts{UserId: iUserId, Updated: iNow})
if createErr != nil {
session.Rollback()
return userBean, createErr
}
token, _ := utils.GenerateToken(&userBean)
userBean.Token = token
userBean.TokenCreated = iNow
userBean.LastLoginTime = iNow
// After a successful login, cache the token locally
sTokenTime := strconv.Itoa(userBean.TokenCreated)
_, createErr = session.ID(iUserId).Update(userBean)
if createErr != nil {
session.Rollback()
return user | rId: iUserId, Ip: service.ip, LoginTime: iNow, LoginFrom: service.loginFrom}
_, createErr = session.InsertOne(loginLog)
if createErr != nil {
session.Rollback()
//utils.ResFaiJSON(&ctx, createErr.Error(), "绑定微信失败", config.NOTGETDATA)
return userBean, createErr
}
err := services.PromotionAward(service.platform, session, userBean.ParentId)
if err != nil {
return userBean, err
}
var innerMsg string
innerMsg, err = services.ActivityAward(service.platform, session, 1, userBean.Id, service.ip)
if err != nil {
session.Rollback()
return userBean, errors.New(innerMsg + "; " + err.Error())
}
createErr = session.Commit()
ut, _ := ramcache.UserNameAndToken.Load(service.platform)
utMap := ut.(map[string][]string)
sUserId := strconv.Itoa(iUserId)
utMap[userBean.UserName] = []string{sUserId, token, sTokenTime, "1"}
utils.UpdateUserIdCard(service.platform, iUserId, map[string]interface{}{
"Username": userBean.UserName,
"Token": userBean.Token,
"TokenCreated": sTokenTime, // 注意,这里要用字符串,否则会提示登录过期
"WxOpenId": userBean.WxOpenId,
})
return userBean, nil
}
// Check whether the user has already bound a WeChat account
func (service WxService) checkIsBind(openId string) (userBean xorm.Users, bind bool) {
woiIdx, _ := ramcache.WxOpenIdIndex.Load(service.platform)
woiIdxMap := woiIdx.(map[string]beans.WxOpenId)
var wxOpenId beans.WxOpenId
wxOpenId, bind = woiIdxMap[openId]
if bind {
userId := wxOpenId.UserId
uic, _ := ramcache.UserIdCard.Load(service.platform)
uicMap := uic.(map[int]beans.UserProfile)
userProfile := uicMap[userId]
userBean = xorm.Users{
Id: userId,
Phone: userProfile.Phone,
UserName: userProfile.Username,
}
}
return
}
func (service WxService) doLogin(userBean xorm.Users) (xorm.Users, error) {
token, _ := utils.GenerateToken(&userBean)
now := utils.GetNowTime()
var userUpdateBean = xorm.Users{
Token: token,
TokenCreated: now,
LastLoginTime: now,
}
var respUserBean xorm.Users
// Begin the transaction
session := service.engine.NewSession()
err := session.Begin()
defer session.Close()
_, err = session.ID(userBean.Id).Update(userUpdateBean)
if err != nil {
session.Rollback()
return respUserBean, err
}
loginLog := xorm.UserLoginLogs{UserId: userBean.Id, Ip: service.ip, LoginTime: now, LoginFrom: service.loginFrom}
_, err = session.InsertOne(loginLog)
if err != nil {
session.Rollback()
return respUserBean, err
}
err = session.Commit()
if err != nil {
return respUserBean, err
}
userBean.Password = ""
userBean.Token = userUpdateBean.Token
userBean.TokenCreated = userUpdateBean.TokenCreated
userBean.LastLoginTime = userUpdateBean.LastLoginTime
utils.UpdateUserIdCard(service.platform, userBean.Id, map[string]interface{}{
"Token": userBean.Token,
"TokenCreated": strconv.Itoa(now), // 注意,这里要用字符串,否则会提示登录过期
})
return userBean, nil
}
/**
* @api {post} api/v1/wxLogin WeChat login
* @apiDescription
* <span style="color:lightcoral;">Interface owner: aTian</span><br/><br/>
* WeChat login<br>
* Business description: log in with a WeChat account; if the account has not been bound to WeChat yet, create an account first and then log in</br>
* @apiVersion 1.0.0
* @apiName WxLogin
* @apiGroup user
* @apiPermission iOS and Android clients
* @apiParam (client request parameters) {string} code The code obtained from WeChat, used for WeChat OAuth2.0 authorized login
* @apiParam (client request parameters) {string} parent_id Parent agent user Id
* @apiParam (client request parameters) {string} login_from Login source: IOS or Android
*
* @apiError (failure response) {int} code Error code
* @apiError (failure response) {string} clientMsg Client-facing message
* @apiError (failure response) {string} internalMsg Internal error code
* @apiError (failure response) {float} timeConsumed Backend time consumed
*
* @apiErrorExample {json} Failure response
* {
* "code": 204,
* "internalMsg": "",
* "clientMsg ": 0,
* "timeConsumed": 0
* }
*
* @apiSuccess (response result) {int} code 200
* @apiSuccess (response result) {string} clientMsg Client-facing message
* @apiSuccess (response result) {string} internalMsg Internal message
* @apiSuccess (response result) {json} data Returned data
* @apiSuccess (response result) {float} timeConsumed Backend time consumed
*
* @apiSuccessExample {json} Response example
* {
* "clientMsg": "登录成功",
* "code": 200,
* "data": {
* "Id": 207,
* "Phone": "",
* "Password": "",
* "UserName": "15592054945OlocX",
* "Name": "mj1958",
* "Email": "",
* "Created": 1559205494,
* "Birthday": "",
* "MobileType": 1,
* "Sex": 1,
* "Path": "",
* "VipLevel": 1,
* "Qq": "",
* "Wechat": "",
* "Status": 1,
* "ProxyStatus": 0,
* "UserType": 0,
* "Token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1NTk0NjQ2OTUsInBob25lIjoiIiwic3ViIjoyMDcsInVzZXJOYW1lIjoiMTU1OTIwNTQ5NDVPbG9jWCJ9.h9R-y7HtGNgrU3KdE08vdFUH3zJhZ1IzK3q0JPXTSL4",
* "RegIp": "",
* "UniqueCode": "",
* "TokenCreated": 1559205494,
* "SafePassword": "",
* "UserGroupId": "",
* "ParentId": 0,
* "LastLoginTime": 1559205494,
* "LastPlatformId": 0,
* "GroupSize": 0
* },
* "internalMsg": "",
* "timeConsumed": 8123478
* }
*/
func (controller *WxController) WxLogin() {
ctx := controller.ctx
if !utils.RequiredParamPost(&ctx, []string{"code", "login_from"}) {
return
}
sCode := ctx.FormValue("code")
sLoginFrom := ctx.FormValue("login_from")
sParentId := ctx.FormValue("parent_id")
iParentId, transErr := strconv.Atoi(sParentId)
if transErr != nil {
iParentId = 0
}
tokenInfo, tokenOk := wxGetAccessToken(sCode)
if tokenOk {
platform := controller.platform
engine := models.MyEngine[platform]
var service = &WxService{
engine: engine,
platform: platform,
}
sOpenId := tokenInfo["openid"]
userBean, bind := service.checkIsBind(sOpenId)
if bind {
service.ip = utils.GetIp(ctx.Request())
service.loginFrom = sLoginFrom
userBean, logErr := service.doLogin(userBean)
if logErr == nil {
user := new(xorm.Users)
engine.ID(userBean.Id).Get(user)
utils.ResSuccJSON(&ctx, "", "登录成功", config.SUCCESSRES, user)
return
} else {
utils.ResFaiJSON(&ctx, logErr.Error(), "微信登录失败", config.NOTGETDATA)
return
}
} else {
iNow := utils.GetNowTime()
sAccessToken := tokenInfo["access_token"]
sOpenId := tokenInfo["openid"]
userInfo, getUserInfoOk := wxGetUserInfo(sAccessToken, sOpenId)
if getUserInfoOk {
var iMobileType = 1
if sLoginFrom == "IOS" {
iMobileType = 2
}
var newUserBean = xorm.DefaultUser()
newUserBean.UserName = "wx" + utils.RandString(5, 2)
newUserBean.Password = ""
newUserBean.Name = userInfo["nickname"]
newUserBean.Phone = ""
newUserBean.ParentId = iParentId
newUserBean.MobileType = iMobileType
newUserBean.WxOpenId = sOpenId
service.ip = utils.GetIp(ctx.Request())
service.loginFrom = sLoginFrom
var createErr error
newUserBean, createErr = service.createNewUser(newUserBean, iNow)
if createErr != nil {
utils.ResFaiJSON(&ctx, createErr.Error(), "绑定微信失败", config.NOTGETDATA)
return
} else {
user := new(xorm.Users)
engine.ID(newUserBean.Id).Get(user)
utils.ResSuccJSON(&ctx, "", "登录成功", config.SUCCESSRES, user)
return
}
} else {
utils.ResFaiJSON(&ctx, "1905301502", "绑定微信失败", config.NOTGETDATA)
return
}
}
} else {
var internalMsg = "1905301706"
var clientMsg = "微信授权失败"
if _, fieldExist := tokenInfo["errcode"]; fieldExist {
internalMsg = tokenInfo["errmsg"]
}
utils.ResFaiJSON(&ctx, internalMsg, clientMsg, config.NOTGETDATA)
return
}
}
| Bean, createErr
}
loginLog := xorm.UserLoginLogs{Use | conditional_block |
wxController.go | package frontEndControllers
import (
"encoding/json"
"errors"
goXorm "github.com/go-xorm/xorm"
"github.com/kataras/iris"
"qpgame/common/services"
"qpgame/common/utils"
"qpgame/config"
"qpgame/models"
"qpgame/models/beans"
"qpgame/models/xorm"
"qpgame/ramcache"
"strconv"
"time"
)
type WxController struct {
platform string
ctx iris.Context
}
type WxService struct {
platform string
loginFrom string
ip string
engine *goXorm.Engine
}
var WxAppId = ""
var WxAppSecret = ""
const WxAuthUrl string = "https://api.weixin.qq.com/sns/oauth2/access_token"
const WxUserInfoUrl string = "https://api.weixin.qq.com/sns/userinfo"
// Constructor
func NewWxController(ctx iris.Context) *WxController {
obj := new(WxController)
obj.platform = ctx.Params().Get("platform")
obj.ctx = ctx
for k, v := range config.PlatformCPs {
if k == obj.platform {
wxSet := v.(map[string]interface{})["wx"].(config.WxSet)
WxAppId = wxSet.AppId
WxAppSecret = wxSet.AppSecret
}
}
return obj
}
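// Illustrative sketch (assumption; the real definitions live in the config
// package): the shape NewWxController expects config.PlatformCPs to have,
// as implied by the type assertions above. The platform name and credentials
// below are placeholders.
//
//   type WxSet struct {
//       AppId     string
//       AppSecret string
//   }
//
//   var PlatformCPs = map[string]interface{}{
//       "someplatform": map[string]interface{}{
//           "wx": WxSet{AppId: "wx0123456789abcdef", AppSecret: "..."},
//       },
//   }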
// Get the access_token
func wxGetAccessToken(sCode string) (map[string]string, bool) {
params := map[string]string{
"appid": WxAppId,
"secret": WxAppSecret,
"code": sCode,
"grant_type": "authorization_code",
}
reqUrl := utils.BuildUrl(WxAuthUrl, params)
respData := utils.ReqGet(reqUrl, 10*time.Second)
result := make(map[string]string)
var ok bool
json.Unmarshal(respData, &result)
if _, exist := result["access_token"]; exist {
ok = true
} else {
ok = false
}
return result, ok
}
func wxGetUserInfo(accessToken, openId string) (userInfo map[string]string, ok bool) {
params := map[string]string{
"access_token": accessToken,
"openid": openId,
}
reqUrl := utils.BuildUrl(WxUserInfoUrl, params)
respData := utils.ReqGet(reqUrl, 10*time.Second)
userInfo = make(map[string]string)
json.Unmarshal(respData, &userInfo)
if _, exist := userInfo["openid"]; exist {
ok = true
} else {
ok = false
}
return
}
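// Illustrative sketch, not taken from this codebase: the response shapes that
// wxGetAccessToken and wxGetUserInfo parse. The field list is an assumption
// pieced together from the keys this file actually reads (access_token,
// openid, errcode, errmsg, nickname) plus the public WeChat OAuth2 docs; the
// live API may return more fields, and non-string values will not survive the
// ignored Unmarshal into map[string]string.
//
// oauth2/access_token, success (subset):
//   {"access_token":"ACCESS_TOKEN","refresh_token":"REFRESH_TOKEN","openid":"OPENID","scope":"snsapi_userinfo"}
// oauth2/access_token, failure:
//   {"errcode":40029,"errmsg":"invalid code"}
// sns/userinfo, success (subset):
//   {"openid":"OPENID","nickname":"NICKNAME","headimgurl":"https://..."}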
func (service WxService) createNewUser(userBean xorm.Users, iNow int) (xorm.Users, error) {
session := service.engine.NewSession()
createErr := session.Begin()
defer session.Close()
_, createErr = session.InsertOne(&userBean)
if createErr != nil {
session.Rollback()
return userBean, createErr
}
iUserId := userBean.Id
_, createErr = session.Insert(xorm.Accounts{UserId: iUserId, Updated: iNow})
if createErr != nil {
session.Rollback()
return userBean, createErr
}
token, _ := utils.GenerateToken(&userBean)
userBean.Token = token
userBean.TokenCreated = iNow
userBean.LastLoginTime = iNow
// After a successful login, cache the token locally
sTokenTime := strconv.Itoa(userBean.TokenCreated)
_, createErr = session.ID(iUserId).Update(userBean)
if createErr != nil {
session.Rollback()
return userBean, createErr
}
loginLog := xorm.UserLoginLogs{UserId: iUserId, Ip: service.ip, LoginTime: iNow, LoginFrom: service.loginFrom}
_, createErr = session.InsertOne(loginLog)
if createErr != nil {
session.Rollback()
//utils.ResFaiJSON(&ctx, createErr.Error(), "绑定微信失败", config.NOTGETDATA)
return userBean, createErr
}
err := services.PromotionAward(service.platform, session, userBean.ParentId)
if err != nil {
session.Rollback()
return userBean, err
}
var innerMsg string
innerMsg, err = services.ActivityAward(service.platform, session, 1, userBean.Id, service.ip)
if err != nil {
session.Rollback()
return userBean, errors.New(innerMsg + "; " + err.Error())
}
createErr = session.Commit()
if createErr != nil {
return userBean, createErr
}
ut, _ := ramcache.UserNameAndToken.Load(service.platform) | utMap := ut.(map[string][]string)
sUserId := strconv.Itoa(iUserId)
utMap[userBean.UserName] = []string{sUserId, token, sTokenTime, "1"}
utils.UpdateUserIdCard(service.platform, iUserId, map[string]interface{}{
"Username": userBean.UserName,
"Token": userBean.Token,
"TokenCreated": sTokenTime, // 注意,这里要用字符串,否则会提示登录过期
"WxOpenId": userBean.WxOpenId,
})
return userBean, nil
}
// Check whether the user has already bound a WeChat account
func (service WxService) checkIsBind(openId string) (userBean xorm.Users, bind bool) {
woiIdx, _ := ramcache.WxOpenIdIndex.Load(service.platform)
woiIdxMap := woiIdx.(map[string]beans.WxOpenId)
var wxOpenId beans.WxOpenId
wxOpenId, bind = woiIdxMap[openId]
if bind {
userId := wxOpenId.UserId
uic, _ := ramcache.UserIdCard.Load(service.platform)
uicMap := uic.(map[int]beans.UserProfile)
userProfile := uicMap[userId]
userBean = xorm.Users{
Id: userId,
Phone: userProfile.Phone,
UserName: userProfile.Username,
}
}
return
}
func (service WxService) doLogin(userBean xorm.Users) (xorm.Users, error) {
token, _ := utils.GenerateToken(&userBean)
now := utils.GetNowTime()
var userUpdateBean = xorm.Users{
Token: token,
TokenCreated: now,
LastLoginTime: now,
}
var respUserBean xorm.Users
// Start the transaction
session := service.engine.NewSession()
err := session.Begin()
defer session.Close()
_, err = session.ID(userBean.Id).Update(userUpdateBean)
if err != nil {
session.Rollback()
return respUserBean, err
}
loginLog := xorm.UserLoginLogs{UserId: userBean.Id, Ip: service.ip, LoginTime: now, LoginFrom: service.loginFrom}
_, err = session.InsertOne(loginLog)
if err != nil {
session.Rollback()
return respUserBean, err
}
err = session.Commit()
if err != nil {
return respUserBean, err
}
userBean.Password = ""
userBean.Token = userUpdateBean.Token
userBean.TokenCreated = userUpdateBean.TokenCreated
userBean.LastLoginTime = userUpdateBean.LastLoginTime
utils.UpdateUserIdCard(service.platform, userBean.Id, map[string]interface{}{
"Token": userBean.Token,
"TokenCreated": strconv.Itoa(now), // 注意,这里要用字符串,否则会提示登录过期
})
return userBean, nil
}
/**
* @api {post} api/v1/wxLogin WeChat login
* @apiDescription
* <span style="color:lightcoral;">Interface owner: aTian</span><br/><br/>
* WeChat login<br>
* Business description: log in with a WeChat account; if the account has not been bound to WeChat yet, create an account first and then log in</br>
* @apiVersion 1.0.0
* @apiName WxLogin
* @apiGroup user
* @apiPermission iOS and Android clients
* @apiParam (client request parameters) {string} code The code obtained from WeChat, used for WeChat OAuth2.0 authorized login
* @apiParam (client request parameters) {string} parent_id Parent agent user Id
* @apiParam (client request parameters) {string} login_from Login source: IOS or Android
*
* @apiError (failure response) {int} code Error code
* @apiError (failure response) {string} clientMsg Client-facing message
* @apiError (failure response) {string} internalMsg Internal error code
* @apiError (failure response) {float} timeConsumed Backend time consumed
*
* @apiErrorExample {json} Failure response
* {
* "code": 204,
* "internalMsg": "",
* "clientMsg ": 0,
* "timeConsumed": 0
* }
*
* @apiSuccess (response result) {int} code 200
* @apiSuccess (response result) {string} clientMsg Client-facing message
* @apiSuccess (response result) {string} internalMsg Internal message
* @apiSuccess (response result) {json} data Returned data
* @apiSuccess (response result) {float} timeConsumed Backend time consumed
*
* @apiSuccessExample {json} Response example
* {
* "clientMsg": "登录成功",
* "code": 200,
* "data": {
* "Id": 207,
* "Phone": "",
* "Password": "",
* "UserName": "15592054945OlocX",
* "Name": "mj1958",
* "Email": "",
* "Created": 1559205494,
* "Birthday": "",
* "MobileType": 1,
* "Sex": 1,
* "Path": "",
* "VipLevel": 1,
* "Qq": "",
* "Wechat": "",
* "Status": 1,
* "ProxyStatus": 0,
* "UserType": 0,
* "Token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1NTk0NjQ2OTUsInBob25lIjoiIiwic3ViIjoyMDcsInVzZXJOYW1lIjoiMTU1OTIwNTQ5NDVPbG9jWCJ9.h9R-y7HtGNgrU3KdE08vdFUH3zJhZ1IzK3q0JPXTSL4",
* "RegIp": "",
* "UniqueCode": "",
* "TokenCreated": 1559205494,
* "SafePassword": "",
* "UserGroupId": "",
* "ParentId": 0,
* "LastLoginTime": 1559205494,
* "LastPlatformId": 0,
* "GroupSize": 0
* },
* "internalMsg": "",
* "timeConsumed": 8123478
* }
*/
func (controller *WxController) WxLogin() {
ctx := controller.ctx
if !utils.RequiredParamPost(&ctx, []string{"code", "login_from"}) {
return
}
sCode := ctx.FormValue("code")
sLoginFrom := ctx.FormValue("login_from")
sParentId := ctx.FormValue("parent_id")
iParentId, transErr := strconv.Atoi(sParentId)
if transErr != nil {
iParentId = 0
}
tokenInfo, tokenOk := wxGetAccessToken(sCode)
if tokenOk {
platform := controller.platform
engine := models.MyEngine[platform]
var service = &WxService{
engine: engine,
platform: platform,
}
sOpenId := tokenInfo["openid"]
userBean, bind := service.checkIsBind(sOpenId)
if bind {
service.ip = utils.GetIp(ctx.Request())
service.loginFrom = sLoginFrom
userBean, logErr := service.doLogin(userBean)
if logErr == nil {
user := new(xorm.Users)
engine.ID(userBean.Id).Get(user)
utils.ResSuccJSON(&ctx, "", "登录成功", config.SUCCESSRES, user)
return
} else {
utils.ResFaiJSON(&ctx, logErr.Error(), "微信登录失败", config.NOTGETDATA)
return
}
} else {
iNow := utils.GetNowTime()
sAccessToken := tokenInfo["access_token"]
sOpenId := tokenInfo["openid"]
userInfo, getUserInfoOk := wxGetUserInfo(sAccessToken, sOpenId)
if getUserInfoOk {
var iMobileType = 1
if sLoginFrom == "IOS" {
iMobileType = 2
}
var newUserBean = xorm.DefaultUser()
newUserBean.UserName = "wx" + utils.RandString(5, 2)
newUserBean.Password = ""
newUserBean.Name = userInfo["nickname"]
newUserBean.Phone = ""
newUserBean.ParentId = iParentId
newUserBean.MobileType = iMobileType
newUserBean.WxOpenId = sOpenId
service.ip = utils.GetIp(ctx.Request())
service.loginFrom = sLoginFrom
var createErr error
newUserBean, createErr = service.createNewUser(newUserBean, iNow)
if createErr != nil {
utils.ResFaiJSON(&ctx, createErr.Error(), "绑定微信失败", config.NOTGETDATA)
return
} else {
user := new(xorm.Users)
engine.ID(newUserBean.Id).Get(user)
utils.ResSuccJSON(&ctx, "", "登录成功", config.SUCCESSRES, user)
return
}
} else {
utils.ResFaiJSON(&ctx, "1905301502", "绑定微信失败", config.NOTGETDATA)
return
}
}
} else {
var internalMsg = "1905301706"
var clientMsg = "微信授权失败"
if _, fieldExist := tokenInfo["errcode"]; fieldExist {
internalMsg = tokenInfo["errmsg"]
}
utils.ResFaiJSON(&ctx, internalMsg, clientMsg, config.NOTGETDATA)
return
}
} | random_line_split |
|
redcap2mysql.py | #!/usr/bin/python
# Export data from a REDCap project and send to a MySQL database.
# Track changes to transferred data files in local git repository.
#
# This is just a *rough* prototype in the *early* stages of development.
#
# It has been tested on Windows Server 2008 R2 with ActivePython 2.7 (64-bit).
# It has been tested on Windows Server 2008 R2 with Anaconda 4.3.0 2.7 (64-bit).
# It has been tested on Ubuntu 16 with the vendor-supplied Python 2.7 (64-bit).
#
# You need to have a REDCap project and a MySQL database. Access to the MySQL
# database will be over SSL, so you will need to supply SSL key and certs.
#
# Requires Python 2.7, a config file, git, mysql, a DSN, and these packages:
#
# python -m pip install pandas
# python -m pip install sqlalchemy
# python -m pip install ConfigParser
# python -m pip install pycurl
# python -m pip install logging
# python -m pip install datetime
# python -m pip install gitpython
# python -m pip install git+https://github.com/alorenzo175/mylogin.git#egg=mylogin
# python -m pip install certifi
#
# For use with ODBC database connections, you will also want to install pyodbc:
#
# python -m pip install pyodbc
#
# Or, alternatively, for use with the MySQL Connector driver written in Python:
#
# python -m pip install mysql-connector
#
# On Windows, you will also need Microsoft Visual C++ Compiler for Python 2.7.
# https://www.microsoft.com/en-us/download/details.aspx?id=44266
# You will also need the MySQL ODBC Connector (32-bit or 64-bit to match Python).
# https://dev.mysql.com/downloads/connector/odbc/
#
# Usage: python redcap2mysql.py [Project] [...]
#
# ... Where Project contains letters, numbers, and underscore characters. More
# than one project may be listed, with spaces separating the project names.
#
# This script can be automated with a utility such as cron. Here is an example
# crontab entry which runs the script every day at 8:55 PM:
#
# 55 20 * * * (cd /path/to/folder; /usr/bin/python ./redcap2mysql.py)
#
# Todo:
#
# 1. Add input data validation for all configuration parameters.
# 2. Try to conform to Python coding styles, conventions, and best practices.
# ---------------------------------------------------------------------------
# --------------------------- SETUP -----------------------------------------
# Use Python 3 style print statements.
from __future__ import print_function
# Import packages
import ConfigParser
from sqlalchemy import *
from sys import exit
import os
import sys
from pandas.io import sql
import getpass
import pandas as pd
import certifi
import pycurl
from urllib import urlencode
import hashlib
import logging
import socket
from StringIO import StringIO
import pytz
from datetime import datetime
import re
import git
import traceback
# -----------------------
# Read configuration file
# -----------------------
config_file = 'conf/redcap2mysql.cfg' # See conf/redcap2mysql.cfg.example
# Configure parameters with defaults. Use a config file for most of these.
config = ConfigParser.SafeConfigParser(
{'data_path': 'data', 'log_file': 'redcap2mysql.log',
'log_timestamp_format': '%Y-%m-%d %H:%M:%S %Z',
'mysql_dsn': '', 'mysql_pwd': '', 'mysql_host': '',
'mysql_port': '3306', 'mysql_conn_type': 'pyodbc', 'mysql_user': '',
'redcap_url': 'https://localhost/API/', 'redcap_key': '0123456789ABCDEF',
'redcap_event_name_maxlen': '100'})
if os.path.isfile(config_file) == True:
config.read(config_file)
else:
print("Can't find config file: " + config_file)
exit(1)
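# Illustrative sketch of conf/redcap2mysql.cfg, pieced together from the
# config.get() calls in this script; conf/redcap2mysql.cfg.example remains the
# authoritative template, and every value shown here is a placeholder.
#
#   [global]
#   data_path: data
#   log_file: redcap2mysql.log
#
#   [mysql]
#   mysql_conn_type: pyodbc
#   mysql_dsn: redcap_mirror_dsn
#   mysql_host: db.example.com
#   mysql_port: 3306
#   mysql_db: redcap_mirror
#   mysql_user: redcap_etl
#
#   [mysql-ssl]
#   ssl_ca: /path/to/ca.pem
#   ssl_cert: /path/to/client-cert.pem
#   ssl_key: /path/to/client-key.pem
#
#   [redcap]
#   redcap_url: https://redcap.example.org/api/
#   redcap_key: 0123456789ABCDEF
#   # Per-project keys are read as <project>_redcap_key, for example:
#   myproject_redcap_key: FEDCBA9876543210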
# --------------------------
# Parse configuration object
# --------------------------
data_path = config.get('global', 'data_path', 0)
log_timestamp_format = config.get('global', 'log_timestamp_format', 0)
log_file = config.get('global', 'log_file', 0)
mysql_host = config.get('mysql', 'mysql_host', 0)
mysql_db = config.get('mysql', 'mysql_db', 0)
mysql_user = config.get('mysql', 'mysql_user', 0)
redcap_url = config.get('redcap', 'redcap_url', 0)
redcap_key = config.get('redcap', 'redcap_key', 0)
redcap_event_name_maxlen = int(
config.get('redcap', 'redcap_event_name_maxlen', 0))
# -----------------
# Configure logging
# -----------------
log_level = logging.INFO # Set to logging.DEBUG or logging.INFO
# Set log level and timestamp format
logging.basicConfig(filename=log_file, level=log_level,
format='%(asctime)s %(message)s', datefmt=log_timestamp_format)
# ------------------------
# Configure local git repo
# ------------------------
# Create a local git repository for downloading and archiving data.
try:
repo = git.Repo.init(data_path)
except:
message = "Error: Can't create git repo (%s)! Check config!" % (data_path)
logging.error(message)
raise OSError(message)
# ---------------------------
# Configure local data folder
# ---------------------------
# Create data folder. Should already exist if git repo created without error.
if not os.path.exists(data_path):
try:
os.makedirs(data_path)
except:
message = "Error: Can't create folder (%s)! Check config!" % (data_path)
logging.critical(message)
raise OSError(message)
# -------------------------
# Configure MySQL user name
# -------------------------
# Get username from the operating system, if it is blank (default).
if mysql_user == '':
mysql_user = getpass.getuser()
# ------------------------------------
# Define database connection functions
# ------------------------------------
def get_mysql_pwd(config):
"""Get the MySQL password from the config file or an interactive prompt."""
# Two ways to get the password are supported.
#
# 1. Read clear-text password from config file. (least secure)
# 2. Read password as entered manually from a console prompt. (most secure)
# First try the config file. This is the least secure method. Protect the file.
mysql_pwd = config.get('mysql', 'mysql_pwd', 0)
# Try other method if config file password is blank or missing.
if mysql_pwd == '':
# Prompt for the password. More secure, but won't work unattended.
mysql_pwd = getpass.getpass()
return(mysql_pwd)
def get_mysql_conn(config):
"""Configure the MySQL database connection."""
mysql_conn_type = config.get('mysql', 'mysql_conn_type', 0)
mysql_user = config.get('mysql', 'mysql_user', 0)
mysql_pwd = config.get('mysql', 'mysql_pwd', 0)
if mysql_user == '':
mysql_user = getpass.getuser()
if mysql_conn_type == 'pyodbc':
mysql_pwd = get_mysql_pwd(config)
mysql_dsn = config.get('mysql', 'mysql_dsn', 0)
# Create database connection.
import pyodbc
DB_URI = "mysql+pyodbc://{user}:{password}@{dsn}"
conn = create_engine(
DB_URI.format( user=mysql_user, password=mysql_pwd, dsn=mysql_dsn ))
return(conn)
else:
# Try to read encrypted MySQL password from ~/.mylogin.cnf and mysql_path.
mysql_path = config.get('mysql', 'mysql_path', 0)
if mysql_pwd == '':
if mysql_path != '':
# Read encrypted password and decrypt it with mylogin module.
# While better than clear-text, be careful about securing the pw file.
# However, it's probably the best method for unattended use.
try:
# Get encrypted password. This requires the mylogin module.
import mylogin
mysql_host = config.get('mysql', 'mysql_host', 0)
login = mylogin.get_login_info(mysql_path, host=mysql_host)
mysql_pwd = login['passwd']
except mylogin.exception.UtilError as err:
print("mylogin error: {0}".format(err))
else:
mysql_pwd = get_mysql_pwd(config)
# Import packages.
import mysql.connector
from mysql.connector.constants import ClientFlag
# Get SSL settings (file paths to SSL keys and certs).
ssl_ca = config.get('mysql-ssl', 'ssl_ca', 0)
ssl_cert = config.get('mysql-ssl', 'ssl_cert', 0)
ssl_key = config.get('mysql-ssl', 'ssl_key', 0)
# Check for existence of SSL files.
for file_path in (ssl_ca, ssl_cert, ssl_key):
if not os.path.exists(file_path):
message = "Error: Can't find: %s! Check config!" % (file_path)
logging.critical(message)
raise OSError(message)
# Create a dict of SSL settings to pass to create_engine().
ssl_args = {
'client_flags': [ClientFlag.SSL],
'ssl_ca': ssl_ca,
'ssl_cert': ssl_cert,
'ssl_key': ssl_key,
}
# Create database connection.
mysql_host = config.get('mysql', 'mysql_host', 0)
mysql_port = config.get('mysql', 'mysql_port', 0)
mysql_db = config.get('mysql', 'mysql_db', 0)
DB_URI = "mysql+mysqlconnector://{user}:{password}@{host}:{port}/{db}"
conn = create_engine(
DB_URI.format( user=mysql_user, password=mysql_pwd, host=mysql_host,
port=mysql_port, db=mysql_db), connect_args = ssl_args )
return(conn)
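# Illustrative sketch (assumption): one way to create the encrypted
# ~/.mylogin.cnf that the mylogin branch of get_mysql_conn() reads. The
# login-path name, host, and user below are placeholders; mysql_config_editor
# ships with the MySQL client tools.
#
#   mysql_config_editor set --login-path=client \
#       --host=db.example.com --user=redcap_etl --password
#
# Then point the mysql_path option in the [mysql] section at the resulting
# ~/.mylogin.cnf so the password can be decrypted without prompting.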
# -------------------
# Connect to Database
# -------------------
# Create a MySQL connection object based on the configured connection type.
conn = get_mysql_conn(config)
# ------------------------- END SETUP ---------------------------------------
# ----------------
# Define functions
# ----------------
def get_data(csv_file, redcap_key, redcap_url, content):
"""Get REDCap data as a CSV file with an API key, URL and content type."""
with open(csv_file, 'wb') as f:
c = pycurl.Curl()
c.setopt(pycurl.CAINFO, certifi.where())
c.setopt(c.URL, redcap_url)
c.setopt(c.FOLLOWLOCATION, True)
post_data = {'token': redcap_key, 'content': content, \
'rawOrLabel': 'raw', 'type': 'flat', 'format': 'csv', \
'exportSurveyFields': 'True'}
postfields = urlencode(post_data)
c.setopt(c.POSTFIELDS, postfields)
c.setopt(c.WRITEDATA, f)
try:
c.perform()
c.close()
except pycurl.error, err:
c.close()
message = "Error: Can't fetch data. Check config: " + config_file
print(message)
logging.critical(message)
exit(2)
def get_prev_hash(project, mysql_table, log_table, conn = conn):
"""Get the sha1 hash of the previously uploaded data for a table."""
# See if the database contains the log_table (REDCap transfer log) table.
rs = sql.execute('SHOW TABLES LIKE "' + log_table + '";', conn)
row0 = rs.fetchone()
res = ''
if (row0 is not None) and (len(row0) != 0):
res = row0[0]
# If the table is found, find the most recent hash for the table data.
prev_hash = ''
if res == log_table:
sql_cmd = 'SELECT sha1_hash FROM %s ' % (log_table) + \
'WHERE table_name = "%s" ' % (mysql_table) + \
'ORDER BY timestamp_utc DESC ' + \
'LIMIT 1;'
rs = sql.execute(sql_cmd, conn)
row0 = rs.fetchone()
if (row0 is not None) and (len(row0) != 0):
prev_hash = row0[0]
return(prev_hash)
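# For reference, a sketch (assumption, not an exact schema) of the transfer-log
# table that get_prev_hash() queries. The column names mirror the dataframe
# written by send_to_db() below; the MySQL column types are illustrative only,
# since pandas to_sql() actually creates the table.
#
#   CREATE TABLE rcxfer (
#       timestamp_utc DATETIME,
#       user_name     VARCHAR(64),
#       host_name     VARCHAR(64),
#       num_rows      INT,
#       num_cols      INT,
#       table_name    VARCHAR(128),
#       file_name     VARCHAR(255),
#       size_bytes    BIGINT,
#       sha1_hash     CHAR(40)
#   );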
def parse_csv(csv_file):
"""Parse a CSV file with Pandas, with basic checks and error handling."""
if os.path.isfile(csv_file) == True:
num_lines = sum(1 for line in open(csv_file))
if num_lines > 1:
try:
data = pd.read_csv(csv_file, index_col=False)
data.insert(0, 'id', range(1, 1 + len(data)))
return(data)
except pd.parser.CParserError, err:
message = "Can't parse REDCap data. Check CSV file: " + csv_file
print(message)
logging.critical(message)
exit(3)
else:
message = "CSV file does not contain data: " + csv_file
print(message)
logging.warning(message)
return(None)
else:
message = "Can't read CSV file: " + csv_file
print(message)
logging.critical(message)
exit(4)
def | (file_name):
"""Create a hash of a file."""
BLOCKSIZE = 65536
hasher = hashlib.sha1()
with open(file_name, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return(hasher.hexdigest())
def send_to_db(data_path, project, csv_file, dataset, mysql_table, log_table,
rcform = '', redcap_key = redcap_key, redcap_url = redcap_url,
conn = conn, mysql_user = mysql_user,
redcap_event_name_maxlen = redcap_event_name_maxlen):
"""Send data from REDCap to a MySQL (or MariaDB) database."""
if project != '':
# Prepend project name.
csv_file = project + '_' + csv_file
mysql_table = project + '_' + mysql_table
log_table = project + '_' + log_table
rcform = project + '_' + rcform
# Prepend file_path to csv_file.
csv_file = os.path.join(data_path, csv_file)
# Get the data from REDCap.
if project != '':
redcap_key = config.get('redcap', project + '_' + 'redcap_key', 0)
get_data(csv_file, redcap_key, redcap_url, dataset)
data = parse_csv(csv_file)
if data is None:
return(None)
# Calculate the file size and a hash (checksum) for recording in the log.
csv_file_size = os.path.getsize(csv_file)
csv_file_hash = hash_file(csv_file)
# If dataset == 'metadata' and csv_file_hash does not match the
# previous value ("prev_hash", from the most recent update), then save
# the old rcmeta and rcform tables with a datestamped name suffix
# and create new tables for the 'metadata' and 'record' data.
prev_hash_same = False
prev_hash = get_prev_hash(project, mysql_table, log_table)
if csv_file_hash == prev_hash:
prev_hash_same = True
else:
if prev_hash != '' and dataset == 'metadata':
timestamp = '{:%Y%m%dT%H%M%SZ}'.format(
datetime.utcnow().replace(tzinfo=pytz.utc))
rs = sql.execute('RENAME TABLE %s TO %s;' % \
(rcform, rcform + '_' + timestamp), conn)
rs = sql.execute('RENAME TABLE %s TO %s;' % \
(mysql_table, mysql_table + '_' + timestamp), conn)
# If the data has changed since the last sync, write to database and log.
if prev_hash_same == False:
# Set the data type for the redcap_event_name if this column is present.
data_dtype_dict = {}
if 'redcap_event_name' in list(data.columns.values):
data_dtype_dict['redcap_event_name'] = String(redcap_event_name_maxlen)
# Set the data type for variables ending with _timestamp as DateTime
r = re.compile('.*_timestamp$')
timestamp_columns = filter(r.match, list(data.columns.values))
for column in timestamp_columns:
data_dtype_dict[column] = DateTime
# Send the data to the database.
data.to_sql(name = mysql_table, con = conn, if_exists = 'replace',
index = False, dtype = data_dtype_dict)
# Create an ISO 8601 timestamp for logging. Use UTC for consistency.
timestamp = '{:%Y-%m-%dT%H:%M:%SZ}'.format(
datetime.utcnow().replace(tzinfo=pytz.utc))
# Create the log message string as a comma-separated list of values.
log_str = '{0},{1},{2},{3},{4},{5},{6},{7},{8}'.format(
timestamp, mysql_user, socket.gethostname(), len(data.index),
len(data.columns), mysql_table, csv_file, csv_file_size, csv_file_hash)
# Create a dataframe for the log message.
log_df = pd.read_csv(StringIO(log_str), header=None, index_col=False)
log_df.columns = ['timestamp_utc', 'user_name', 'host_name', 'num_rows',
'num_cols', 'table_name', 'file_name', 'size_bytes', 'sha1_hash']
# Convert the timestamp column to the datetime data type.
log_df.timestamp_utc = pd.to_datetime(
log_df.timestamp_utc, yearfirst=True, utc=True)
# Send the log message dataframe to the database.
log_df.to_sql(name = log_table, con = conn, if_exists = 'append',
index = False, dtype = {'timestamp_utc':DateTime})
# Write the log message to the log file.
logging.info("to " + log_table + ": " + log_str)
def commit_changes(repo, project = ''):
"""Track changes to transferred data files in local git repository."""
cmd = repo.git
cmd.add(all=True)
try:
cmd.commit(m="redcap2mysql.py data sync for project " + project)
except git.exc.GitCommandError, err:
logging.info([traceback.format_exc(limit=1).splitlines()[-1]])
def send_data(data_path, project = ''):
"""Get REDCap data and send to MySQL."""
# Send metadata
send_to_db(data_path, project, 'rcmeta.csv', 'metadata', 'rcmeta', 'rcxfer', 'rcform')
# Send events
#send_to_db(data_path, project, 'rcevent.csv', 'event', 'rcevent', 'rcxfer')
# Send arms
#send_to_db(data_path, project, 'rcarm.csv', 'arm', 'rcarm', 'rcxfer')
# Send Form Event Mappings (fems)
#send_to_db(data_path, project, 'rcfem.csv', 'formEventMapping', 'rcfem', 'rcxfer')
# Send users
send_to_db(data_path, project, 'rcuser.csv', 'user', 'rcuser', 'rcxfer')
# Send instruments
send_to_db(data_path, project, 'rcinst.csv', 'instrument', 'rcinst', 'rcxfer')
# Send records
send_to_db(data_path, project, 'rcform.csv', 'record', 'rcform', 'rcxfer')
# Commit changes to local repo
commit_changes(repo, project)
# --------------
# Transfer data
# --------------
# Get the project name(s) from the script argument(s), if present.
# The project must only contain letters, numbers, and underscore characters.
if len(sys.argv) > 1:
pattern = re.compile('^[A-Za-z0-9_]+$')
for project in sys.argv[1:]:
if pattern.match(project):
send_data(data_path, project)
else:
message = "Error: Invalid project name: %s" % (project)
print(message)
logging.critical(message)
exit(5)
else:
send_data(data_path)
| hash_file | identifier_name |
redcap2mysql.py | #!/usr/bin/python
# Export data from a REDCap project and send to a MySQL database.
# Track changes to transferred data files in local git repository.
#
# This is just a *rough* prototype in the *early* stages of development.
#
# It has been tested on Windows Server 2008 R2 with ActivePython 2.7 (64-bit).
# It has been tested on Windows Server 2008 R2 with Anaconda 4.3.0 2.7 (64-bit).
# It has been tested on Ubuntu 16 with the vendor-supplied Python 2.7 (64-bit).
#
# You need to have a REDCap project and a MySQL database. Access to the MySQL
# database will be over SSL, so you will need to supply SSL key and certs.
#
# Requires Python 2.7, a config file, git, mysql, a DSN, and these packages:
#
# python -m pip install pandas
# python -m pip install sqlalchemy
# python -m pip install ConfigParser
# python -m pip install pycurl
# python -m pip install logging
# python -m pip install datetime
# python -m pip install gitpython
# python -m pip install git+https://github.com/alorenzo175/mylogin.git#egg=mylogin
# python -m pip install certifi
#
# For use with ODBC database connections, you will also want to install pyodbc:
#
# python -m pip install pyodbc
#
# Or, alternatively, for use with the MySQL Connector driver written in Python:
#
# python -m pip install mysql-connector
#
# On Windows, you will also need Microsoft Visual C++ Compiler for Python 2.7.
# https://www.microsoft.com/en-us/download/details.aspx?id=44266
# You will also need the MySQL ODBC Connector (32-bit or 64-bit to match Python).
# https://dev.mysql.com/downloads/connector/odbc/
#
# Usage: python redcap2mysql.py [Project] [...]
#
# ... Where Project contains letters, numbers, and underscore characters. More
# than one project may be listed, with spaces separating the project names.
#
# This script can be automated with a utility such as cron. Here is an example
# crontab entry which runs the script every day at 8:55 PM:
#
# 55 20 * * * (cd /path/to/folder; /usr/bin/python ./redcap2mysql.py)
#
# Todo:
#
# 1. Add input data validation for all configuration parameters.
# 2. Try to conform to Python coding styles, conventions, and best practices.
# ---------------------------------------------------------------------------
# --------------------------- SETUP -----------------------------------------
# Use Python 3 style print statements.
from __future__ import print_function
# Import packages
import ConfigParser
from sqlalchemy import *
from sys import exit
import os
import sys
from pandas.io import sql
import getpass
import pandas as pd
import certifi
import pycurl
from urllib import urlencode
import hashlib
import logging
import socket
from StringIO import StringIO
import pytz
from datetime import datetime
import re
import git
import traceback
# -----------------------
# Read configuration file
# -----------------------
config_file = 'conf/redcap2mysql.cfg' # See conf/redcap2mysql.cfg.example
# Configure parameters with defaults. Use a config file for most of these.
config = ConfigParser.SafeConfigParser(
{'data_path': 'data', 'log_file': 'redcap2mysql.log',
'log_timestamp_format': '%Y-%m-%d %H:%M:%S %Z',
'mysql_dsn': '', 'mysql_pwd': '', 'mysql_host': '',
'mysql_port': '3306', 'mysql_conn_type': 'pyodbc', 'mysql_user': '',
'redcap_url': 'https://localhost/API/', 'redcap_key': '0123456789ABCDEF',
'redcap_event_name_maxlen': '100'})
if os.path.isfile(config_file) == True:
|
else:
print("Can't find config file: " + config_file)
exit(1)
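# Illustrative sketch of conf/redcap2mysql.cfg, pieced together from the
# config.get() calls in this script; conf/redcap2mysql.cfg.example remains the
# authoritative template, and every value shown here is a placeholder.
#
#   [global]
#   data_path: data
#   log_file: redcap2mysql.log
#
#   [mysql]
#   mysql_conn_type: pyodbc
#   mysql_dsn: redcap_mirror_dsn
#   mysql_host: db.example.com
#   mysql_port: 3306
#   mysql_db: redcap_mirror
#   mysql_user: redcap_etl
#
#   [mysql-ssl]
#   ssl_ca: /path/to/ca.pem
#   ssl_cert: /path/to/client-cert.pem
#   ssl_key: /path/to/client-key.pem
#
#   [redcap]
#   redcap_url: https://redcap.example.org/api/
#   redcap_key: 0123456789ABCDEF
#   # Per-project keys are read as <project>_redcap_key, for example:
#   myproject_redcap_key: FEDCBA9876543210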
# --------------------------
# Parse configuration object
# --------------------------
data_path = config.get('global', 'data_path', 0)
log_timestamp_format = config.get('global', 'log_timestamp_format', 0)
log_file = config.get('global', 'log_file', 0)
mysql_host = config.get('mysql', 'mysql_host', 0)
mysql_db = config.get('mysql', 'mysql_db', 0)
mysql_user = config.get('mysql', 'mysql_user', 0)
redcap_url = config.get('redcap', 'redcap_url', 0)
redcap_key = config.get('redcap', 'redcap_key', 0)
redcap_event_name_maxlen = int(
config.get('redcap', 'redcap_event_name_maxlen', 0))
# -----------------
# Configure logging
# -----------------
log_level = logging.INFO # Set to logging.DEBUG or logging.INFO
# Set log level and timestamp format
logging.basicConfig(filename=log_file, level=log_level,
format='%(asctime)s %(message)s', datefmt=log_timestamp_format)
# ------------------------
# Configure local git repo
# ------------------------
# Create a local git repository for downloading and archiving data.
try:
repo = git.Repo.init(data_path)
except:
message = "Error: Can't create git repo (%s)! Check config!" % (data_path)
logging.error(message)
raise OSError(message)
# ---------------------------
# Configure local data folder
# ---------------------------
# Create data folder. Should already exist if git repo created without error.
if not os.path.exists(data_path):
try:
os.makedirs(data_path)
except:
message = "Error: Can't create folder (%s)! Check config!" % (data_path)
logging.critical(message)
raise OSError(message)
# -------------------------
# Configure MySQL user name
# -------------------------
# Get username from the operating system, if it is blank (default).
if mysql_user == '':
mysql_user = getpass.getuser()
# ------------------------------------
# Define database connection functions
# ------------------------------------
def get_mysql_pwd(config):
"""Get the MySQL password from the config file or an interactive prompt."""
# Two ways to get the password are supported.
#
# 1. Read clear-text password from config file. (least secure)
# 2. Read password as entered manually from a console prompt. (most secure)
# First try the config file. This is the least secure method. Protect the file.
mysql_pwd = config.get('mysql', 'mysql_pwd', 0)
# Try other method if config file password is blank or missing.
if mysql_pwd == '':
# Prompt for the password. More secure, but won't work unattended.
mysql_pwd = getpass.getpass()
return(mysql_pwd)
def get_mysql_conn(config):
"""Configure the MySQL database connection."""
mysql_conn_type = config.get('mysql', 'mysql_conn_type', 0)
mysql_user = config.get('mysql', 'mysql_user', 0)
mysql_pwd = config.get('mysql', 'mysql_pwd', 0)
if mysql_user == '':
mysql_user = getpass.getuser()
if mysql_conn_type == 'pyodbc':
mysql_pwd = get_mysql_pwd(config)
mysql_dsn = config.get('mysql', 'mysql_dsn', 0)
# Create database connection.
import pyodbc
DB_URI = "mysql+pyodbc://{user}:{password}@{dsn}"
conn = create_engine(
DB_URI.format( user=mysql_user, password=mysql_pwd, dsn=mysql_dsn ))
return(conn)
else:
# Try to read encrypted MySQL password from ~/.mylogin.cnf and mysql_path.
mysql_path = config.get('mysql', 'mysql_path', 0)
if mysql_pwd == '':
if mysql_path != '':
# Read encrypted password and decrypt it with mylogin module.
# While better than clear-text, be careful about securing the pw file.
# However, it's probably the best method for unattended use.
try:
# Get encrypted password. This requires the mylogin module.
import mylogin
mysql_host = config.get('mysql', 'mysql_host', 0)
login = mylogin.get_login_info(mysql_path, host=mysql_host)
mysql_pwd = login['passwd']
except mylogin.exception.UtilError as err:
print("mylogin error: {0}".format(err))
else:
mysql_pwd = get_mysql_pwd(config)
# Import packages.
import mysql.connector
from mysql.connector.constants import ClientFlag
# Get SSL settings (file paths to SSL keys and certs).
ssl_ca = config.get('mysql-ssl', 'ssl_ca', 0)
ssl_cert = config.get('mysql-ssl', 'ssl_cert', 0)
ssl_key = config.get('mysql-ssl', 'ssl_key', 0)
# Check for existence of SSL files.
for file_path in (ssl_ca, ssl_cert, ssl_key):
if not os.path.exists(file_path):
message = "Error: Can't find: %s! Check config!" % (file_path)
logging.critical(message)
raise OSError(message)
# Create a dict of SSL settings to pass to create_engine().
ssl_args = {
'client_flags': [ClientFlag.SSL],
'ssl_ca': ssl_ca,
'ssl_cert': ssl_cert,
'ssl_key': ssl_key,
}
# Create database connection.
mysql_host = config.get('mysql', 'mysql_host', 0)
mysql_port = config.get('mysql', 'mysql_port', 0)
mysql_db = config.get('mysql', 'mysql_db', 0)
DB_URI = "mysql+mysqlconnector://{user}:{password}@{host}:{port}/{db}"
conn = create_engine(
DB_URI.format( user=mysql_user, password=mysql_pwd, host=mysql_host,
port=mysql_port, db=mysql_db), connect_args = ssl_args )
return(conn)
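# Illustrative sketch (assumption): one way to create the encrypted
# ~/.mylogin.cnf that the mylogin branch of get_mysql_conn() reads. The
# login-path name, host, and user below are placeholders; mysql_config_editor
# ships with the MySQL client tools.
#
#   mysql_config_editor set --login-path=client \
#       --host=db.example.com --user=redcap_etl --password
#
# Then point the mysql_path option in the [mysql] section at the resulting
# ~/.mylogin.cnf so the password can be decrypted without prompting.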
# -------------------
# Connect to Database
# -------------------
# Create a MySQL connection object based on the configured connection type.
conn = get_mysql_conn(config)
# ------------------------- END SETUP ---------------------------------------
# ----------------
# Define functions
# ----------------
def get_data(csv_file, redcap_key, redcap_url, content):
"""Get REDCap data as a CSV file with an API key, URL and content type."""
with open(csv_file, 'wb') as f:
c = pycurl.Curl()
c.setopt(pycurl.CAINFO, certifi.where())
c.setopt(c.URL, redcap_url)
c.setopt(c.FOLLOWLOCATION, True)
post_data = {'token': redcap_key, 'content': content, \
'rawOrLabel': 'raw', 'type': 'flat', 'format': 'csv', \
'exportSurveyFields': 'True'}
postfields = urlencode(post_data)
c.setopt(c.POSTFIELDS, postfields)
c.setopt(c.WRITEDATA, f)
try:
c.perform()
c.close()
except pycurl.error, err:
c.close()
message = "Error: Can't fetch data. Check config: " + config_file
print(message)
logging.critical(message)
exit(2)
def get_prev_hash(project, mysql_table, log_table, conn = conn):
"""Get the sha1 hash of the previously uploaded data for a table."""
# See if the database contains the log_table (REDCap transfer log) table.
rs = sql.execute('SHOW TABLES LIKE "' + log_table + '";', conn)
row0 = rs.fetchone()
res = ''
if (row0 is not None) and (len(row0) != 0):
res = row0[0]
# If the table is found, find the most recent hash for the table data.
prev_hash = ''
if res == log_table:
sql_cmd = 'SELECT sha1_hash FROM %s ' % (log_table) + \
'WHERE table_name = "%s" ' % (mysql_table) + \
'ORDER BY timestamp_utc DESC ' + \
'LIMIT 1;'
rs = sql.execute(sql_cmd, conn)
row0 = rs.fetchone()
if (row0 is not None) and (len(row0) != 0):
prev_hash = row0[0]
return(prev_hash)
def parse_csv(csv_file):
"""Parse a CSV file with Pandas, with basic checks and error handling."""
if os.path.isfile(csv_file) == True:
num_lines = sum(1 for line in open(csv_file))
if num_lines > 1:
try:
data = pd.read_csv(csv_file, index_col=False)
data.insert(0, 'id', range(1, 1 + len(data)))
return(data)
except pd.parser.CParserError, err:
message = "Can't parse REDCap data. Check CSV file: " + csv_file
print(message)
logging.critical(message)
exit(3)
else:
message = "CSV file does not contain data: " + csv_file
print(message)
logging.warning(message)
return(None)
else:
message = "Can't read CSV file: " + csv_file
print(message)
logging.critical(message)
exit(4)
def hash_file(file_name):
"""Create a hash of a file."""
BLOCKSIZE = 65536
hasher = hashlib.sha1()
with open(file_name, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return(hasher.hexdigest())
def send_to_db(data_path, project, csv_file, dataset, mysql_table, log_table,
rcform = '', redcap_key = redcap_key, redcap_url = redcap_url,
conn = conn, mysql_user = mysql_user,
redcap_event_name_maxlen = redcap_event_name_maxlen):
"""Send data from REDCap to a MySQL (or MariaDB) database."""
if project != '':
# Prepend project name.
csv_file = project + '_' + csv_file
mysql_table = project + '_' + mysql_table
log_table = project + '_' + log_table
rcform = project + '_' + rcform
# Prepend file_path to csv_file.
csv_file = os.path.join(data_path, csv_file)
# Get the data from REDCap.
if project != '':
redcap_key = config.get('redcap', project + '_' + 'redcap_key', 0)
get_data(csv_file, redcap_key, redcap_url, dataset)
data = parse_csv(csv_file)
if data is None:
return(None)
# Calculate the file size and a hash (checksum) for recording in the log.
csv_file_size = os.path.getsize(csv_file)
csv_file_hash = hash_file(csv_file)
# If dataset == 'metadata' and csv_file_hash does not match the
# previous value ("prev_hash", from the most recent update), then save
# the old rcmeta and rcform tables with a datestamped name suffix
# and create new tables for the 'metadata' and 'record' data.
prev_hash_same = False
prev_hash = get_prev_hash(project, mysql_table, log_table)
if csv_file_hash == prev_hash:
prev_hash_same = True
else:
if prev_hash != '' and dataset == 'metadata':
timestamp = '{:%Y%m%dT%H%M%SZ}'.format(
datetime.utcnow().replace(tzinfo=pytz.utc))
rs = sql.execute('RENAME TABLE %s TO %s;' % \
(rcform, rcform + '_' + timestamp), conn)
rs = sql.execute('RENAME TABLE %s TO %s;' % \
(mysql_table, mysql_table + '_' + timestamp), conn)
# If the data has changed since the last sync, write to database and log.
if prev_hash_same == False:
# Set the data type for the redcap_event_name if this column is present.
data_dtype_dict = {}
if 'redcap_event_name' in list(data.columns.values):
data_dtype_dict['redcap_event_name'] = String(redcap_event_name_maxlen)
# Set the data type for variables ending with _timestamp as DateTime
r = re.compile('.*_timestamp$')
timestamp_columns = filter(r.match, list(data.columns.values))
for column in timestamp_columns:
data_dtype_dict[column] = DateTime
# Send the data to the database.
data.to_sql(name = mysql_table, con = conn, if_exists = 'replace',
index = False, dtype = data_dtype_dict)
# Create an ISO 8601 timestamp for logging. Use UTC for consistency.
timestamp = '{:%Y-%m-%dT%H:%M:%SZ}'.format(
datetime.utcnow().replace(tzinfo=pytz.utc))
# Create the log message string as a comma-separated list of values.
log_str = '{0},{1},{2},{3},{4},{5},{6},{7},{8}'.format(
timestamp, mysql_user, socket.gethostname(), len(data.index),
len(data.columns), mysql_table, csv_file, csv_file_size, csv_file_hash)
# Create a dataframe for the log message.
log_df = pd.read_csv(StringIO(log_str), header=None, index_col=False)
log_df.columns = ['timestamp_utc', 'user_name', 'host_name', 'num_rows',
'num_cols', 'table_name', 'file_name', 'size_bytes', 'sha1_hash']
# Convert the timestamp column to the datetime data type.
log_df.timestamp_utc = pd.to_datetime(
log_df.timestamp_utc, yearfirst=True, utc=True)
# Send the log message dataframe to the database.
log_df.to_sql(name = log_table, con = conn, if_exists = 'append',
index = False, dtype = {'timestamp_utc':DateTime})
# Write the log message to the log file.
logging.info("to " + log_table + ": " + log_str)
def commit_changes(repo, project = ''):
"""Track changes to transferred data files in local git repository."""
cmd = repo.git
cmd.add(all=True)
try:
cmd.commit(m="redcap2mysql.py data sync for project " + project)
except git.exc.GitCommandError, err:
logging.info([traceback.format_exc(limit=1).splitlines()[-1]])
def send_data(data_path, project = ''):
"""Get REDCap data and send to MySQL."""
# Send metadata
send_to_db(data_path, project, 'rcmeta.csv', 'metadata', 'rcmeta', 'rcxfer', 'rcform')
# Send events
#send_to_db(data_path, project, 'rcevent.csv', 'event', 'rcevent', 'rcxfer')
# Send arms
#send_to_db(data_path, project, 'rcarm.csv', 'arm', 'rcarm', 'rcxfer')
# Send Form Event Mappings (fems)
#send_to_db(data_path, project, 'rcfem.csv', 'formEventMapping', 'rcfem', 'rcxfer')
# Send users
send_to_db(data_path, project, 'rcuser.csv', 'user', 'rcuser', 'rcxfer')
# Send instruments
send_to_db(data_path, project, 'rcinst.csv', 'instrument', 'rcinst', 'rcxfer')
# Send records
send_to_db(data_path, project, 'rcform.csv', 'record', 'rcform', 'rcxfer')
# Commit changes to local repo
commit_changes(repo, project)
# --------------
# Transfer data
# --------------
# Get the project name(s) from the script argument(s), if present.
# The project must only contain letters, numbers, and underscore characters.
if len(sys.argv) > 1:
pattern = re.compile('^[A-Za-z0-9_]+$')
for project in sys.argv[1:]:
if pattern.match(project):
send_data(data_path, project)
else:
message = "Error: Invalid project name: %s" % (project)
print(message)
logging.critical(message)
exit(5)
else:
send_data(data_path)
| config.read(config_file) | conditional_block |
redcap2mysql.py | #!/usr/bin/python
# Export data from a REDCap project and send to a MySQL database.
# Track changes to transferred data files in local git repository.
#
# This is just a *rough* prototype in the *early* stages of development.
#
# It has been tested on Windows Server 2008 R2 with ActivePython 2.7 (64-bit).
# It has been tested on Windows Server 2008 R2 with Anaconda 4.3.0 2.7 (64-bit).
# It has been tested on Ubuntu 16 with the vendor-supplied Python 2.7 (64-bit).
#
# You need to have a REDCap project and a MySQL database. Access to the MySQL
# database will be over SSL, so you will need to supply SSL key and certs.
#
# Requires Python 2.7, a config file, git, mysql, a DSN, and these packages:
#
# python -m pip install pandas
# python -m pip install sqlalchemy
# python -m pip install ConfigParser
# python -m pip install pycurl
# python -m pip install logging
# python -m pip install datetime | #
# For use with ODBC database connections, you will also want to install pyodbc:
#
# python -m pip install pyodbc
#
# Or, alternatively, for use with the MySQL Connector driver written in Python:
#
# python -m pip install mysql-connector
#
# On Windows, you will also need Microsoft Visual C++ Compiler for Python 2.7.
# https://www.microsoft.com/en-us/download/details.aspx?id=44266
# You will also need the MySQL ODBC Connector (32-bit or 64-bit to match Python).
# https://dev.mysql.com/downloads/connector/odbc/
#
# Usage: python redcap2mysql.py [Project] [...]
#
# ... Where Project contains letters, numbers, and underscore characters. More
# than one project may be listed, with spaces separating the project names.
#
# This script can be automated with a utility such as cron. Here is an example
# crontab entry which runs the script every day at 8:55 PM:
#
# 55 20 * * * (cd /path/to/folder; /usr/bin/python ./redcap2mysql.py)
#
# Todo:
#
# 1. Add input data validation for all configuration parameters.
# 2. Try to conform to Python coding styles, conventions, and best practices.
# ---------------------------------------------------------------------------
# --------------------------- SETUP -----------------------------------------
# Use Python 3 style print statements.
from __future__ import print_function
# Import packages
import ConfigParser
from sqlalchemy import *
from sys import exit
import os
import sys
from pandas.io import sql
import getpass
import pandas as pd
import certifi
import pycurl
from urllib import urlencode
import hashlib
import logging
import socket
from StringIO import StringIO
import pytz
from datetime import datetime
import re
import git
import traceback
# -----------------------
# Read configuration file
# -----------------------
config_file = 'conf/redcap2mysql.cfg' # See conf/redcap2mysql.cfg.example
# Configure parameters with defaults. Use a config file for most of these.
config = ConfigParser.SafeConfigParser(
{'data_path': 'data', 'log_file': 'redcap2mysql.log',
'log_timestamp_format': '%Y-%m-%d %H:%M:%S %Z',
'mysql_dsn': '', 'mysql_pwd': '', 'mysql_host': '',
'mysql_port': '3306', 'mysql_conn_type': 'pyodbc', 'mysql_user': '',
'redcap_url': 'https://localhost/API/', 'redcap_key': '0123456789ABCDEF',
'redcap_event_name_maxlen': '100'})
if os.path.isfile(config_file) == True:
config.read(config_file)
else:
print("Can't find config file: " + config_file)
exit(1)
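# Illustrative sketch of conf/redcap2mysql.cfg, pieced together from the
# config.get() calls in this script; conf/redcap2mysql.cfg.example remains the
# authoritative template, and every value shown here is a placeholder.
#
#   [global]
#   data_path: data
#   log_file: redcap2mysql.log
#
#   [mysql]
#   mysql_conn_type: pyodbc
#   mysql_dsn: redcap_mirror_dsn
#   mysql_host: db.example.com
#   mysql_port: 3306
#   mysql_db: redcap_mirror
#   mysql_user: redcap_etl
#
#   [mysql-ssl]
#   ssl_ca: /path/to/ca.pem
#   ssl_cert: /path/to/client-cert.pem
#   ssl_key: /path/to/client-key.pem
#
#   [redcap]
#   redcap_url: https://redcap.example.org/api/
#   redcap_key: 0123456789ABCDEF
#   # Per-project keys are read as <project>_redcap_key, for example:
#   myproject_redcap_key: FEDCBA9876543210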
# --------------------------
# Parse configuration object
# --------------------------
data_path = config.get('global', 'data_path', 0)
log_timestamp_format = config.get('global', 'log_timestamp_format', 0)
log_file = config.get('global', 'log_file', 0)
mysql_host = config.get('mysql', 'mysql_host', 0)
mysql_db = config.get('mysql', 'mysql_db', 0)
mysql_user = config.get('mysql', 'mysql_user', 0)
redcap_url = config.get('redcap', 'redcap_url', 0)
redcap_key = config.get('redcap', 'redcap_key', 0)
redcap_event_name_maxlen = int(
config.get('redcap', 'redcap_event_name_maxlen', 0))
# -----------------
# Configure logging
# -----------------
log_level = logging.INFO # Set to logging.DEBUG or logging.INFO
# Set log level and timestamp format
logging.basicConfig(filename=log_file, level=log_level,
format='%(asctime)s %(message)s', datefmt=log_timestamp_format)
# ------------------------
# Configure local git repo
# ------------------------
# Create a local git repository for downloading and archiving data.
try:
repo = git.Repo.init(data_path)
except:
message = "Error: Can't create git repo (%s)! Check config!" % (data_path)
logging.error(message)
raise OSError(message)
# ---------------------------
# Configure local data folder
# ---------------------------
# Create data folder. Should already exist if git repo created without error.
if not os.path.exists(data_path):
try:
os.makedirs(data_path)
except:
message = "Error: Can't create folder (%s)! Check config!" % (data_path)
logging.critical(message)
raise OSError(message)
# -------------------------
# Configure MySQL user name
# -------------------------
# Get username from the operating system, if it is blank (default).
if mysql_user == '':
mysql_user = getpass.getuser()
# ------------------------------------
# Define database connection functions
# ------------------------------------
def get_mysql_pwd(config):
"""Get the MySQL password from the config file or an interactive prompt."""
# Two ways to get the password are supported.
#
# 1. Read clear-text password from config file. (least secure)
# 2. Read password as entered manually from a console prompt. (most secure)
# First try the config file. This is the least secure method. Protect the file.
mysql_pwd = config.get('mysql', 'mysql_pwd', 0)
# Try other method if config file password is blank or missing.
if mysql_pwd == '':
# Prompt for the password. More secure, but won't work unattended.
mysql_pwd = getpass.getpass()
return(mysql_pwd)
def get_mysql_conn(config):
"""Configure the MySQL database connection."""
mysql_conn_type = config.get('mysql', 'mysql_conn_type', 0)
mysql_user = config.get('mysql', 'mysql_user', 0)
mysql_pwd = config.get('mysql', 'mysql_pwd', 0)
if mysql_user == '':
mysql_user = getpass.getuser()
if mysql_conn_type == 'pyodbc':
mysql_pwd = get_mysql_pwd(config)
mysql_dsn = config.get('mysql', 'mysql_dsn', 0)
# Create database connection.
import pyodbc
DB_URI = "mysql+pyodbc://{user}:{password}@{dsn}"
conn = create_engine(
DB_URI.format( user=mysql_user, password=mysql_pwd, dsn=mysql_dsn ))
return(conn)
else:
# Try to read encrypted MySQL password from ~/.mylogin.cnf and mysql_path.
mysql_path = config.get('mysql', 'mysql_path', 0)
if mysql_pwd == '':
if mysql_path != '':
# Read encrypted password and decrypt it with mylogin module.
# While better than clear-text, be careful about securing the pw file.
# However, it's probably the best method for unattended use.
try:
# Get encrypted password. This requires the mylogin module.
import mylogin
mysql_host = config.get('mysql', 'mysql_host', 0)
login = mylogin.get_login_info(mysql_path, host=mysql_host)
mysql_pwd = login['passwd']
except mylogin.exception.UtilError as err:
print("mylogin error: {0}".format(err))
else:
mysql_pwd = get_mysql_pwd(config)
# Import packages.
import mysql.connector
from mysql.connector.constants import ClientFlag
# Get SSL settings (file paths to SSL keys and certs).
ssl_ca = config.get('mysql-ssl', 'ssl_ca', 0)
ssl_cert = config.get('mysql-ssl', 'ssl_cert', 0)
ssl_key = config.get('mysql-ssl', 'ssl_key', 0)
# Check for existence of SSL files.
for file_path in (ssl_ca, ssl_cert, ssl_key):
if not os.path.exists(file_path):
message = "Error: Can't find: %s! Check config!" % (file_path)
logging.critical(message)
raise OSError(message)
# Create a dict of SSL settings to pass to create_engine().
ssl_args = {
'client_flags': [ClientFlag.SSL],
'ssl_ca': ssl_ca,
'ssl_cert': ssl_cert,
'ssl_key': ssl_key,
}
# Create database connection.
mysql_host = config.get('mysql', 'mysql_host', 0)
mysql_port = config.get('mysql', 'mysql_port', 0)
mysql_db = config.get('mysql', 'mysql_db', 0)
DB_URI = "mysql+mysqlconnector://{user}:{password}@{host}:{port}/{db}"
conn = create_engine(
DB_URI.format( user=mysql_user, password=mysql_pwd, host=mysql_host,
port=mysql_port, db=mysql_db), connect_args = ssl_args )
return(conn)
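# Illustrative sketch (assumption): one way to create the encrypted
# ~/.mylogin.cnf that the mylogin branch of get_mysql_conn() reads. The
# login-path name, host, and user below are placeholders; mysql_config_editor
# ships with the MySQL client tools.
#
#   mysql_config_editor set --login-path=client \
#       --host=db.example.com --user=redcap_etl --password
#
# Then point the mysql_path option in the [mysql] section at the resulting
# ~/.mylogin.cnf so the password can be decrypted without prompting.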
# -------------------
# Connect to Database
# -------------------
# Create a MySQL connection object based on the configured connection type.
conn = get_mysql_conn(config)
# ------------------------- END SETUP ---------------------------------------
# ----------------
# Define functions
# ----------------
def get_data(csv_file, redcap_key, redcap_url, content):
"""Get REDCap data as a CSV file with an API key, URL and content type."""
with open(csv_file, 'wb') as f:
c = pycurl.Curl()
c.setopt(pycurl.CAINFO, certifi.where())
c.setopt(c.URL, redcap_url)
c.setopt(c.FOLLOWLOCATION, True)
post_data = {'token': redcap_key, 'content': content, \
'rawOrLabel': 'raw', 'type': 'flat', 'format': 'csv', \
'exportSurveyFields': 'True'}
postfields = urlencode(post_data)
c.setopt(c.POSTFIELDS, postfields)
c.setopt(c.WRITEDATA, f)
try:
c.perform()
c.close()
except pycurl.error, err:
c.close()
message = "Error: Can't fetch data. Check config: " + config_file
print(message)
logging.critical(message)
exit(2)
def get_prev_hash(project, mysql_table, log_table, conn = conn):
"""Get the sha1 hash of the previously uploaded data for a table."""
# See if the database contains the log_table (REDCap transfer log) table.
rs = sql.execute('SHOW TABLES LIKE "' + log_table + '";', conn)
row0 = rs.fetchone()
res = ''
if (row0 is not None) and (len(row0) != 0):
res = row0[0]
# If the table is found, find the most recent hash for the table data.
prev_hash = ''
if res == log_table:
sql_cmd = 'SELECT sha1_hash FROM %s ' % (log_table) + \
'WHERE table_name = "%s" ' % (mysql_table) + \
'ORDER BY timestamp_utc DESC ' + \
'LIMIT 1;'
rs = sql.execute(sql_cmd, conn)
row0 = rs.fetchone()
if (row0 is not None) and (len(row0) != 0):
prev_hash = row0[0]
return(prev_hash)
def parse_csv(csv_file):
"""Parse a CSV file with Pandas, with basic checks and error handling."""
if os.path.isfile(csv_file) == True:
num_lines = sum(1 for line in open(csv_file))
if num_lines > 1:
try:
data = pd.read_csv(csv_file, index_col=False)
data.insert(0, 'id', range(1, 1 + len(data)))
return(data)
except pd.parser.CParserError, err:
message = "Can't parse REDCap data. Check CSV file: " + csv_file
print(message)
logging.critical(message)
exit(3)
else:
message = "CSV file does not contain data: " + csv_file
print(message)
logging.warning(message)
return(None)
else:
message = "Can't read CSV file: " + csv_file
print(message)
logging.critical(message)
exit(4)
def hash_file(file_name):
"""Create a hash of a file."""
BLOCKSIZE = 65536
hasher = hashlib.sha1()
with open(file_name, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return(hasher.hexdigest())
def send_to_db(data_path, project, csv_file, dataset, mysql_table, log_table,
rcform = '', redcap_key = redcap_key, redcap_url = redcap_url,
conn = conn, mysql_user = mysql_user,
redcap_event_name_maxlen = redcap_event_name_maxlen):
"""Send data from REDCap to a MySQL (or MariaDB) database."""
if project != '':
# Prepend project name.
csv_file = project + '_' + csv_file
mysql_table = project + '_' + mysql_table
log_table = project + '_' + log_table
rcform = project + '_' + rcform
# Prepend file_path to csv_file.
csv_file = os.path.join(data_path, csv_file)
# Get the data from REDCap.
if project != '':
redcap_key = config.get('redcap', project + '_' + 'redcap_key', 0)
get_data(csv_file, redcap_key, redcap_url, dataset)
data = parse_csv(csv_file)
if data is None:
return(None)
# Calculate the file size and a hash (checksum) for recording in the log.
csv_file_size = os.path.getsize(csv_file)
csv_file_hash = hash_file(csv_file)
# If dataset == 'metadata' and csv_file_hash does not match the
# previous value ("prev_hash", from the most recent update), then save
# the old rcmeta and rcform tables with a datestamped name suffix
# and create new tables for the 'metadata' and 'record' data.
prev_hash_same = False
prev_hash = get_prev_hash(project, mysql_table, log_table)
if csv_file_hash == prev_hash:
prev_hash_same = True
else:
if prev_hash != '' and dataset == 'metadata':
timestamp = '{:%Y%m%dT%H%M%SZ}'.format(
datetime.utcnow().replace(tzinfo=pytz.utc))
rs = sql.execute('RENAME TABLE %s TO %s;' % \
(rcform, rcform + '_' + timestamp), conn)
rs = sql.execute('RENAME TABLE %s TO %s;' % \
(mysql_table, mysql_table + '_' + timestamp), conn)
# If the data has changed since the last sync, write to database and log.
if prev_hash_same == False:
# Set the data type for the redcap_event_name if this column is present.
data_dtype_dict = {}
if 'redcap_event_name' in list(data.columns.values):
data_dtype_dict['redcap_event_name'] = String(redcap_event_name_maxlen)
# Set the data type for variables ending with _timestamp as DateTime
r = re.compile('.*_timestamp$')
timestamp_columns = filter(r.match, list(data.columns.values))
for column in timestamp_columns:
data_dtype_dict[column] = DateTime
# Send the data to the database.
data.to_sql(name = mysql_table, con = conn, if_exists = 'replace',
index = False, dtype = data_dtype_dict)
# Create an ISO 8601 timestamp for logging. Use UTC for consistency.
timestamp = '{:%Y-%m-%dT%H:%M:%SZ}'.format(
datetime.utcnow().replace(tzinfo=pytz.utc))
# Create the log message string as a comma-separated list of values.
log_str = '{0},{1},{2},{3},{4},{5},{6},{7},{8}'.format(
timestamp, mysql_user, socket.gethostname(), len(data.index),
len(data.columns), mysql_table, csv_file, csv_file_size, csv_file_hash)
# Create a dataframe for the log message.
log_df = pd.read_csv(StringIO(log_str), header=None, index_col=False)
log_df.columns = ['timestamp_utc', 'user_name', 'host_name', 'num_rows',
'num_cols', 'table_name', 'file_name', 'size_bytes', 'sha1_hash']
# Convert the timestamp column to the datetime data type.
log_df.timestamp_utc = pd.to_datetime(
log_df.timestamp_utc, yearfirst=True, utc=True)
# Send the log message dataframe to the database.
log_df.to_sql(name = log_table, con = conn, if_exists = 'append',
index = False, dtype = {'timestamp_utc':DateTime})
# Write the log message to the log file.
logging.info("to " + log_table + ": " + log_str)
def commit_changes(repo, project = ''):
"""Track changes to transferred data files in local git repository."""
cmd = repo.git
cmd.add(all=True)
try:
cmd.commit(m="redcap2mysql.py data sync for project " + project)
except git.exc.GitCommandError, err:
logging.info([traceback.format_exc(limit=1).splitlines()[-1]])
def send_data(data_path, project = ''):
"""Get REDCap data and send to MySQL."""
# Send metadata
send_to_db(data_path, project, 'rcmeta.csv', 'metadata', 'rcmeta', 'rcxfer', 'rcform')
# Send events
#send_to_db(data_path, project, 'rcevent.csv', 'event', 'rcevent', 'rcxfer')
# Send arms
#send_to_db(data_path, project, 'rcarm.csv', 'arm', 'rcarm', 'rcxfer')
# Send Form Event Mappings (fems)
#send_to_db(data_path, project, 'rcfem.csv', 'formEventMapping', 'rcfem', 'rcxfer')
# Send users
send_to_db(data_path, project, 'rcuser.csv', 'user', 'rcuser', 'rcxfer')
# Send instruments
send_to_db(data_path, project, 'rcinst.csv', 'instrument', 'rcinst', 'rcxfer')
# Send records
send_to_db(data_path, project, 'rcform.csv', 'record', 'rcform', 'rcxfer')
# Commit changes to local repo
commit_changes(repo, project)
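# Illustrative sketch (not part of the original script): send_data() can also
# be called directly for a single named project. The project name below is a
# hypothetical placeholder; real names must match ^[A-Za-z0-9_]+$ and have a
# matching <project>_redcap_key entry in the config file.
#
# send_data(data_path, 'ProjectA')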
# --------------
# Transfer data
# --------------
# Get the project name(s) from the script argument(s), if present.
# The project must only contain letters, numbers, and underscore characters.
if len(sys.argv) > 1:
pattern = re.compile('^[A-Za-z0-9_]+$')
for project in sys.argv[1:]:
if pattern.match(project):
send_data(data_path, project)
else:
message = "Error: Invalid project name: %s" % (project)
print(message)
logging.critical(message)
exit(5)
else:
send_data(data_path) | # python -m pip install gitpython
# python -m pip install git+https://github.com/alorenzo175/mylogin.git#egg=mylogin
# python -m pip install certifi | random_line_split |
redcap2mysql.py | #!/usr/bin/python
# Export data from a REDCap project and send to a MySQL database.
# Track changes to transferred data files in local git repository.
#
# This is just a *rough* prototype in the *early* stages of development.
#
# It has been tested on Windows Server 2008 R2 with ActivePython 2.7 (64-bit).
# It has been tested on Windows Server 2008 R2 with Anaconda 4.3.0 2.7 (64-bit).
# It has been tested on Ubuntu 16 with the vendor-supplied Python 2.7 (64-bit).
#
# You need to have a REDCap project and a MySQL database. Access to the MySQL
# database will be over SSL, so you will need to supply SSL key and certs.
#
# Requires Python 2.7, a config file, git, mysql, a DSN, and these packages:
#
# python -m pip install pandas
# python -m pip install sqlalchemy
# python -m pip install ConfigParser
# python -m pip install pycurl
# python -m pip install logging
# python -m pip install datetime
# python -m pip install gitpython
# python -m pip install git+https://github.com/alorenzo175/mylogin.git#egg=mylogin
# python -m pip install certifi
#
# For use with ODBC database connections, you will also want to install pyodbc:
#
# python -m pip install pyodbc
#
# Or, alternatively, for use with the MySQL Connector driver written in Python:
#
# python -m pip install mysql-connector
#
# On Windows, you will also need Microsoft Visual C++ Compiler for Python 2.7.
# https://www.microsoft.com/en-us/download/details.aspx?id=44266
# You will also need the MySQL ODBC Connector (32-bit or 64-bit to match Python).
# https://dev.mysql.com/downloads/connector/odbc/
#
# Usage: python redcap2mysql_odbc.py [Project] [...]
#
# ... Where Project contains letters, numbers, and underscore characters. More
# than one project may be listed, with spaces separating the project names.
#
# This script can be automated with a utility such as cron. Here is an example
# crontab entry which runs the script every day at 8:55 PM:
#
# 55 20 * * * (cd /path/to/folder; /usr/bin/python ./redcap2mysql.py)
#
# Todo:
#
# 1. Add input data validation for all configuration parameters.
# 2. Try to conform to Python coding styles, conventions, and best practices.
# ---------------------------------------------------------------------------
# --------------------------- SETUP -----------------------------------------
# Use Python 3 style print statements.
from __future__ import print_function
# Import packages
import ConfigParser
from sqlalchemy import *
from sys import exit
import os
import sys
from pandas.io import sql
import getpass
import pandas as pd
import certifi
import pycurl
from urllib import urlencode
import hashlib
import logging
import socket
from StringIO import StringIO
import pytz
from datetime import datetime
import re
import git
import traceback
# -----------------------
# Read configuration file
# -----------------------
config_file = 'conf/redcap2mysql.cfg' # See conf/redcap2mysql.cfg.example
# Configure parameters with defaults. Use a config file for most of these.
config = ConfigParser.SafeConfigParser(
{'data_path': 'data', 'log_file': 'redcap2mysql.log',
'log_timestamp_format': '%Y-%m-%d %H:%M:%S %Z',
'mysql_dsn': '', 'mysql_pwd': '', 'mysql_host': '',
'mysql_port': '3306', 'mysql_conn_type': 'pyodbc', 'mysql_user': '',
'redcap_url': 'https://localhost/API/', 'redcap_key': '0123456789ABCDEF',
'redcap_event_name_maxlen': '100'})
if os.path.isfile(config_file) == True:
config.read(config_file)
else:
print("Can't find config file: " + config_file)
exit(1)
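# Illustrative sketch of the expected config file layout (values are
# placeholders, not taken from the original project). The section and option
# names mirror the config.get() calls used throughout this script.
#
# [global]
# data_path: data
# log_file: redcap2mysql.log
#
# [mysql]
# mysql_conn_type: mysql-connector
# mysql_dsn:
# mysql_host: db.example.org
# mysql_port: 3306
# mysql_db: redcap
# mysql_user: redcap_etl
# mysql_pwd:
# mysql_path: ~/.mylogin.cnf
#
# [mysql-ssl]
# ssl_ca: conf/ca.pem
# ssl_cert: conf/client-cert.pem
# ssl_key: conf/client-key.pem
#
# [redcap]
# redcap_url: https://redcap.example.org/api/
# redcap_key: 0123456789ABCDEF
# redcap_event_name_maxlen: 100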
# --------------------------
# Parse configuration object
# --------------------------
data_path = config.get('global', 'data_path', 0)
log_timestamp_format = config.get('global', 'log_timestamp_format', 0)
log_file = config.get('global', 'log_file', 0)
mysql_host = config.get('mysql', 'mysql_host', 0)
mysql_db = config.get('mysql', 'mysql_db', 0)
mysql_user = config.get('mysql', 'mysql_user', 0)
redcap_url = config.get('redcap', 'redcap_url', 0)
redcap_key = config.get('redcap', 'redcap_key', 0)
redcap_event_name_maxlen = int(
config.get('redcap', 'redcap_event_name_maxlen', 0))
# -----------------
# Configure logging
# -----------------
log_level = logging.INFO # Set to logging.DEBUG or logging.INFO
# Set log level and timestamp format
logging.basicConfig(filename=log_file, level=logging.DEBUG,
format='%(asctime)s %(message)s', datefmt=log_timestamp_format)
# ------------------------
# Configure local git repo
# ------------------------
# Create a local git repository for downloading and archiving data.
try:
repo = git.Repo.init(data_path)
except:
message = "Error: Can't create git repo (%s)! Check config!" % (data_path)
logging.error(message)
raise OSError(message)
# ---------------------------
# Configure local data folder
# ---------------------------
# Create data folder. Should already exist if git repo created without error.
if not os.path.exists(data_path):
try:
os.makedirs(data_path)
except:
message = "Error: Can't create folder (%s)! Check config!" % (data_path)
logging.critical(message)
raise OSError(message)
# -------------------------
# Configure MySQL user name
# -------------------------
# Get username from the operating system, if it is blank (default).
if mysql_user == '':
mysql_user = getpass.getuser()
# ------------------------------------
# Define database connection functions
# ------------------------------------
def get_mysql_pwd(config):
"""Get the MySQL password from the config file or an interactive prompt."""
# Two ways to get the password are supported.
#
# 1. Read clear-text password from config file. (least secure)
# 2. Read password as entered manually from a console prompt. (most secure)
# First try the config file. This is the least secure method. Protect the file.
mysql_pwd = config.get('mysql', 'mysql_pwd', 0)
# Try other method if config file password is blank or missing.
if mysql_pwd == '':
# Prompt for the password. More secure, but won't work unattended.
mysql_pwd = getpass.getpass()
return(mysql_pwd)
def get_mysql_conn(config):
"""Configure the MySQL database connection."""
mysql_conn_type = config.get('mysql', 'mysql_conn_type', 0)
mysql_user = config.get('mysql', 'mysql_user', 0)
mysql_pwd = config.get('mysql', 'mysql_pwd', 0)
if mysql_user == '':
mysql_user = getpass.getuser()
if mysql_conn_type == 'pyodbc':
mysql_pwd = get_mysql_pwd(config)
mysql_dsn = config.get('mysql', 'mysql_dsn', 0)
# Create database connection.
import pyodbc
DB_URI = "mysql+pyodbc://{user}:{password}@{dsn}"
conn = create_engine(
DB_URI.format( user=mysql_user, password=mysql_pwd, dsn=mysql_dsn ))
return(conn)
else:
# Try to read encrypted MySQL password from ~/.mylogin.cnf and mysql_path.
mysql_path = config.get('mysql', 'mysql_path', 0)
if mysql_pwd == '':
if mysql_path != '':
# Read encrypted password and decrypt it with mylogin module.
# While better than clear-text, be careful about securing the pw file.
# However, it's probably the best method for unattended use.
try:
# Get encrypted password. This requires the mylogin module.
import mylogin
mysql_host = config.get('mysql', 'mysql_host', 0)
login = mylogin.get_login_info(mysql_path, host=mysql_host)
mysql_pwd = login['passwd']
except mylogin.exception.UtilError as err:
print("mylogin error: {0}".format(err))
else:
mysql_pwd = get_mysql_pwd(config)
# Import packages.
import mysql.connector
from mysql.connector.constants import ClientFlag
# Get SSL settings (file paths to SSL keys and certs).
ssl_ca = config.get('mysql-ssl', 'ssl_ca', 0)
ssl_cert = config.get('mysql-ssl', 'ssl_cert', 0)
ssl_key = config.get('mysql-ssl', 'ssl_key', 0)
# Check for existence of SSL files.
for file_path in (ssl_ca, ssl_cert, ssl_key):
if not os.path.exists(file_path):
message = "Error: Can't find: %s! Check config!" % (file_path)
logging.critical(message)
raise OSError(message)
# Create a dict of SSL settings to pass to create_engine().
ssl_args = {
'client_flags': [ClientFlag.SSL],
'ssl_ca': ssl_ca,
'ssl_cert': ssl_cert,
'ssl_key': ssl_key,
}
# Create database connection.
mysql_host = config.get('mysql', 'mysql_host', 0)
mysql_port = config.get('mysql', 'mysql_port', 0)
mysql_db = config.get('mysql', 'mysql_db', 0)
DB_URI = "mysql+mysqlconnector://{user}:{password}@{host}:{port}/{db}"
conn = create_engine(
DB_URI.format( user=mysql_user, password=mysql_pwd, host=mysql_host,
port=mysql_port, db=mysql_db), connect_args = ssl_args )
return(conn)
# -------------------
# Connect to Database
# -------------------
# Create a MySQL connection object based on the configured connection type.
conn = get_mysql_conn(config)
# ------------------------- END SETUP ---------------------------------------
# ----------------
# Define functions
# ----------------
def get_data(csv_file, redcap_key, redcap_url, content):
"""Get REDCap data as a CSV file with an API key, URL and content type."""
with open(csv_file, 'wb') as f:
c = pycurl.Curl()
c.setopt(pycurl.CAINFO, certifi.where())
c.setopt(c.URL, redcap_url)
c.setopt(c.FOLLOWLOCATION, True)
post_data = {'token': redcap_key, 'content': content, \
'rawOrLabel': 'raw', 'type': 'flat', 'format': 'csv', \
'exportSurveyFields': 'True'}
postfields = urlencode(post_data)
c.setopt(c.POSTFIELDS, postfields)
c.setopt(c.WRITEDATA, f)
try:
c.perform()
c.close()
except pycurl.error, err:
c.close()
message = "Error: Can't fetch data. Check config: " + config_file
print(message)
logging.critical(message)
exit(2)
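# Illustrative sketch (not part of the original script): a direct call to
# get_data() fetches one REDCap export into a local CSV file. The file path
# below is a hypothetical placeholder; send_to_db() builds the real paths.
#
# get_data('data/rcform.csv', redcap_key, redcap_url, 'record')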
def get_prev_hash(project, mysql_table, log_table, conn = conn):
|
def parse_csv(csv_file):
"""Parse a CSV file with Pandas, with basic checks and error handling."""
if os.path.isfile(csv_file) == True:
num_lines = sum(1 for line in open(csv_file))
if num_lines > 1:
try:
data = pd.read_csv(csv_file, index_col=False)
data.insert(0, 'id', range(1, 1 + len(data)))
return(data)
except pd.parser.CParserError, err:
message = "Can't parse REDCap data. Check CSV file: " + csv_file
print(message)
logging.critical(message)
exit(3)
else:
message = "CSV file does not contain data: " + csv_file
print(message)
logging.warning(message)
return(None)
else:
message = "Can't read CSV file: " + csv_file
print(message)
logging.critical(message)
exit(4)
def hash_file(file_name):
"""Create a hash of a file."""
BLOCKSIZE = 65536
hasher = hashlib.sha1()
with open(file_name, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return(hasher.hexdigest())
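# Illustrative sketch (not part of the original script): the chunked digest
# above matches hashing the whole file in one call, so either form can be used
# to verify a transferred file. The file path is a placeholder.
#
# import hashlib
# with open('data/rcform.csv', 'rb') as f:
#     assert hashlib.sha1(f.read()).hexdigest() == hash_file('data/rcform.csv')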
def send_to_db(data_path, project, csv_file, dataset, mysql_table, log_table,
rcform = '', redcap_key = redcap_key, redcap_url = redcap_url,
conn = conn, mysql_user = mysql_user,
redcap_event_name_maxlen = redcap_event_name_maxlen):
"""Send data from REDCap to a MySQL (or MariaDB) database."""
if project != '':
# Prepend project name.
csv_file = project + '_' + csv_file
mysql_table = project + '_' + mysql_table
log_table = project + '_' + log_table
rcform = project + '_' + rcform
# Prepend file_path to csv_file.
csv_file = os.path.join(data_path, csv_file)
# Get the data from REDCap.
if project != '':
redcap_key = config.get('redcap', project + '_' + 'redcap_key', 0)
get_data(csv_file, redcap_key, redcap_url, dataset)
data = parse_csv(csv_file)
if data is None:
return(None)
# Calculate the file size and a hash (checksum) for recording in the log.
csv_file_size = os.path.getsize(csv_file)
csv_file_hash = hash_file(csv_file)
# If dataset == 'metadata' and csv_file_hash does not match the
# previous value ("prev_hash", from the most recent update), then save
# the old rcmeta and rcform tables with a datestamped name suffix
# and create new tables for the 'metadata' and 'record' data.
prev_hash_same = False
prev_hash = get_prev_hash(project, mysql_table, log_table)
if csv_file_hash == prev_hash:
prev_hash_same = True
else:
if prev_hash != '' and dataset == 'metadata':
timestamp = '{:%Y%m%dT%H%M%SZ}'.format(
datetime.utcnow().replace(tzinfo=pytz.utc))
rs = sql.execute('RENAME TABLE %s TO %s;' % \
(rcform, rcform + '_' + timestamp), conn)
rs = sql.execute('RENAME TABLE %s TO %s;' % \
(mysql_table, mysql_table + '_' + timestamp), conn)
# If the data has changed since the last sync, write to database and log.
if prev_hash_same == False:
# Set the data type for the redcap_event_name if this column is present.
data_dtype_dict = {}
if 'redcap_event_name' in list(data.columns.values):
data_dtype_dict['redcap_event_name'] = String(redcap_event_name_maxlen)
# Set the data type for variables ending with _timestamp as DateTime
r = re.compile('.*_timestamp$')
timestamp_columns = filter(r.match, list(data.columns.values))
for column in timestamp_columns:
data_dtype_dict[column] = DateTime
# Send the data to the database.
data.to_sql(name = mysql_table, con = conn, if_exists = 'replace',
index = False, dtype = data_dtype_dict)
# Create an ISO 8601 timestamp for logging. Use UTC for consistency.
timestamp = '{:%Y-%m-%dT%H:%M:%SZ}'.format(
datetime.utcnow().replace(tzinfo=pytz.utc))
# Create the log message string as a comma-separated list of values.
log_str = '{0},{1},{2},{3},{4},{5},{6},{7},{8}'.format(
timestamp, mysql_user, socket.gethostname(), len(data.index),
len(data.columns), mysql_table, csv_file, csv_file_size, csv_file_hash)
# Create a dataframe for the log message.
log_df = pd.read_csv(StringIO(log_str), header=None, index_col=False)
log_df.columns = ['timestamp_utc', 'user_name', 'host_name', 'num_rows',
'num_cols', 'table_name', 'file_name', 'size_bytes', 'sha1_hash']
# Convert the timestamp column to the datetime data type.
log_df.timestamp_utc = pd.to_datetime(
log_df.timestamp_utc, yearfirst=True, utc=True)
# Send the log message dataframe to the database.
log_df.to_sql(name = log_table, con = conn, if_exists = 'append',
index = False, dtype = {'timestamp_utc':DateTime})
# Write the log message to the log file.
logging.info("to " + log_table + ": " + log_str)
def commit_changes(repo, project = ''):
"""Track changes to transferred data files in local git repository."""
cmd = repo.git
cmd.add(all=True)
try:
cmd.commit(m="redcap2mysql.py data sync for project " + project)
except git.exc.GitCommandError, err:
logging.info([traceback.format_exc(limit=1).splitlines()[-1]])
def send_data(data_path, project = ''):
"""Get REDCap data and send to MySQL."""
# Send metadata
send_to_db(data_path, project, 'rcmeta.csv', 'metadata', 'rcmeta', 'rcxfer', 'rcform')
# Send events
#send_to_db(data_path, project, 'rcevent.csv', 'event', 'rcevent', 'rcxfer')
# Send arms
#send_to_db(data_path, project, 'rcarm.csv', 'arm', 'rcarm', 'rcxfer')
# Send Form Event Mappings (fems)
#send_to_db(data_path, project, 'rcfem.csv', 'formEventMapping', 'rcfem', 'rcxfer')
# Send users
send_to_db(data_path, project, 'rcuser.csv', 'user', 'rcuser', 'rcxfer')
# Send instruments
send_to_db(data_path, project, 'rcinst.csv', 'instrument', 'rcinst', 'rcxfer')
# Send records
send_to_db(data_path, project, 'rcform.csv', 'record', 'rcform', 'rcxfer')
# Commit changes to local repo
commit_changes(repo, project)
# --------------
# Transfer data
# --------------
# Get the project name(s) from the script argument(s), if present.
# The project must only contain letters, numbers, and underscore characters.
if len(sys.argv) > 1:
pattern = re.compile('^[A-Za-z0-9_]+$')
for project in sys.argv[1:]:
if pattern.match(project):
send_data(data_path, project)
else:
message = "Error: Invalid project name: %s" % (project)
print(message)
logging.critical(message)
exit(5)
else:
send_data(data_path)
| """Get the sha1 hash of the previously uploaded data for a table."""
# See if the database contains the log_table (REDCap transfer log) table.
rs = sql.execute('SHOW TABLES LIKE "' + log_table + '";', conn)
row0 = rs.fetchone()
res = ''
if (row0 is not None) and (len(row0) != 0):
res = row0[0]
# If the table is found, find the most recent hash for the table data.
prev_hash = ''
if res == log_table:
sql_cmd = 'SELECT sha1_hash FROM %s ' % (log_table) + \
'WHERE table_name = "%s" ' % (mysql_table) + \
'ORDER BY timestamp_utc DESC ' + \
'LIMIT 1;'
rs = sql.execute(sql_cmd, conn)
row0 = rs.fetchone()
if (row0 is not None) and (len(row0) != 0):
prev_hash = row0[0]
return(prev_hash) | identifier_body |
messagecard.go | package goteamsnotify
import (
"errors"
"fmt"
"strings"
)
// MessageCardSectionFact represents a section fact entry that is usually
// displayed in a two-column key/value format.
type MessageCardSectionFact struct {
// Name is the key for an associated value in a key/value pair
Name string `json:"name"`
// Value is the value for an associated key in a key/value pair
Value string `json:"value"`
}
// MessageCardSectionImage represents an image as used by the heroImage and
// images properties of a section.
type MessageCardSectionImage struct {
// Image is the URL to the image.
Image string `json:"image"`
// Title is a short description of the image. Typically, this description
// is displayed in a tooltip as the user hovers their mouse over the
// image.
Title string `json:"title"`
}
// MessageCardSection represents a section to include in a message card.
type MessageCardSection struct {
// Title is the title property of a section. This property is displayed
// in a font that stands out, while not as prominent as the card's title.
// It is meant to introduce the section and summarize its content,
// similarly to how the card's title property is meant to summarize the
// whole card.
Title string `json:"title,omitempty"`
// Text is the section's text property. This property is very similar to
// the text property of the card. It can be used for the same purpose.
Text string `json:"text,omitempty"`
// ActivityImage is a property used to display a picture associated with
// the subject of a message card. For example, this might be the portrait
// of a person who performed an activity that the message card is
// associated with.
ActivityImage string `json:"activityImage,omitempty"`
// ActivityTitle is a property used to summarize the activity associated
// with a message card.
ActivityTitle string `json:"activityTitle,omitempty"`
// ActivitySubtitle is a property used to show brief, but extended
// information about an activity associated with a message card. Examples
// include the date and time the associated activity was taken or the
// handle of a person associated with the activity.
ActivitySubtitle string `json:"activitySubtitle,omitempty"`
// ActivityText is a property used to provide details about the activity.
// For example, if the message card is used to deliver updates about a
// topic, then this property would be used to hold the bulk of the content
// for the update notification.
ActivityText string `json:"activityText,omitempty"`
// Markdown represents a toggle to enable or disable Markdown formatting.
// By default, all text fields in a card and its sections can be formatted
// using basic Markdown.
Markdown bool `json:"markdown,omitempty"`
// StartGroup is the section's startGroup property. This property marks
// the start of a logical group of information. Typically, sections with
// startGroup set to true will be visually separated from previous card
// elements.
StartGroup bool `json:"startGroup,omitempty"`
// HeroImage is a property that allows for setting an image as the
// centerpiece of a message card. This property can also be used to add a
// banner to the message card.
// Note: heroImage is not currently supported by Microsoft Teams
// https://stackoverflow.com/a/45389789
// We use a pointer to this type in order to have the json package
// properly omit this field if not explicitly set.
// https://github.com/golang/go/issues/11939
// https://stackoverflow.com/questions/18088294/how-to-not-marshal-an-empty-struct-into-json-with-go
// https://stackoverflow.com/questions/33447334/golang-json-marshal-how-to-omit-empty-nested-struct
HeroImage *MessageCardSectionImage `json:"heroImage,omitempty"`
// Facts is a collection of MessageCardSectionFact values. A section entry
// usually is displayed in a two-column key/value format.
Facts []MessageCardSectionFact `json:"facts,omitempty"`
// Images is a property that allows for the inclusion of a photo gallery
// inside a section.
// We use a slice of pointers to this type in order to have the json
// package properly omit this field if not explicitly set.
// https://github.com/golang/go/issues/11939
// https://stackoverflow.com/questions/18088294/how-to-not-marshal-an-empty-struct-into-json-with-go
// https://stackoverflow.com/questions/33447334/golang-json-marshal-how-to-omit-empty-nested-struct
Images []*MessageCardSectionImage `json:"images,omitempty"`
}
// MessageCard represents a legacy actionable message card used via Office 365
// or Microsoft Teams connectors.
type MessageCard struct {
// Required; must be set to "MessageCard"
Type string `json:"@type"`
// Required; must be set to "https://schema.org/extensions"
Context string `json:"@context"`
// Summary is required if the card does not contain a text property,
// otherwise optional. The summary property is typically displayed in the
// list view in Outlook, as a way to quickly determine what the card is
// all about. Summary appears to only be used when there are sections defined
Summary string `json:"summary,omitempty"`
// Title is the title property of a card. It is meant to be rendered in a
// prominent way, at the very top of the card. Use it to introduce the
// content of the card in such a way users will immediately know what to
// expect.
Title string `json:"title,omitempty"`
// Text is required if the card does not contain a summary property,
// otherwise optional. The text property is meant to be displayed in a
// normal font below the card's title. Use it to display content, such as
// the description of the entity being referenced, or an abstract of a
// news article.
Text string `json:"text,omitempty"`
// Specifies a custom brand color for the card. The color will be
// displayed in a non-obtrusive manner.
ThemeColor string `json:"themeColor,omitempty"`
// Sections is a collection of sections to include in the card.
Sections []*MessageCardSection `json:"sections,omitempty"`
}
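// Illustrative sketch (not part of the original source): a minimal MessageCard
// with one section marshals to JSON along these lines. The field values are
// placeholders; empty optional fields are dropped via the omitempty tags.
//
//   {
//     "@type": "MessageCard",
//     "@context": "https://schema.org/extensions",
//     "summary": "Build finished",
//     "sections": [
//       {
//         "activityTitle": "Build finished",
//         "facts": [{"name": "status", "value": "success"}]
//       }
//     ]
//   }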
// AddSection adds one or many additional MessageCardSection values to a
// MessageCard. Validation is performed to reject invalid values with an error
// message. | if s == nil {
return fmt.Errorf("func AddSection: nil MessageCardSection received")
}
// Perform validation of all MessageCardSection fields in an effort to
// avoid adding a MessageCardSection with zero value fields. This is
// done to avoid generating an empty sections JSON array since the
// Sections slice for the MessageCard type would technically not be at
// a zero value state. Due to this non-zero value state, the
// encoding/json package would end up including the Sections struct
// field in the output JSON.
// See also https://github.com/golang/go/issues/11939
switch {
// If any of these cases trigger, skip over the `default` case
// statement and add the section.
case s.Images != nil:
case s.Facts != nil:
case s.HeroImage != nil:
case s.StartGroup:
case s.Markdown:
case s.ActivityText != "":
case s.ActivitySubtitle != "":
case s.ActivityTitle != "":
case s.ActivityImage != "":
case s.Text != "":
case s.Title != "":
default:
return fmt.Errorf("all fields found to be at zero-value, skipping section")
}
mc.Sections = append(mc.Sections, s)
}
return nil
}
// AddFact adds one or many additional MessageCardSectionFact values to a
// MessageCardSection
func (mcs *MessageCardSection) AddFact(fact ...MessageCardSectionFact) error {
for _, f := range fact {
if f.Name == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
if f.Value == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
}
mcs.Facts = append(mcs.Facts, fact...)
return nil
}
// AddFactFromKeyValue accepts a key and slice of values and converts them to
// MessageCardSectionFact values
func (mcs *MessageCardSection) AddFactFromKeyValue(key string, values ...string) error {
// validate arguments
if key == "" {
return errors.New("empty key received for new fact")
}
if len(values) < 1 {
return errors.New("no values received for new fact")
}
fact := MessageCardSectionFact{
Name: key,
Value: strings.Join(values, ", "),
}
// TODO: Explicitly define or use constructor?
// fact := NewMessageCardSectionFact()
// fact.Name = key
// fact.Value = strings.Join(values, ", ")
mcs.Facts = append(mcs.Facts, fact)
// if we made it this far then all should be well
return nil
}
// AddImage adds an image to a MessageCard section. These images are used to
// provide a photo gallery inside a MessageCard section.
func (mcs *MessageCardSection) AddImage(sectionImage ...MessageCardSectionImage) error {
for i := range sectionImage {
if sectionImage[i].Image == "" {
return fmt.Errorf("cannot add empty image URL")
}
if sectionImage[i].Title == "" {
return fmt.Errorf("cannot add empty image title")
}
mcs.Images = append(mcs.Images, §ionImage[i])
}
return nil
}
// AddHeroImageStr adds a Hero Image to a MessageCard section using string
// arguments. This image is used as the centerpiece or banner of a message
// card.
func (mcs *MessageCardSection) AddHeroImageStr(imageURL string, imageTitle string) error {
if imageURL == "" {
return fmt.Errorf("cannot add empty hero image URL")
}
if imageTitle == "" {
return fmt.Errorf("cannot add empty hero image title")
}
heroImage := MessageCardSectionImage{
Image: imageURL,
Title: imageTitle,
}
// TODO: Explicitly define or use constructor?
// heroImage := NewMessageCardSectionImage()
// heroImage.Image = imageURL
// heroImage.Title = imageTitle
mcs.HeroImage = &heroImage
// our validation checks didn't find any problems
return nil
}
// AddHeroImage adds a Hero Image to a MessageCard section using a
// MessageCardSectionImage argument. This image is used as the centerpiece or
// banner of a message card.
func (mcs *MessageCardSection) AddHeroImage(heroImage MessageCardSectionImage) error {
if heroImage.Image == "" {
return fmt.Errorf("cannot add empty hero image URL")
}
if heroImage.Title == "" {
return fmt.Errorf("cannot add empty hero image title")
}
mcs.HeroImage = &heroImage
// our validation checks didn't find any problems
return nil
}
// NewMessageCard creates a new message card with fields required by the
// legacy message card format already predefined
func NewMessageCard() MessageCard {
// define expected values to meet Office 365 Connector card requirements
// https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#card-fields
msgCard := MessageCard{
Type: "MessageCard",
Context: "https://schema.org/extensions",
}
return msgCard
}
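// Illustrative sketch (not part of the original source): one possible way to
// assemble a card with the constructors and helper methods defined in this
// file. The titles, keys, and values are placeholders, and errors are simply
// returned to the caller rather than handled.
func exampleMinimalCard() (MessageCard, error) {
	card := NewMessageCard()
	card.Title = "Nightly job"
	card.Text = "The nightly job completed."
	section := NewMessageCardSection()
	section.Title = "Details"
	if err := section.AddFactFromKeyValue("status", "success"); err != nil {
		return card, err
	}
	if err := card.AddSection(section); err != nil {
		return card, err
	}
	return card, nil
}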
// NewMessageCardSection creates an empty message card section
func NewMessageCardSection() *MessageCardSection {
msgCardSection := MessageCardSection{}
return &msgCardSection
}
// NewMessageCardSectionFact creates an empty message card section fact
func NewMessageCardSectionFact() MessageCardSectionFact {
msgCardSectionFact := MessageCardSectionFact{}
return msgCardSectionFact
}
// NewMessageCardSectionImage creates an empty image for use with message card
// section
func NewMessageCardSectionImage() MessageCardSectionImage {
msgCardSectionImage := MessageCardSectionImage{}
return msgCardSectionImage
} | func (mc *MessageCard) AddSection(section ...*MessageCardSection) error {
for _, s := range section {
// bail if a completely nil section provided | random_line_split |
messagecard.go | package goteamsnotify
import (
"errors"
"fmt"
"strings"
)
// MessageCardSectionFact represents a section fact entry that is usually
// displayed in a two-column key/value format.
type MessageCardSectionFact struct {
// Name is the key for an associated value in a key/value pair
Name string `json:"name"`
// Value is the value for an associated key in a key/value pair
Value string `json:"value"`
}
// MessageCardSectionImage represents an image as used by the heroImage and
// images properties of a section.
type MessageCardSectionImage struct {
// Image is the URL to the image.
Image string `json:"image"`
// Title is a short description of the image. Typically, this description
// is displayed in a tooltip as the user hovers their mouse over the
// image.
Title string `json:"title"`
}
// MessageCardSection represents a section to include in a message card.
type MessageCardSection struct {
// Title is the title property of a section. This property is displayed
// in a font that stands out, while not as prominent as the card's title.
// It is meant to introduce the section and summarize its content,
// similarly to how the card's title property is meant to summarize the
// whole card.
Title string `json:"title,omitempty"`
// Text is the section's text property. This property is very similar to
// the text property of the card. It can be used for the same purpose.
Text string `json:"text,omitempty"`
// ActivityImage is a property used to display a picture associated with
// the subject of a message card. For example, this might be the portrait
// of a person who performed an activity that the message card is
// associated with.
ActivityImage string `json:"activityImage,omitempty"`
// ActivityTitle is a property used to summarize the activity associated
// with a message card.
ActivityTitle string `json:"activityTitle,omitempty"`
// ActivitySubtitle is a property used to show brief, but extended
// information about an activity associated with a message card. Examples
// include the date and time the associated activity was taken or the
// handle of a person associated with the activity.
ActivitySubtitle string `json:"activitySubtitle,omitempty"`
// ActivityText is a property used to provide details about the activity.
// For example, if the message card is used to deliver updates about a
// topic, then this property would be used to hold the bulk of the content
// for the update notification.
ActivityText string `json:"activityText,omitempty"`
// Markdown represents a toggle to enable or disable Markdown formatting.
// By default, all text fields in a card and its sections can be formatted
// using basic Markdown.
Markdown bool `json:"markdown,omitempty"`
// StartGroup is the section's startGroup property. This property marks
// the start of a logical group of information. Typically, sections with
// startGroup set to true will be visually separated from previous card
// elements.
StartGroup bool `json:"startGroup,omitempty"`
// HeroImage is a property that allows for setting an image as the
// centerpiece of a message card. This property can also be used to add a
// banner to the message card.
// Note: heroImage is not currently supported by Microsoft Teams
// https://stackoverflow.com/a/45389789
// We use a pointer to this type in order to have the json package
// properly omit this field if not explicitly set.
// https://github.com/golang/go/issues/11939
// https://stackoverflow.com/questions/18088294/how-to-not-marshal-an-empty-struct-into-json-with-go
// https://stackoverflow.com/questions/33447334/golang-json-marshal-how-to-omit-empty-nested-struct
HeroImage *MessageCardSectionImage `json:"heroImage,omitempty"`
// Facts is a collection of MessageCardSectionFact values. A section entry
// usually is displayed in a two-column key/value format.
Facts []MessageCardSectionFact `json:"facts,omitempty"`
// Images is a property that allows for the inclusion of a photo gallery
// inside a section.
// We use a slice of pointers to this type in order to have the json
// package properly omit this field if not explicitly set.
// https://github.com/golang/go/issues/11939
// https://stackoverflow.com/questions/18088294/how-to-not-marshal-an-empty-struct-into-json-with-go
// https://stackoverflow.com/questions/33447334/golang-json-marshal-how-to-omit-empty-nested-struct
Images []*MessageCardSectionImage `json:"images,omitempty"`
}
// MessageCard represents a legacy actionable message card used via Office 365
// or Microsoft Teams connectors.
type MessageCard struct {
// Required; must be set to "MessageCard"
Type string `json:"@type"`
// Required; must be set to "https://schema.org/extensions"
Context string `json:"@context"`
// Summary is required if the card does not contain a text property,
// otherwise optional. The summary property is typically displayed in the
// list view in Outlook, as a way to quickly determine what the card is
// all about. Summary appears to only be used when there are sections defined
Summary string `json:"summary,omitempty"`
// Title is the title property of a card. It is meant to be rendered in a
// prominent way, at the very top of the card. Use it to introduce the
// content of the card in such a way users will immediately know what to
// expect.
Title string `json:"title,omitempty"`
// Text is required if the card does not contain a summary property,
// otherwise optional. The text property is meant to be displayed in a
// normal font below the card's title. Use it to display content, such as
// the description of the entity being referenced, or an abstract of a
// news article.
Text string `json:"text,omitempty"`
// Specifies a custom brand color for the card. The color will be
// displayed in a non-obtrusive manner.
ThemeColor string `json:"themeColor,omitempty"`
// Sections is a collection of sections to include in the card.
Sections []*MessageCardSection `json:"sections,omitempty"`
}
// AddSection adds one or many additional MessageCardSection values to a
// MessageCard. Validation is performed to reject invalid values with an error
// message.
func (mc *MessageCard) AddSection(section ...*MessageCardSection) error {
for _, s := range section {
// bail if a completely nil section provided
if s == nil {
return fmt.Errorf("func AddSection: nil MessageCardSection received")
}
// Perform validation of all MessageCardSection fields in an effort to
// avoid adding a MessageCardSection with zero value fields. This is
// done to avoid generating an empty sections JSON array since the
// Sections slice for the MessageCard type would technically not be at
// a zero value state. Due to this non-zero value state, the
// encoding/json package would end up including the Sections struct
// field in the output JSON.
// See also https://github.com/golang/go/issues/11939
switch {
// If any of these cases trigger, skip over the `default` case
// statement and add the section.
case s.Images != nil:
case s.Facts != nil:
case s.HeroImage != nil:
case s.StartGroup:
case s.Markdown:
case s.ActivityText != "":
case s.ActivitySubtitle != "":
case s.ActivityTitle != "":
case s.ActivityImage != "":
case s.Text != "":
case s.Title != "":
default:
return fmt.Errorf("all fields found to be at zero-value, skipping section")
}
mc.Sections = append(mc.Sections, s)
}
return nil
}
// AddFact adds one or many additional MessageCardSectionFact values to a
// MessageCardSection
func (mcs *MessageCardSection) | (fact ...MessageCardSectionFact) error {
for _, f := range fact {
if f.Name == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
if f.Value == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
}
mcs.Facts = append(mcs.Facts, fact...)
return nil
}
// AddFactFromKeyValue accepts a key and slice of values and converts them to
// MessageCardSectionFact values
func (mcs *MessageCardSection) AddFactFromKeyValue(key string, values ...string) error {
// validate arguments
if key == "" {
return errors.New("empty key received for new fact")
}
if len(values) < 1 {
return errors.New("no values received for new fact")
}
fact := MessageCardSectionFact{
Name: key,
Value: strings.Join(values, ", "),
}
// TODO: Explicitly define or use constructor?
// fact := NewMessageCardSectionFact()
// fact.Name = key
// fact.Value = strings.Join(values, ", ")
mcs.Facts = append(mcs.Facts, fact)
// if we made it this far then all should be well
return nil
}
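// Illustrative sketch (not part of the original source): AddFactFromKeyValue
// joins multiple values into a single fact. The key and values below are
// placeholders.
//
//   section := NewMessageCardSection()
//   _ = section.AddFactFromKeyValue("reviewers", "alice", "bob")
//   // yields a fact {Name: "reviewers", Value: "alice, bob"}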
// AddImage adds an image to a MessageCard section. These images are used to
// provide a photo gallery inside a MessageCard section.
func (mcs *MessageCardSection) AddImage(sectionImage ...MessageCardSectionImage) error {
for i := range sectionImage {
if sectionImage[i].Image == "" {
return fmt.Errorf("cannot add empty image URL")
}
if sectionImage[i].Title == "" {
return fmt.Errorf("cannot add empty image title")
}
mcs.Images = append(mcs.Images, §ionImage[i])
}
return nil
}
// AddHeroImageStr adds a Hero Image to a MessageCard section using string
// arguments. This image is used as the centerpiece or banner of a message
// card.
func (mcs *MessageCardSection) AddHeroImageStr(imageURL string, imageTitle string) error {
if imageURL == "" {
return fmt.Errorf("cannot add empty hero image URL")
}
if imageTitle == "" {
return fmt.Errorf("cannot add empty hero image title")
}
heroImage := MessageCardSectionImage{
Image: imageURL,
Title: imageTitle,
}
// TODO: Explicitly define or use constructor?
// heroImage := NewMessageCardSectionImage()
// heroImage.Image = imageURL
// heroImage.Title = imageTitle
mcs.HeroImage = &heroImage
// our validation checks didn't find any problems
return nil
}
// AddHeroImage adds a Hero Image to a MessageCard section using a
// MessageCardSectionImage argument. This image is used as the centerpiece or
// banner of a message card.
func (mcs *MessageCardSection) AddHeroImage(heroImage MessageCardSectionImage) error {
if heroImage.Image == "" {
return fmt.Errorf("cannot add empty hero image URL")
}
if heroImage.Title == "" {
return fmt.Errorf("cannot add empty hero image title")
}
mcs.HeroImage = &heroImage
// our validation checks didn't find any problems
return nil
}
// NewMessageCard creates a new message card with fields required by the
// legacy message card format already predefined
func NewMessageCard() MessageCard {
// define expected values to meet Office 365 Connector card requirements
// https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#card-fields
msgCard := MessageCard{
Type: "MessageCard",
Context: "https://schema.org/extensions",
}
return msgCard
}
// NewMessageCardSection creates an empty message card section
func NewMessageCardSection() *MessageCardSection {
msgCardSection := MessageCardSection{}
return &msgCardSection
}
// NewMessageCardSectionFact creates an empty message card section fact
func NewMessageCardSectionFact() MessageCardSectionFact {
msgCardSectionFact := MessageCardSectionFact{}
return msgCardSectionFact
}
// NewMessageCardSectionImage creates an empty image for use with message card
// section
func NewMessageCardSectionImage() MessageCardSectionImage {
msgCardSectionImage := MessageCardSectionImage{}
return msgCardSectionImage
}
| AddFact | identifier_name |
messagecard.go | package goteamsnotify
import (
"errors"
"fmt"
"strings"
)
// MessageCardSectionFact represents a section fact entry that is usually
// displayed in a two-column key/value format.
type MessageCardSectionFact struct {
// Name is the key for an associated value in a key/value pair
Name string `json:"name"`
// Value is the value for an associated key in a key/value pair
Value string `json:"value"`
}
// MessageCardSectionImage represents an image as used by the heroImage and
// images properties of a section.
type MessageCardSectionImage struct {
// Image is the URL to the image.
Image string `json:"image"`
// Title is a short description of the image. Typically, this description
// is displayed in a tooltip as the user hovers their mouse over the
// image.
Title string `json:"title"`
}
// MessageCardSection represents a section to include in a message card.
type MessageCardSection struct {
// Title is the title property of a section. This property is displayed
// in a font that stands out, while not as prominent as the card's title.
// It is meant to introduce the section and summarize its content,
// similarly to how the card's title property is meant to summarize the
// whole card.
Title string `json:"title,omitempty"`
// Text is the section's text property. This property is very similar to
// the text property of the card. It can be used for the same purpose.
Text string `json:"text,omitempty"`
// ActivityImage is a property used to display a picture associated with
// the subject of a message card. For example, this might be the portrait
// of a person who performed an activity that the message card is
// associated with.
ActivityImage string `json:"activityImage,omitempty"`
// ActivityTitle is a property used to summarize the activity associated
// with a message card.
ActivityTitle string `json:"activityTitle,omitempty"`
// ActivitySubtitle is a property used to show brief, but extended
// information about an activity associated with a message card. Examples
// include the date and time the associated activity was taken or the
// handle of a person associated with the activity.
ActivitySubtitle string `json:"activitySubtitle,omitempty"`
// ActivityText is a property used to provide details about the activity.
// For example, if the message card is used to deliver updates about a
// topic, then this property would be used to hold the bulk of the content
// for the update notification.
ActivityText string `json:"activityText,omitempty"`
// Markdown represents a toggle to enable or disable Markdown formatting.
// By default, all text fields in a card and its sections can be formatted
// using basic Markdown.
Markdown bool `json:"markdown,omitempty"`
// StartGroup is the section's startGroup property. This property marks
// the start of a logical group of information. Typically, sections with
// startGroup set to true will be visually separated from previous card
// elements.
StartGroup bool `json:"startGroup,omitempty"`
// HeroImage is a property that allows for setting an image as the
// centerpiece of a message card. This property can also be used to add a
// banner to the message card.
// Note: heroImage is not currently supported by Microsoft Teams
// https://stackoverflow.com/a/45389789
// We use a pointer to this type in order to have the json package
// properly omit this field if not explicitly set.
// https://github.com/golang/go/issues/11939
// https://stackoverflow.com/questions/18088294/how-to-not-marshal-an-empty-struct-into-json-with-go
// https://stackoverflow.com/questions/33447334/golang-json-marshal-how-to-omit-empty-nested-struct
HeroImage *MessageCardSectionImage `json:"heroImage,omitempty"`
// Facts is a collection of MessageCardSectionFact values. A section entry
// usually is displayed in a two-column key/value format.
Facts []MessageCardSectionFact `json:"facts,omitempty"`
// Images is a property that allows for the inclusion of a photo gallery
// inside a section.
// We use a slice of pointers to this type in order to have the json
// package properly omit this field if not explicitly set.
// https://github.com/golang/go/issues/11939
// https://stackoverflow.com/questions/18088294/how-to-not-marshal-an-empty-struct-into-json-with-go
// https://stackoverflow.com/questions/33447334/golang-json-marshal-how-to-omit-empty-nested-struct
Images []*MessageCardSectionImage `json:"images,omitempty"`
}
// MessageCard represents a legacy actionable message card used via Office 365
// or Microsoft Teams connectors.
type MessageCard struct {
// Required; must be set to "MessageCard"
Type string `json:"@type"`
// Required; must be set to "https://schema.org/extensions"
Context string `json:"@context"`
// Summary is required if the card does not contain a text property,
// otherwise optional. The summary property is typically displayed in the
// list view in Outlook, as a way to quickly determine what the card is
// all about. Summary appears to only be used when there are sections defined
Summary string `json:"summary,omitempty"`
// Title is the title property of a card. It is meant to be rendered in a
// prominent way, at the very top of the card. Use it to introduce the
// content of the card in such a way users will immediately know what to
// expect.
Title string `json:"title,omitempty"`
// Text is required if the card does not contain a summary property,
// otherwise optional. The text property is meant to be displayed in a
// normal font below the card's title. Use it to display content, such as
// the description of the entity being referenced, or an abstract of a
// news article.
Text string `json:"text,omitempty"`
// Specifies a custom brand color for the card. The color will be
// displayed in a non-obtrusive manner.
ThemeColor string `json:"themeColor,omitempty"`
// Sections is a collection of sections to include in the card.
Sections []*MessageCardSection `json:"sections,omitempty"`
}
// AddSection adds one or many additional MessageCardSection values to a
// MessageCard. Validation is performed to reject invalid values with an error
// message.
func (mc *MessageCard) AddSection(section ...*MessageCardSection) error {
for _, s := range section {
// bail if a completely nil section provided
if s == nil {
return fmt.Errorf("func AddSection: nil MessageCardSection received")
}
// Perform validation of all MessageCardSection fields in an effort to
// avoid adding a MessageCardSection with zero value fields. This is
// done to avoid generating an empty sections JSON array since the
// Sections slice for the MessageCard type would technically not be at
// a zero value state. Due to this non-zero value state, the
// encoding/json package would end up including the Sections struct
// field in the output JSON.
// See also https://github.com/golang/go/issues/11939
switch {
// If any of these cases trigger, skip over the `default` case
// statement and add the section.
case s.Images != nil:
case s.Facts != nil:
case s.HeroImage != nil:
case s.StartGroup:
case s.Markdown:
case s.ActivityText != "":
case s.ActivitySubtitle != "":
case s.ActivityTitle != "":
case s.ActivityImage != "":
case s.Text != "":
case s.Title != "":
default:
return fmt.Errorf("all fields found to be at zero-value, skipping section")
}
mc.Sections = append(mc.Sections, s)
}
return nil
}
// AddFact adds one or many additional MessageCardSectionFact values to a
// MessageCardSection
func (mcs *MessageCardSection) AddFact(fact ...MessageCardSectionFact) error |
// AddFactFromKeyValue accepts a key and slice of values and converts them to
// MessageCardSectionFact values
func (mcs *MessageCardSection) AddFactFromKeyValue(key string, values ...string) error {
// validate arguments
if key == "" {
return errors.New("empty key received for new fact")
}
if len(values) < 1 {
return errors.New("no values received for new fact")
}
fact := MessageCardSectionFact{
Name: key,
Value: strings.Join(values, ", "),
}
// TODO: Explicitly define or use constructor?
// fact := NewMessageCardSectionFact()
// fact.Name = key
// fact.Value = strings.Join(values, ", ")
mcs.Facts = append(mcs.Facts, fact)
// if we made it this far then all should be well
return nil
}
// AddImage adds an image to a MessageCard section. These images are used to
// provide a photo gallery inside a MessageCard section.
func (mcs *MessageCardSection) AddImage(sectionImage ...MessageCardSectionImage) error {
for i := range sectionImage {
if sectionImage[i].Image == "" {
return fmt.Errorf("cannot add empty image URL")
}
if sectionImage[i].Title == "" {
return fmt.Errorf("cannot add empty image title")
}
mcs.Images = append(mcs.Images, §ionImage[i])
}
return nil
}
// AddHeroImageStr adds a Hero Image to a MessageCard section using string
// arguments. This image is used as the centerpiece or banner of a message
// card.
func (mcs *MessageCardSection) AddHeroImageStr(imageURL string, imageTitle string) error {
if imageURL == "" {
return fmt.Errorf("cannot add empty hero image URL")
}
if imageTitle == "" {
return fmt.Errorf("cannot add empty hero image title")
}
heroImage := MessageCardSectionImage{
Image: imageURL,
Title: imageTitle,
}
// TODO: Explicitly define or use constructor?
// heroImage := NewMessageCardSectionImage()
// heroImage.Image = imageURL
// heroImage.Title = imageTitle
mcs.HeroImage = &heroImage
// our validation checks didn't find any problems
return nil
}
// AddHeroImage adds a Hero Image to a MessageCard section using a
// MessageCardSectionImage argument. This image is used as the centerpiece or
// banner of a message card.
func (mcs *MessageCardSection) AddHeroImage(heroImage MessageCardSectionImage) error {
if heroImage.Image == "" {
return fmt.Errorf("cannot add empty hero image URL")
}
if heroImage.Title == "" {
return fmt.Errorf("cannot add empty hero image title")
}
mcs.HeroImage = &heroImage
// our validation checks didn't find any problems
return nil
}
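// Illustrative sketch (not part of the original source): the two hero image
// helpers are interchangeable; both reject empty URLs or titles. The values
// below are placeholders.
//
//   section := NewMessageCardSection()
//   _ = section.AddHeroImageStr("https://example.org/banner.png", "Banner")
//   // equivalent to:
//   _ = section.AddHeroImage(MessageCardSectionImage{
//       Image: "https://example.org/banner.png",
//       Title: "Banner",
//   })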
// NewMessageCard creates a new message card with fields required by the
// legacy message card format already predefined
func NewMessageCard() MessageCard {
// define expected values to meet Office 365 Connector card requirements
// https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#card-fields
msgCard := MessageCard{
Type: "MessageCard",
Context: "https://schema.org/extensions",
}
return msgCard
}
// NewMessageCardSection creates an empty message card section
func NewMessageCardSection() *MessageCardSection {
msgCardSection := MessageCardSection{}
return &msgCardSection
}
// NewMessageCardSectionFact creates an empty message card section fact
func NewMessageCardSectionFact() MessageCardSectionFact {
msgCardSectionFact := MessageCardSectionFact{}
return msgCardSectionFact
}
// NewMessageCardSectionImage creates an empty image for use with message card
// section
func NewMessageCardSectionImage() MessageCardSectionImage {
msgCardSectionImage := MessageCardSectionImage{}
return msgCardSectionImage
}
| {
for _, f := range fact {
if f.Name == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
if f.Value == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
}
mcs.Facts = append(mcs.Facts, fact...)
return nil
} | identifier_body |
messagecard.go | package goteamsnotify
import (
"errors"
"fmt"
"strings"
)
// MessageCardSectionFact represents a section fact entry that is usually
// displayed in a two-column key/value format.
type MessageCardSectionFact struct {
// Name is the key for an associated value in a key/value pair
Name string `json:"name"`
// Value is the value for an associated key in a key/value pair
Value string `json:"value"`
}
// MessageCardSectionImage represents an image as used by the heroImage and
// images properties of a section.
type MessageCardSectionImage struct {
// Image is the URL to the image.
Image string `json:"image"`
// Title is a short description of the image. Typically, this description
// is displayed in a tooltip as the user hovers their mouse over the
// image.
Title string `json:"title"`
}
// MessageCardSection represents a section to include in a message card.
type MessageCardSection struct {
// Title is the title property of a section. This property is displayed
// in a font that stands out, while not as prominent as the card's title.
// It is meant to introduce the section and summarize its content,
// similarly to how the card's title property is meant to summarize the
// whole card.
Title string `json:"title,omitempty"`
// Text is the section's text property. This property is very similar to
// the text property of the card. It can be used for the same purpose.
Text string `json:"text,omitempty"`
// ActivityImage is a property used to display a picture associated with
// the subject of a message card. For example, this might be the portrait
// of a person who performed an activity that the message card is
// associated with.
ActivityImage string `json:"activityImage,omitempty"`
// ActivityTitle is a property used to summarize the activity associated
// with a message card.
ActivityTitle string `json:"activityTitle,omitempty"`
// ActivitySubtitle is a property used to show brief, but extended
// information about an activity associated with a message card. Examples
// include the date and time the associated activity was taken or the
// handle of a person associated with the activity.
ActivitySubtitle string `json:"activitySubtitle,omitempty"`
// ActivityText is a property used to provide details about the activity.
// For example, if the message card is used to deliver updates about a
// topic, then this property would be used to hold the bulk of the content
// for the update notification.
ActivityText string `json:"activityText,omitempty"`
// Markdown represents a toggle to enable or disable Markdown formatting.
// By default, all text fields in a card and its sections can be formatted
// using basic Markdown.
Markdown bool `json:"markdown,omitempty"`
// StartGroup is the section's startGroup property. This property marks
// the start of a logical group of information. Typically, sections with
// startGroup set to true will be visually separated from previous card
// elements.
StartGroup bool `json:"startGroup,omitempty"`
// HeroImage is a property that allows for setting an image as the
// centerpiece of a message card. This property can also be used to add a
// banner to the message card.
// Note: heroImage is not currently supported by Microsoft Teams
// https://stackoverflow.com/a/45389789
// We use a pointer to this type in order to have the json package
// properly omit this field if not explicitly set.
// https://github.com/golang/go/issues/11939
// https://stackoverflow.com/questions/18088294/how-to-not-marshal-an-empty-struct-into-json-with-go
// https://stackoverflow.com/questions/33447334/golang-json-marshal-how-to-omit-empty-nested-struct
HeroImage *MessageCardSectionImage `json:"heroImage,omitempty"`
// Facts is a collection of MessageCardSectionFact values. A section entry
// usually is displayed in a two-column key/value format.
Facts []MessageCardSectionFact `json:"facts,omitempty"`
// Images is a property that allows for the inclusion of a photo gallery
// inside a section.
// We use a slice of pointers to this type in order to have the json
// package properly omit this field if not explicitly set.
// https://github.com/golang/go/issues/11939
// https://stackoverflow.com/questions/18088294/how-to-not-marshal-an-empty-struct-into-json-with-go
// https://stackoverflow.com/questions/33447334/golang-json-marshal-how-to-omit-empty-nested-struct
Images []*MessageCardSectionImage `json:"images,omitempty"`
}
// MessageCard represents a legacy actionable message card used via Office 365
// or Microsoft Teams connectors.
type MessageCard struct {
// Required; must be set to "MessageCard"
Type string `json:"@type"`
// Required; must be set to "https://schema.org/extensions"
Context string `json:"@context"`
// Summary is required if the card does not contain a text property,
// otherwise optional. The summary property is typically displayed in the
// list view in Outlook, as a way to quickly determine what the card is
// all about. Summary appears to only be used when there are sections defined.
Summary string `json:"summary,omitempty"`
// Title is the title property of a card. It is meant to be rendered in a
// prominent way, at the very top of the card. Use it to introduce the
// content of the card in such a way users will immediately know what to
// expect.
Title string `json:"title,omitempty"`
// Text is required if the card does not contain a summary property,
// otherwise optional. The text property is meant to be displayed in a
// normal font below the card's title. Use it to display content, such as
// the description of the entity being referenced, or an abstract of a
// news article.
Text string `json:"text,omitempty"`
// Specifies a custom brand color for the card. The color will be
// displayed in a non-obtrusive manner.
ThemeColor string `json:"themeColor,omitempty"`
// Sections is a collection of sections to include in the card.
Sections []*MessageCardSection `json:"sections,omitempty"`
}
// AddSection adds one or many additional MessageCardSection values to a
// MessageCard. Validation is performed to reject invalid values with an error
// message.
func (mc *MessageCard) AddSection(section ...*MessageCardSection) error {
for _, s := range section {
// bail if a completely nil section provided
if s == nil {
return fmt.Errorf("func AddSection: nil MessageCardSection received")
}
// Perform validation of all MessageCardSection fields in an effort to
// avoid adding a MessageCardSection with zero value fields. This is
// done to avoid generating an empty sections JSON array since the
// Sections slice for the MessageCard type would technically not be at
// a zero value state. Due to this non-zero value state, the
// encoding/json package would end up including the Sections struct
// field in the output JSON.
// See also https://github.com/golang/go/issues/11939
switch {
// If any of these cases trigger, skip over the `default` case
// statement and add the section.
case s.Images != nil:
case s.Facts != nil:
case s.HeroImage != nil:
case s.StartGroup:
case s.Markdown:
case s.ActivityText != "":
case s.ActivitySubtitle != "":
case s.ActivityTitle != "":
case s.ActivityImage != "":
case s.Text != "":
case s.Title != "":
default:
return fmt.Errorf("all fields found to be at zero-value, skipping section")
}
mc.Sections = append(mc.Sections, s)
}
return nil
}
// AddFact adds one or many additional MessageCardSectionFact values to a
// MessageCardSection
func (mcs *MessageCardSection) AddFact(fact ...MessageCardSectionFact) error {
for _, f := range fact {
if f.Name == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
if f.Value == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
}
mcs.Facts = append(mcs.Facts, fact...)
return nil
}
// AddFactFromKeyValue accepts a key and slice of values and converts them to
// MessageCardSectionFact values
func (mcs *MessageCardSection) AddFactFromKeyValue(key string, values ...string) error {
// validate arguments
if key == "" {
return errors.New("empty key received for new fact")
}
if len(values) < 1 {
return errors.New("no values received for new fact")
}
fact := MessageCardSectionFact{
Name: key,
Value: strings.Join(values, ", "),
}
// TODO: Explicitly define or use constructor?
// fact := NewMessageCardSectionFact()
// fact.Name = key
// fact.Value = strings.Join(values, ", ")
mcs.Facts = append(mcs.Facts, fact)
// if we made it this far then all should be well
return nil
}
// AddImage adds an image to a MessageCard section. These images are used to
// provide a photo gallery inside a MessageCard section.
func (mcs *MessageCardSection) AddImage(sectionImage ...MessageCardSectionImage) error {
for i := range sectionImage {
if sectionImage[i].Image == "" {
return fmt.Errorf("cannot add empty image URL")
}
if sectionImage[i].Title == "" {
return fmt.Errorf("cannot add empty image title")
}
mcs.Images = append(mcs.Images, &sectionImage[i])
}
return nil
}
// AddHeroImageStr adds a Hero Image to a MessageCard section using string
// arguments. This image is used as the centerpiece or banner of a message
// card.
func (mcs *MessageCardSection) AddHeroImageStr(imageURL string, imageTitle string) error {
if imageURL == "" {
return fmt.Errorf("cannot add empty hero image URL")
}
if imageTitle == "" {
return fmt.Errorf("cannot add empty hero image title")
}
heroImage := MessageCardSectionImage{
Image: imageURL,
Title: imageTitle,
}
// TODO: Explicitly define or use constructor?
// heroImage := NewMessageCardSectionImage()
// heroImage.Image = imageURL
// heroImage.Title = imageTitle
mcs.HeroImage = &heroImage
// our validation checks didn't find any problems
return nil
}
// AddHeroImage adds a Hero Image to a MessageCard section using a
// MessageCardSectionImage argument. This image is used as the centerpiece or
// banner of a message card.
func (mcs *MessageCardSection) AddHeroImage(heroImage MessageCardSectionImage) error {
if heroImage.Image == "" |
if heroImage.Title == "" {
return fmt.Errorf("cannot add empty hero image title")
}
mcs.HeroImage = &heroImage
// our validation checks didn't find any problems
return nil
}
// NewMessageCard creates a new message card with fields required by the
// legacy message card format already predefined
func NewMessageCard() MessageCard {
// define expected values to meet Office 365 Connector card requirements
// https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#card-fields
msgCard := MessageCard{
Type: "MessageCard",
Context: "https://schema.org/extensions",
}
return msgCard
}
// NewMessageCardSection creates an empty message card section
func NewMessageCardSection() *MessageCardSection {
msgCardSection := MessageCardSection{}
return &msgCardSection
}
// NewMessageCardSectionFact creates an empty message card section fact
func NewMessageCardSectionFact() MessageCardSectionFact {
msgCardSectionFact := MessageCardSectionFact{}
return msgCardSectionFact
}
// NewMessageCardSectionImage creates an empty image for use with message card
// section
func NewMessageCardSectionImage() MessageCardSectionImage {
msgCardSectionImage := MessageCardSectionImage{}
return msgCardSectionImage
}
| {
return fmt.Errorf("cannot add empty hero image URL")
} | conditional_block |
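// Usage sketch: exercising AddImage and AddHeroImageStr. Illustrative only:
// the URLs and titles are placeholders, the function is written as if it were
// in the same package, and "encoding/json" is assumed to be imported. Because
// HeroImage is a pointer and Images is a slice of pointers, a section that
// never calls these helpers marshals without either key.
func exampleSectionImages() (string, error) {
    section := NewMessageCardSection()
    section.Title = "Build artifacts"
    gallery := NewMessageCardSectionImage()
    gallery.Image = "https://example.com/build.png"
    gallery.Title = "build status"
    if err := section.AddImage(gallery); err != nil {
        return "", err
    }
    if err := section.AddHeroImageStr("https://example.com/banner.png", "banner"); err != nil {
        return "", err
    }
    out, err := json.Marshal(section)
    return string(out), err
}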
timer.rs | //! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.
use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};
use libc::{c_int, clockid_t, pid_t};
/// Timers can use various clocks. See `timer_create(2)`.
pub enum Clock {
/// Use `CLOCK_REALTIME` for the timer.
Realtime,
/// Use `CLOCK_MONOTONIC` for the timer.
Monotonic,
}
/// Strong thread-id type to prevent accidental conversion of pid_t.
pub struct Tid(pid_t);
/// Convenience helper to get the current thread ID suitable to pass to a
/// `TimerEvent::ThreadSignal` entry.
pub fn gettid() -> Tid {
Tid(unsafe { libc::syscall(libc::SYS_gettid) } as pid_t)
}
/// Strong signal type which is more advanced than nix::sys::signal::Signal as
/// it doesn't prevent you from using signals that the nix crate is unaware
/// of...!
pub struct Signal(c_int);
impl Into<c_int> for Signal {
fn into(self) -> c_int {
self.0
}
}
impl From<c_int> for Signal {
fn from(v: c_int) -> Signal {
Signal(v)
}
}
/// When instantiating a Timer, it needs to have an event type associated with
/// it to be fired whenever the timer expires. Most of the time this will be a
/// `Signal`. Sometimes we need to be able to send signals to specific threads.
pub enum TimerEvent {
/// This will act like passing `NULL` to `timer_create()`, which maps to
/// using the same as `Signal(SIGALRM)`.
None,
/// When the timer expires, send a specific signal to the current process.
Signal(Signal),
/// When the timer expires, send a specific signal to a specific thread.
ThreadSignal(Tid, Signal),
/// Convenience value to send a signal to the current thread. This is
/// equivalent to using `ThreadSignal(gettid(), signal)`.
ThisThreadSignal(Signal),
}
// timer_t is a pointer type, so we create a strongly typed internal handle
// type for it
#[repr(C)]
struct InternalTimerT(u32);
type TimerT = *mut InternalTimerT;
// These wrappers are defined in -lrt.
#[link(name = "rt")]
extern "C" {
fn timer_create(clockid: clockid_t, evp: *mut libc::sigevent, timer: *mut TimerT) -> c_int;
fn timer_delete(timer: TimerT) -> c_int;
fn timer_settime(
timerid: TimerT,
flags: c_int,
new_value: *const libc::itimerspec,
old_value: *mut libc::itimerspec,
) -> c_int;
}
/// Represents a POSIX per-process timer as created via `timer_create(2)`.
pub struct Timer {
timer: TimerT,
}
/// Timer specification used to arm a `Timer`.
#[derive(Default)]
pub struct TimerSpec {
/// The timeout to the next timer event.
pub value: Option<Duration>,
/// When a timer expires, it may be automatically rearmed with another
/// timeout. This will keep happening until this is explicitly disabled
/// or the timer deleted.
pub interval: Option<Duration>,
}
// Helpers to convert between libc::timespec and Option<Duration>
fn opt_duration_to_timespec(v: Option<Duration>) -> libc::timespec {
match v {
None => libc::timespec {
tv_sec: 0,
tv_nsec: 0,
},
Some(value) => libc::timespec {
tv_sec: value.as_secs() as i64,
tv_nsec: value.subsec_nanos() as i64,
},
}
}
fn | (v: libc::timespec) -> Option<Duration> {
if v.tv_sec == 0 && v.tv_nsec == 0 {
None
} else {
Some(Duration::new(v.tv_sec as u64, v.tv_nsec as u32))
}
}
impl TimerSpec {
// Helpers to convert between TimerSpec and libc::itimerspec
fn to_itimerspec(&self) -> libc::itimerspec {
libc::itimerspec {
it_value: opt_duration_to_timespec(self.value),
it_interval: opt_duration_to_timespec(self.interval),
}
}
fn from_itimerspec(ts: libc::itimerspec) -> Self {
TimerSpec {
value: timespec_to_opt_duration(ts.it_value),
interval: timespec_to_opt_duration(ts.it_interval),
}
}
/// Create an empty timer specification representing a disabled timer.
pub fn new() -> Self {
TimerSpec {
value: None,
interval: None,
}
}
/// Change the specification to have a specific value.
pub fn value(self, value: Option<Duration>) -> Self {
TimerSpec {
value,
interval: self.interval,
}
}
/// Change the specification to have a specific interval.
pub fn interval(self, interval: Option<Duration>) -> Self {
TimerSpec {
value: self.value,
interval,
}
}
}
impl Timer {
/// Create a Timer object governing a POSIX timer.
pub fn create(clock: Clock, event: TimerEvent) -> io::Result<Timer> {
// Map from our clock type to the libc id
let clkid = match clock {
Clock::Realtime => libc::CLOCK_REALTIME,
Clock::Monotonic => libc::CLOCK_MONOTONIC,
} as clockid_t;
// Map the TimerEvent to libc::sigevent
let mut ev: libc::sigevent = unsafe { mem::zeroed() };
match event {
TimerEvent::None => ev.sigev_notify = libc::SIGEV_NONE,
TimerEvent::Signal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_SIGNAL;
}
TimerEvent::ThreadSignal(tid, signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = tid.0;
}
TimerEvent::ThisThreadSignal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = gettid().0;
}
}
// Create the timer
let mut timer: TimerT = unsafe { mem::zeroed() };
let rc = unsafe { timer_create(clkid, &mut ev, &mut timer) };
if rc != 0 {
Err(io::Error::last_os_error())
} else {
Ok(Timer { timer })
}
}
/// Arm a timer. This returns the previous timer specification.
pub fn arm(&mut self, spec: TimerSpec) -> io::Result<TimerSpec> {
let newspec = spec.to_itimerspec();
let mut oldspec = MaybeUninit::<libc::itimerspec>::uninit();
let rc = unsafe { timer_settime(self.timer, 0, &newspec, &mut *oldspec.as_mut_ptr()) };
if rc != 0 {
return Err(io::Error::last_os_error());
}
Ok(TimerSpec::from_itimerspec(unsafe { oldspec.assume_init() }))
}
}
impl Drop for Timer {
fn drop(&mut self) {
unsafe {
timer_delete(self.timer);
}
}
}
/// This is the signal number we use in our timeout implementations. We expect
/// the signal handler for this signal to never be replaced by some other
/// library. If this does happen, we need to find another signal. There should
/// be plenty.
/// Currently this is SIGRTMIN+4, the 5th real-time signal. glibc reserves the
/// first two for pthread internals.
pub const SIGTIMEOUT: Signal = Signal(32 + 4);
// Our timeout handler does exactly nothing. We only need it to interrupt
// system calls.
extern "C" fn sig_timeout_handler(_: c_int) {}
// See setup_timeout_handler().
fn do_setup_timeout_handler() -> io::Result<()> {
// Unfortunately nix::sys::signal::Signal cannot represent real time
// signals, so we need to use libc instead...
//
// This WOULD be a nicer impl though:
//nix::sys::signal::sigaction(
// SIGTIMEOUT,
// nix::sys::signal::SigAction::new(
// nix::sys::signal::SigHandler::Handler(sig_timeout_handler),
// nix::sys::signal::SaFlags::empty(),
// nix::sys::signal::SigSet::all()))
// .map(|_|())
unsafe {
let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *sa_mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *sa_mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
{
return Err(io::Error::last_os_error());
}
let sa = libc::sigaction {
sa_sigaction:
// libc::sigaction uses `usize` for the function pointer...
sig_timeout_handler as *const extern "C" fn(i32) as usize,
sa_mask: sa_mask.assume_init(),
sa_flags: 0,
sa_restorer: None,
};
if libc::sigaction(SIGTIMEOUT.0, &sa, std::ptr::null_mut()) != 0 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
// The first time we unblock SIGTIMEOUT should cause appropriate initialization:
static SETUP_TIMEOUT_HANDLER: std::sync::Once = std::sync::Once::new();
/// Setup our timeout-signal workflow. This establishes the signal handler for
/// our `SIGTIMEOUT` and should be called once during initialization.
#[inline]
pub fn setup_timeout_handler() {
SETUP_TIMEOUT_HANDLER.call_once(|| {
// We unwrap here.
// If setting up this handler fails you have other problems already,
// plus, if setting up fails you can't *use* it either, so everything
// goes to die.
do_setup_timeout_handler().unwrap();
});
}
/// This guards the state of the timeout signal: We want it blocked usually.
pub struct TimeoutBlockGuard(bool);
impl Drop for TimeoutBlockGuard {
fn drop(&mut self) {
if self.0 {
block_timeout_signal();
} else {
unblock_timeout_signal().forget();
}
}
}
impl TimeoutBlockGuard {
/// Convenience helper to "forget" to restore the signal block mask.
#[inline(always)]
pub fn forget(self) {
std::mem::forget(self);
}
/// Convenience helper to trigger the guard behavior immediately.
#[inline(always)]
pub fn trigger(self) {
std::mem::drop(self); // be explicit here...
}
}
/// Unblock the timeout signal for the current thread. By default we block the
/// signal; this behavior should be restored when done using timeouts, therefore this
/// returns a guard:
#[inline(always)]
pub fn unblock_timeout_signal() -> TimeoutBlockGuard {
// This calls std::sync::Once:
setup_timeout_handler();
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT.0);
//set.thread_unblock()?;
//Ok(TimeoutBlockGuard{})
// Again, nix crate and its signal limitations...
// NOTE:
// sigsetops(3) and pthread_sigmask(3) can only fail if invalid memory is
// passed to the kernel, or signal numbers are "invalid", since we know
// neither is the case we will panic on error...
let was_blocked = unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
let mut oldset = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(
libc::SIG_UNBLOCK,
&mask.assume_init(),
&mut *oldset.as_mut_ptr(),
) != 0
{
panic!("Impossibly failed to unblock SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
libc::sigismember(&oldset.assume_init(), SIGTIMEOUT.0) == 1
};
TimeoutBlockGuard(was_blocked)
}
/// Block the timeout signal for the current thread. This is the default.
#[inline(always)]
pub fn block_timeout_signal() {
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT);
//set.thread_block()
unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(libc::SIG_BLOCK, &mask.assume_init(), std::ptr::null_mut())
!= 0
{
panic!("Impossibly failed to block SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
}
}
| timespec_to_opt_duration | identifier_name |
timer.rs | //! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.
use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};
use libc::{c_int, clockid_t, pid_t};
/// Timers can use various clocks. See `timer_create(2)`.
pub enum Clock {
/// Use `CLOCK_REALTIME` for the timer.
Realtime,
/// Use `CLOCK_MONOTONIC` for the timer.
Monotonic,
}
/// Strong thread-id type to prevent accidental conversion of pid_t.
pub struct Tid(pid_t);
/// Convenience helper to get the current thread ID suitable to pass to a
/// `TimerEvent::ThreadSignal` entry.
pub fn gettid() -> Tid {
Tid(unsafe { libc::syscall(libc::SYS_gettid) } as pid_t)
}
/// Strong signal type which is more advanced than nix::sys::signal::Signal as
/// it doesn't prevent you from using signals that the nix crate is unaware
/// of...!
pub struct Signal(c_int);
impl Into<c_int> for Signal {
fn into(self) -> c_int {
self.0
}
}
impl From<c_int> for Signal {
fn from(v: c_int) -> Signal {
Signal(v)
}
}
/// When instantiating a Timer, it needs to have an event type associated with
/// it to be fired whenever the timer expires. Most of the time this will be a
/// `Signal`. Sometimes we need to be able to send signals to specific threads.
pub enum TimerEvent {
/// This will act like passing `NULL` to `timer_create()`, which maps to
/// using the same as `Signal(SIGALRM)`.
None,
/// When the timer expires, send a specific signal to the current process.
Signal(Signal),
/// When the timer expires, send a specific signal to a specific thread.
ThreadSignal(Tid, Signal),
/// Convenience value to send a signal to the current thread. This is
/// equivalent to using `ThreadSignal(gettid(), signal)`.
ThisThreadSignal(Signal),
}
// timer_t is a pointer type, so we create a strongly typed internal handle
// type for it
#[repr(C)]
struct InternalTimerT(u32);
type TimerT = *mut InternalTimerT;
// These wrappers are defined in -lrt.
#[link(name = "rt")]
extern "C" {
fn timer_create(clockid: clockid_t, evp: *mut libc::sigevent, timer: *mut TimerT) -> c_int;
fn timer_delete(timer: TimerT) -> c_int;
fn timer_settime(
timerid: TimerT,
flags: c_int,
new_value: *const libc::itimerspec,
old_value: *mut libc::itimerspec,
) -> c_int;
}
/// Represents a POSIX per-process timer as created via `timer_create(2)`.
pub struct Timer {
timer: TimerT,
}
/// Timer specification used to arm a `Timer`.
#[derive(Default)]
pub struct TimerSpec {
/// The timeout to the next timer event.
pub value: Option<Duration>,
/// When a timer expires, it may be automatically rearmed with another
/// timeout. This will keep happening until this is explicitly disabled
/// or the timer deleted.
pub interval: Option<Duration>,
}
// Helpers to convert between libc::timespec and Option<Duration>
fn opt_duration_to_timespec(v: Option<Duration>) -> libc::timespec {
match v {
None => libc::timespec {
tv_sec: 0,
tv_nsec: 0,
},
Some(value) => libc::timespec {
tv_sec: value.as_secs() as i64,
tv_nsec: value.subsec_nanos() as i64,
},
}
}
fn timespec_to_opt_duration(v: libc::timespec) -> Option<Duration> {
if v.tv_sec == 0 && v.tv_nsec == 0 {
None
} else {
Some(Duration::new(v.tv_sec as u64, v.tv_nsec as u32))
}
}
impl TimerSpec {
// Helpers to convert between TimerSpec and libc::itimerspec
fn to_itimerspec(&self) -> libc::itimerspec {
libc::itimerspec {
it_value: opt_duration_to_timespec(self.value),
it_interval: opt_duration_to_timespec(self.interval),
}
}
fn from_itimerspec(ts: libc::itimerspec) -> Self {
TimerSpec {
value: timespec_to_opt_duration(ts.it_value),
interval: timespec_to_opt_duration(ts.it_interval),
}
}
/// Create an empty timer specification representing a disabled timer.
pub fn new() -> Self {
TimerSpec {
value: None,
interval: None,
}
}
/// Change the specification to have a specific value.
pub fn value(self, value: Option<Duration>) -> Self {
TimerSpec {
value,
interval: self.interval,
}
}
/// Change the specification to have a specific interval.
pub fn interval(self, interval: Option<Duration>) -> Self {
TimerSpec {
value: self.value,
interval,
}
}
}
impl Timer {
/// Create a Timer object governing a POSIX timer.
pub fn create(clock: Clock, event: TimerEvent) -> io::Result<Timer> {
// Map from our clock type to the libc id
let clkid = match clock {
Clock::Realtime => libc::CLOCK_REALTIME,
Clock::Monotonic => libc::CLOCK_MONOTONIC,
} as clockid_t;
// Map the TimerEvent to libc::sigevent
let mut ev: libc::sigevent = unsafe { mem::zeroed() };
match event {
TimerEvent::None => ev.sigev_notify = libc::SIGEV_NONE,
TimerEvent::Signal(signo) => |
TimerEvent::ThreadSignal(tid, signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = tid.0;
}
TimerEvent::ThisThreadSignal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = gettid().0;
}
}
// Create the timer
let mut timer: TimerT = unsafe { mem::zeroed() };
let rc = unsafe { timer_create(clkid, &mut ev, &mut timer) };
if rc != 0 {
Err(io::Error::last_os_error())
} else {
Ok(Timer { timer })
}
}
/// Arm a timer. This returns the previous timer specification.
pub fn arm(&mut self, spec: TimerSpec) -> io::Result<TimerSpec> {
let newspec = spec.to_itimerspec();
let mut oldspec = MaybeUninit::<libc::itimerspec>::uninit();
let rc = unsafe { timer_settime(self.timer, 0, &newspec, &mut *oldspec.as_mut_ptr()) };
if rc != 0 {
return Err(io::Error::last_os_error());
}
Ok(TimerSpec::from_itimerspec(unsafe { oldspec.assume_init() }))
}
}
impl Drop for Timer {
fn drop(&mut self) {
unsafe {
timer_delete(self.timer);
}
}
}
/// This is the signal number we use in our timeout implementations. We expect
/// the signal handler for this signal to never be replaced by some other
/// library. If this does happen, we need to find another signal. There should
/// be plenty.
/// Currently this is SIGRTMIN+4, the 5th real-time signal. glibc reserves the
/// first two for pthread internals.
pub const SIGTIMEOUT: Signal = Signal(32 + 4);
// Our timeout handler does exactly nothing. We only need it to interrupt
// system calls.
extern "C" fn sig_timeout_handler(_: c_int) {}
// See setup_timeout_handler().
fn do_setup_timeout_handler() -> io::Result<()> {
// Unfortunately nix::sys::signal::Signal cannot represent real time
// signals, so we need to use libc instead...
//
// This WOULD be a nicer impl though:
//nix::sys::signal::sigaction(
// SIGTIMEOUT,
// nix::sys::signal::SigAction::new(
// nix::sys::signal::SigHandler::Handler(sig_timeout_handler),
// nix::sys::signal::SaFlags::empty(),
// nix::sys::signal::SigSet::all()))
// .map(|_|())
unsafe {
let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *sa_mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *sa_mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
{
return Err(io::Error::last_os_error());
}
let sa = libc::sigaction {
sa_sigaction:
// libc::sigaction uses `usize` for the function pointer...
sig_timeout_handler as *const extern "C" fn(i32) as usize,
sa_mask: sa_mask.assume_init(),
sa_flags: 0,
sa_restorer: None,
};
if libc::sigaction(SIGTIMEOUT.0, &sa, std::ptr::null_mut()) != 0 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
// The first time we unblock SIGTIMEOUT should cause appropriate initialization:
static SETUP_TIMEOUT_HANDLER: std::sync::Once = std::sync::Once::new();
/// Setup our timeout-signal workflow. This establishes the signal handler for
/// our `SIGTIMEOUT` and should be called once during initialization.
#[inline]
pub fn setup_timeout_handler() {
SETUP_TIMEOUT_HANDLER.call_once(|| {
// We unwrap here.
// If setting up this handler fails you have other problems already,
// plus, if setting up fails you can't *use* it either, so everything
// goes to die.
do_setup_timeout_handler().unwrap();
});
}
/// This guards the state of the timeout signal: We want it blocked usually.
pub struct TimeoutBlockGuard(bool);
impl Drop for TimeoutBlockGuard {
fn drop(&mut self) {
if self.0 {
block_timeout_signal();
} else {
unblock_timeout_signal().forget();
}
}
}
impl TimeoutBlockGuard {
/// Convenience helper to "forget" to restore the signal block mask.
#[inline(always)]
pub fn forget(self) {
std::mem::forget(self);
}
/// Convenience helper to trigger the guard behavior immediately.
#[inline(always)]
pub fn trigger(self) {
std::mem::drop(self); // be explicit here...
}
}
/// Unblock the timeout signal for the current thread. By default we block the
/// signal; this behavior should be restored when done using timeouts, therefore this
/// returns a guard:
#[inline(always)]
pub fn unblock_timeout_signal() -> TimeoutBlockGuard {
// This calls std::sync::Once:
setup_timeout_handler();
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT.0);
//set.thread_unblock()?;
//Ok(TimeoutBlockGuard{})
// Again, nix crate and its signal limitations...
// NOTE:
// sigsetops(3) and pthread_sigmask(3) can only fail if invalid memory is
// passed to the kernel, or signal numbers are "invalid", since we know
// neither is the case we will panic on error...
let was_blocked = unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
let mut oldset = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(
libc::SIG_UNBLOCK,
&mask.assume_init(),
&mut *oldset.as_mut_ptr(),
) != 0
{
panic!("Impossibly failed to unblock SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
libc::sigismember(&oldset.assume_init(), SIGTIMEOUT.0) == 1
};
TimeoutBlockGuard(was_blocked)
}
/// Block the timeout signal for the current thread. This is the default.
#[inline(always)]
pub fn block_timeout_signal() {
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT);
//set.thread_block()
unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(libc::SIG_BLOCK, &mask.assume_init(), std::ptr::null_mut())
!= 0
{
panic!("Impossibly failed to block SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
}
}
| {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_SIGNAL;
} | conditional_block |
timer.rs | //! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.
use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};
use libc::{c_int, clockid_t, pid_t};
/// Timers can use various clocks. See `timer_create(2)`.
pub enum Clock {
/// Use `CLOCK_REALTIME` for the timer.
Realtime,
/// Use `CLOCK_MONOTONIC` for the timer.
Monotonic,
}
/// Strong thread-id type to prevent accidental conversion of pid_t.
pub struct Tid(pid_t);
/// Convenience helper to get the current thread ID suitable to pass to a
/// `TimerEvent::ThreadSignal` entry.
pub fn gettid() -> Tid {
Tid(unsafe { libc::syscall(libc::SYS_gettid) } as pid_t)
}
/// Strong signal type which is more advanced than nix::sys::signal::Signal as
/// it doesn't prevent you from using signals that the nix crate is unaware
/// of...!
pub struct Signal(c_int);
impl Into<c_int> for Signal {
fn into(self) -> c_int {
self.0
}
}
impl From<c_int> for Signal {
fn from(v: c_int) -> Signal {
Signal(v)
}
}
/// When instantiating a Timer, it needs to have an event type associated with
/// it to be fired whenever the timer expires. Most of the time this will be a
/// `Signal`. Sometimes we need to be able to send signals to specific threads.
pub enum TimerEvent {
/// This will act like passing `NULL` to `timer_create()`, which maps to
/// using the same as `Signal(SIGALRM)`.
None,
/// When the timer expires, send a specific signal to the current process.
Signal(Signal),
/// When the timer expires, send a specific signal to a specific thread.
ThreadSignal(Tid, Signal),
/// Convenience value to send a signal to the current thread. This is
/// equivalent to using `ThreadSignal(gettid(), signal)`.
ThisThreadSignal(Signal),
}
// timer_t is a pointer type, so we create a strongly typed internal handle
// type for it
#[repr(C)]
struct InternalTimerT(u32);
type TimerT = *mut InternalTimerT;
// These wrappers are defined in -lrt.
#[link(name = "rt")]
extern "C" {
fn timer_create(clockid: clockid_t, evp: *mut libc::sigevent, timer: *mut TimerT) -> c_int;
fn timer_delete(timer: TimerT) -> c_int;
fn timer_settime(
timerid: TimerT,
flags: c_int,
new_value: *const libc::itimerspec,
old_value: *mut libc::itimerspec,
) -> c_int;
}
/// Represents a POSIX per-process timer as created via `timer_create(2)`.
pub struct Timer {
timer: TimerT,
}
/// Timer specification used to arm a `Timer`.
#[derive(Default)]
pub struct TimerSpec {
/// The timeout to the next timer event.
pub value: Option<Duration>,
/// When a timer expires, it may be automatically rearmed with another
/// timeout. This will keep happening until this is explicitly disabled
/// or the timer deleted.
pub interval: Option<Duration>,
}
// Helpers to convert between libc::timespec and Option<Duration>
fn opt_duration_to_timespec(v: Option<Duration>) -> libc::timespec {
match v {
None => libc::timespec {
tv_sec: 0,
tv_nsec: 0,
}, | },
}
}
fn timespec_to_opt_duration(v: libc::timespec) -> Option<Duration> {
if v.tv_sec == 0 && v.tv_nsec == 0 {
None
} else {
Some(Duration::new(v.tv_sec as u64, v.tv_nsec as u32))
}
}
impl TimerSpec {
// Helpers to convert between TimerSpec and libc::itimerspec
fn to_itimerspec(&self) -> libc::itimerspec {
libc::itimerspec {
it_value: opt_duration_to_timespec(self.value),
it_interval: opt_duration_to_timespec(self.interval),
}
}
fn from_itimerspec(ts: libc::itimerspec) -> Self {
TimerSpec {
value: timespec_to_opt_duration(ts.it_value),
interval: timespec_to_opt_duration(ts.it_interval),
}
}
/// Create an empty timer specification representing a disabled timer.
pub fn new() -> Self {
TimerSpec {
value: None,
interval: None,
}
}
/// Change the specification to have a specific value.
pub fn value(self, value: Option<Duration>) -> Self {
TimerSpec {
value,
interval: self.interval,
}
}
/// Change the specification to have a specific interval.
pub fn interval(self, interval: Option<Duration>) -> Self {
TimerSpec {
value: self.value,
interval,
}
}
}
impl Timer {
/// Create a Timer object governing a POSIX timer.
pub fn create(clock: Clock, event: TimerEvent) -> io::Result<Timer> {
// Map from our clock type to the libc id
let clkid = match clock {
Clock::Realtime => libc::CLOCK_REALTIME,
Clock::Monotonic => libc::CLOCK_MONOTONIC,
} as clockid_t;
// Map the TimerEvent to libc::sigevent
let mut ev: libc::sigevent = unsafe { mem::zeroed() };
match event {
TimerEvent::None => ev.sigev_notify = libc::SIGEV_NONE,
TimerEvent::Signal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_SIGNAL;
}
TimerEvent::ThreadSignal(tid, signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = tid.0;
}
TimerEvent::ThisThreadSignal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = gettid().0;
}
}
// Create the timer
let mut timer: TimerT = unsafe { mem::zeroed() };
let rc = unsafe { timer_create(clkid, &mut ev, &mut timer) };
if rc != 0 {
Err(io::Error::last_os_error())
} else {
Ok(Timer { timer })
}
}
/// Arm a timer. This returns the previous timer specification.
pub fn arm(&mut self, spec: TimerSpec) -> io::Result<TimerSpec> {
let newspec = spec.to_itimerspec();
let mut oldspec = MaybeUninit::<libc::itimerspec>::uninit();
let rc = unsafe { timer_settime(self.timer, 0, &newspec, &mut *oldspec.as_mut_ptr()) };
if rc != 0 {
return Err(io::Error::last_os_error());
}
Ok(TimerSpec::from_itimerspec(unsafe { oldspec.assume_init() }))
}
}
impl Drop for Timer {
fn drop(&mut self) {
unsafe {
timer_delete(self.timer);
}
}
}
/// This is the signal number we use in our timeout implementations. We expect
/// the signal handler for this signal to never be replaced by some other
/// library. If this does happen, we need to find another signal. There should
/// be plenty.
/// Currently this is SIGRTMIN+4, the 5th real-time signal. glibc reserves the
/// first two for pthread internals.
pub const SIGTIMEOUT: Signal = Signal(32 + 4);
// Our timeout handler does exactly nothing. We only need it to interrupt
// system calls.
extern "C" fn sig_timeout_handler(_: c_int) {}
// See setup_timeout_handler().
fn do_setup_timeout_handler() -> io::Result<()> {
// Unfortunately nix::sys::signal::Signal cannot represent real time
// signals, so we need to use libc instead...
//
// This WOULD be a nicer impl though:
//nix::sys::signal::sigaction(
// SIGTIMEOUT,
// nix::sys::signal::SigAction::new(
// nix::sys::signal::SigHandler::Handler(sig_timeout_handler),
// nix::sys::signal::SaFlags::empty(),
// nix::sys::signal::SigSet::all()))
// .map(|_|())
unsafe {
let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *sa_mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *sa_mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
{
return Err(io::Error::last_os_error());
}
let sa = libc::sigaction {
sa_sigaction:
// libc::sigaction uses `usize` for the function pointer...
sig_timeout_handler as *const extern "C" fn(i32) as usize,
sa_mask: sa_mask.assume_init(),
sa_flags: 0,
sa_restorer: None,
};
if libc::sigaction(SIGTIMEOUT.0, &sa, std::ptr::null_mut()) != 0 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
// The first time we unblock SIGTIMEOUT should cause appropriate initialization:
static SETUP_TIMEOUT_HANDLER: std::sync::Once = std::sync::Once::new();
/// Setup our timeout-signal workflow. This establishes the signal handler for
/// our `SIGTIMEOUT` and should be called once during initialization.
#[inline]
pub fn setup_timeout_handler() {
SETUP_TIMEOUT_HANDLER.call_once(|| {
// We unwrap here.
// If setting up this handler fails you have other problems already,
// plus, if setting up fails you can't *use* it either, so everything
// goes to die.
do_setup_timeout_handler().unwrap();
});
}
/// This guards the state of the timeout signal: We want it blocked usually.
pub struct TimeoutBlockGuard(bool);
impl Drop for TimeoutBlockGuard {
fn drop(&mut self) {
if self.0 {
block_timeout_signal();
} else {
unblock_timeout_signal().forget();
}
}
}
impl TimeoutBlockGuard {
/// Convenience helper to "forget" to restore the signal block mask.
#[inline(always)]
pub fn forget(self) {
std::mem::forget(self);
}
/// Convenience helper to trigger the guard behavior immediately.
#[inline(always)]
pub fn trigger(self) {
std::mem::drop(self); // be explicit here...
}
}
/// Unblock the timeout signal for the current thread. By default we block the
/// signal; this behavior should be restored when done using timeouts, therefore this
/// returns a guard:
#[inline(always)]
pub fn unblock_timeout_signal() -> TimeoutBlockGuard {
// This calls std::sync::Once:
setup_timeout_handler();
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT.0);
//set.thread_unblock()?;
//Ok(TimeoutBlockGuard{})
// Again, nix crate and its signal limitations...
// NOTE:
// sigsetops(3) and pthread_sigmask(3) can only fail if invalid memory is
// passed to the kernel, or signal numbers are "invalid", since we know
// neither is the case we will panic on error...
let was_blocked = unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
let mut oldset = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(
libc::SIG_UNBLOCK,
&mask.assume_init(),
&mut *oldset.as_mut_ptr(),
) != 0
{
panic!("Impossibly failed to unblock SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
libc::sigismember(&oldset.assume_init(), SIGTIMEOUT.0) == 1
};
TimeoutBlockGuard(was_blocked)
}
/// Block the timeout signal for the current thread. This is the default.
#[inline(always)]
pub fn block_timeout_signal() {
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT);
//set.thread_block()
unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(libc::SIG_BLOCK, &mask.assume_init(), std::ptr::null_mut())
!= 0
{
panic!("Impossibly failed to block SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
}
} | Some(value) => libc::timespec {
tv_sec: value.as_secs() as i64,
tv_nsec: value.subsec_nanos() as i64, | random_line_split |
Main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/alert0/backsyn/logger"
"github.com/jlaffaye/ftp"
)
var backFilePath string = "back.json"
//// name of the fingerprint (hash) collection file
var hashFileName string = "hash.zlbf"
//var PATH = "";
// generated SQL script
var ORACLEBAKSQLPATHTL string = "oracle/backdirsql.tl"
var ORACLEBAKSQLPATH string = "bat/backdir.sql"
// generated batch file that executes the SQL script
var ORACLEBAKPATHTL string = "oracle/backdir.tl"
var ORACLEBAKPATH string = "bat/backdir.bat"
// generated batch file that runs the backup
var ORACLEBAKBATPATHTL string = "oracle/oracle.tl"
var ORACLEBAKBATPATH string = "bat/oracle.bat"
// scheduled task
var SCHTASKSPATHTL string = "oracle/schtasks.tl"
var SCHTASKSPATH string = "bat/schtasks.bat"
//start.bat
var STARTPATHTL string = "oracle/start.tl"
var STARTPATH string = "bat/start.bat"
// configuration file
type Backinfo struct {
TargetPath string
BackPath []string
FtpIp string
FtpUserName string
FtpPassWord string
OracleBakPath string
OracleURL string
BackFileName string
}
func main() {
//dir2 , err2 := getCurrentDirectory()
//if err2 != nil {
// logger.Println("获取当前目录失败" + err2.Error() )
//
//}
//
//logger.Println("文件目录: " + dir2 )
//
info, err := readBackInfoContent()
if err != nil {
logger.Println("读取配置文件错误文件内容错误: " + err.Error())
}
//initHashfile()
if !checkFileIsExist(ORACLEBAKSQLPATH) {
initbak(info)
}
if len(os.Args) > 1 {
cmd := os.Args[1]
logger.Println("cmd " + cmd)
switch cmd {
case "o":
BakOracleBat(info.OracleBakPath)
case "f":
BakFiles(info)
default:
//logger.Println("删除指纹文件错误文件内容错误: " + err.Error())
BakOracleBat(info.OracleBakPath)
BakFiles(info)
}
} else {
BakOracleBat(info.OracleBakPath)
BakFiles(info)
}
os.Exit(0)
}
//
func initHashfile() error {
t := time.Now()
if 1 == t.Day() { // first day of the month
err := os.Remove(hashFileName) // delete the fingerprint file
if err != nil {
logger.Println("删除指纹文件错误文件内容错误: " + err.Error())
}
}
//if err := createHashFile(); err != nil {
// logger.Println("读取指纹文件错误文件内容错误: " + err.Error())
// return err
//}
return nil
}
func initbak(info Backinfo) error {
OracleBakPath := strings.Replace(info.OracleBakPath, "/", "\\", -1)
var err = TemplateSaveFile(ORACLEBAKSQLPATHTL, ORACLEBAKSQLPATH, OracleBakPath)
if err != nil {
logger.Println("生成oracledir.sql 失败" + err.Error())
}
dir, err1 := getCurrentDirectory()
if err1 != nil {
logger.Println("获取当前目录失败" + err1.Error())
}
//logger.Println("ssssssssssss" + dir)
oracledir := map[string] |
}
err = TemplateSaveFile(ORACLEBAKPATHTL, ORACLEBAKPATH, oracledir)
if err != nil {
logger.Println("生成oracledir.bat 失败" + err.Error())
}
var oracledatatmp []string = strings.Split(info.OracleURL, "@")
if oracledatatmp == nil || len(oracledatatmp) < 2 {
logger.Println("读取oracle配置信息失败")
}
oracleddata := map[string]string{
"OracleBakPath": OracleBakPath,
"UserName": oracledatatmp[0],
"PassWord": oracledatatmp[1],
"DBName": oracledatatmp[2],
}
err = TemplateSaveFile(ORACLEBAKBATPATHTL, ORACLEBAKBATPATH, oracleddata)
if err != nil {
logger.Println("生成oracle.bat 失败" + err.Error())
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
baktime := fmt.Sprintf("0%d:%d%d", r.Intn(6), r.Intn(5), r.Intn(9))
logger.Println(baktime)
schtasks := map[string]string{
"dir": dir,
"time": baktime,
}
err = TemplateSaveFile(SCHTASKSPATHTL, SCHTASKSPATH, schtasks)
if err != nil {
logger.Println("生成schtasks.bat 失败" + err.Error())
}
err = TemplateSaveFile(STARTPATHTL, STARTPATH, dir)
if err != nil {
logger.Println("生成start.bat 失败" + err.Error())
}
err = execu(SCHTASKSPATH)
if err != nil {
logger.Println("运行schtasks.bat 失败" + err.Error())
return err
}
return nil
}
//oracleback
func BakOracleBat(oraclepath string) error {
dir, err := getCurrentDirectory()
if err != nil {
logger.Println("获取当前目录失败" + err.Error())
return err
}
if !checkFileIsExist(filepath.Join(dir, oraclepath)) {
err := execu(ORACLEBAKPATH)
if err != nil {
logger.Println("运行文件失败" + ORACLEBAKPATH + err.Error())
return err
}
}
err = execu(ORACLEBAKBATPATH)
if err != nil {
logger.Println("运行文件失败" + ORACLEBAKBATPATH + err.Error())
return err
}
return nil
}
func BakFiles(info Backinfo) error {
var xcplasttime = time.Now().AddDate(0, 0, -1).Format("01-02-2006")
var lasttime = time.Now().Format("2006-01-02")
var lastmoth = time.Now().Format("2006-01")
if !checkFileIsExist(hashFileName) {
if err := createHashFile(); err != nil {
logger.Println("读取指纹文件错误文件内容错误: " + err.Error())
//return err
}
xcplasttime = "01-02-2006"
}
if err := tarpath(info, lasttime, xcplasttime); err != nil {
logger.Println("复制文件失败" + err.Error())
}
if err := zipfiles(info.TargetPath, lasttime); err != nil {
logger.Println("压缩文件失败" + err.Error())
}
var remoteSavePath = lastmoth + "^" + strings.Replace(get_external(info.BackFileName), ".", "-", -1)
var oracledatatmp []string = strings.Split(info.OracleURL, "@")
files, _ := ioutil.ReadDir(info.TargetPath)
for _, file := range files {
if file.IsDir() {
continue
} else {
logger.Println(file.Name())
ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, filepath.Join(info.TargetPath, file.Name()), remoteSavePath, oracledatatmp[0]+file.Name())
}
}
//var localFile = filepath.Join(info.TargetPath, lasttime+".7z")
//
//var oracledatatmp []string = strings.Split(info.OracleURL, "@")
//
//
//logger.Println("压缩文件", remoteSavePath, lasttime+".7z")
//
//var err = ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, localFile, remoteSavePath, lasttime+oracledatatmp[0]+".7z")
//
//if err != nil {
// logger.Println("上传ftp文件失败" + err.Error())
// //ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, localFile, remoteSavePath, lasttime+oracledatatmp[0]+".7z")
//}
os.RemoveAll(info.TargetPath)
//return err
return nil
}
// read the back.json configuration file
func readBackInfoContent() (Backinfo, error) {
file, err := os.Open(backFilePath)
if err != nil {
logger.Println("读取指纹文件内容错误: " + err.Error())
}
defer func() {
err := file.Close()
if err != nil {
logger.Println("close指纹文件 " + backFilePath + " 失败: " + err.Error())
}
}()
jsonContent, err := ioutil.ReadAll(file)
if err != nil {
logger.Println("读取指纹文件内容错误: " + err.Error())
}
//content := make(&backinfo)
var backinfo Backinfo
err = json.Unmarshal(jsonContent, &backinfo)
if err != nil {
logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
}
logger.Println(backinfo.BackPath[0])
return backinfo, err
}
//
// create the fingerprint file
func createHashFile() error {
defer func() {
if err_p := recover(); err_p != nil {
logger.Println("createHashFile模块出错")
}
}()
var hashFilePath = filepath.Join(hashFileName)
if !checkFileIsExist(hashFilePath) {
err := ioutil.WriteFile(hashFilePath, []byte("{}"), 0777)
if err != nil {
logger.Println("创建指纹文件失败: " + err.Error())
return err
}
}
return nil
}
/**
* checks whether a file exists; returns true if it exists, false otherwise
*/
func checkFileIsExist(filename string) bool {
var exist = true
if _, err := os.Stat(filename); os.IsNotExist(err) {
exist = false
}
return exist
}
//func backpath(list []string ,target string ) error {
//
// var hashMapContent ,err = readFileContent() // 读取指纹
//
// if(err != nil) {
// return err
// }
//
//
// for _, value := range list {
// //var targetPath = ""
// filepath.Walk(value, func(path string, f os.FileInfo, err error) error {
//
// var partPath, _ = filepath.Rel(value, path)
//
// var targetPath = filepath.Join(target, partPath)
//
// //path:原始文件地址,targetPath:备份文件地址
// //每个path都需要去比对md5文件中做比对,判断文件是否被修改过
// //如果文件是个目录则不写入指纹文件
// if f.IsDir() {
// copyFile(path, targetPath)
// } else {
// md5 := makeFileMd5(path) //获取文件md5
// isUpdate := comparedFileMd5(hashMapContent, md5, path)
// //如果修改过则复制文件,并更新md5文件
// if isUpdate {
// copyFile(path, targetPath)
// }
// //如果没有修改过则不执行任何操作
// }
// return nil
// })
// }
//
// var hashFilePath = filepath.Join(hashFileName)
// writeFileContent(hashMapContent, hashFilePath)
// //释放读取的指纹文件内存
// hashMapContent = nil
//
// return nil
//
//}
//
func tarpath(backinfo Backinfo, lasttime, time string) error {
//var hashMapContent, err = readFileContent() // 读取指纹
//
//if err != nil {
// return err
//}
for _, value := range backinfo.BackPath {
////var targetPath = ""
//filepath.Walk(value, func(path string, f os.FileInfo, err error) error {
//
var err = xcopy(value, backinfo.TargetPath+"/"+lasttime+"/", time)
if err != nil {
return err
}
logger.Println("执行xcopy: " + value + backinfo.TargetPath + "/" + lasttime + "/")
//var partPath, _ = filepath.Rel(value, path)
//
//var targetPath = filepath.Join(backinfo.TargetPath, time, partPath)
//
////path:原始文件地址,targetPath:备份文件地址
////每个path都需要去比对md5文件中做比对,判断文件是否被修改过
////如果文件是个目录则不写入指纹文件
//if f.IsDir() {
// copyFile(path, targetPath)
//} else {
// md5 := makeFileMd5(path) //获取文件md5
// isUpdate := comparedFileMd5(hashMapContent, md5, path)
// //如果修改过则复制文件,并更新md5文件
// if isUpdate {
// copyFile(path, targetPath)
// }
// //如果没有修改过则不执行任何操作
//}
//return nil
//})
}
//var hashFilePath = filepath.Join(hashFileName)
//err = writeFileContent(hashMapContent, hashFilePath)
//if err != nil {
// logger.Println("写入指纹文件失败" + err.Error())
//return err
//
//}
////释放读取的指纹文件内存
//hashMapContent = nil
return nil
}
func zipfiles(targetPath string, time string) error {
// compress the copied files
var zip = filepath.Join(targetPath, time+".7z")
var ziptargetPath = filepath.Join(targetPath, time)
var err = compress7zip(ziptargetPath, zip)
if err != nil {
return err
}
////压缩文件
//var hashFilePath = filepath.Join(hashFileName)
//err = compress7zip(hashFilePath, ziptargetPath)
//
//if err != nil {
// return err
//}
return nil
}
//
//func copyFile(basePath, targetPath string) {
// defer func() {
// if err_p := recover(); err_p != nil {
// logger.Println("copyFile模块出错")
// }
// }()
//
// baseStat, err := os.Stat(basePath)
// if err != nil {
// logger.Panicln("需要备份的文件检测失败,文件出现问题,无法复制")
// return
// }
// //targetStat, err := os.Stat(targetPath)
// _, err = os.Stat(targetPath)
// if err != nil {
// if os.IsNotExist(err) {
// //如果目标文件不存在
// if baseStat.IsDir() {
// //如果缺失的是一个空目录
// errMkDir := os.MkdirAll(targetPath, 0777)
// if errMkDir != nil {
// logger.Println("创建目录 " + targetPath + " 失败")
// }
// } else {
// //如果缺失的是一个文件,则复制文件
// copyFileContent(basePath, targetPath)
// }
// } else {
// return
// }
// } else {
// //如果目标文件存在
// if baseStat.IsDir() {
// //如果是一个空目录
// } else {
// //如果是一个文件,则复制文件
// copyFileContent(basePath, targetPath)
// }
// }
//
//}
//
////复制文件内容
//func copyFileContent(basePath, targetPath string) {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("copyFileContent模块出错")
// }
// }()
//
// baseFile, err := os.Open(basePath)
// if err != nil {
// logger.Println("读取文件 " + basePath + " 失败")
// return
// }
// defer func() {
// err := baseFile.Close()
// if err != nil {
// logger.Println("close文件 " + basePath + " 失败: " + err.Error())
// }
// }()
// targetFile, err := os.Create(targetPath)
// if err != nil {
// logger.Println("创建文件 " + targetPath + " 失败: " + err.Error())
// return
// }
// defer func() {
// err := targetFile.Close()
// if err != nil {
// logger.Println("close文件 " + targetPath + " 失败: " + err.Error())
// }
// }()
// copyData, err := io.Copy(targetFile, baseFile)
// if err != nil {
// logger.Println("复制文件文件 " + basePath + " 失败: " + err.Error())
// }
// logger.Println("正在复制文件: " + basePath + " 大小为: " + strconv.FormatInt(copyData, 10))
//}
//
////读取整个指纹文件到内存
//func readFileContent() (*map[string]uint32, error) {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("readFileContent模块出错")
// }
// }()
// var hashFilePath = filepath.Join(hashFileName)
//
// file, err := os.Open(hashFilePath)
//
// if err != nil {
// logger.Println("读取指纹文件内容错误: " + err.Error())
// }
//
// defer func() {
// err := file.Close()
// if err != nil {
// logger.Println("close指纹文件 " + hashFilePath + " 失败: " + err.Error())
// }
// }()
//
// jsonContent, err := ioutil.ReadAll(file)
//
// if err != nil {
// logger.Println("读取指纹文件内容错误: " + err.Error())
// }
// content := make(map[string]uint32)
// err = json.Unmarshal(jsonContent, &content)
// if err != nil {
// logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// }
// return &content, err
//}
//
////生成文件的md5
//func makeFileMd5(filePath string) uint32 {
//
// defer func() {
// if err_p := recover(); err_p != nil {
// logger.Println("makeFileMd5模块出错")
// }
// }()
//
// b, err := ioutil.ReadFile(filePath)
//
// if err != nil {
//
// logger.Println("makefilemd5读取文件失败: " + err.Error())
//
// //return 0
// //_, ok := err.(*os.PathError)
// //if ok {
// // logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// //} else {
// // logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// //}
// }
//
// return adler32.Checksum(b)
//}
//
////比对指纹文件中的md5和新读取文件的md5
//func comparedFileMd5(mapContent *map[string]uint32, md5 uint32, path string) bool {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("comparedFileMd5模块出错")
// }
// }()
//
// if contentMd5, ok := (*mapContent)[path]; ok {
// //如果md5存在,且不相同,则代表文件更新过,更新md5值,并且复制文件
// if md5 != contentMd5 {
// (*mapContent)[path] = md5
// return true
// } else {
// return false
// }
// } else {
// //如果md5不存在,则写入新的path
// (*mapContent)[path] = md5
// return true
// }
//}
//
////写入指纹文件
//func writeFileContent(mapContent *map[string]uint32, path string) error {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("writeFileContent模块出错")
// }
// }()
//
// jsonContent, err := json.Marshal(*mapContent)
//
// if err != nil {
// logger.Println("指纹文件 json Marshal 失败: " + err.Error())
// return err
// }
// err = ioutil.WriteFile(path, jsonContent, 0777)
// if err != nil {
// logger.Println("写入指纹文件失败: " + err.Error())
// return err
// }
// return nil
//}
// invoke 7zip to compress frm into dst
func compress7zip(frm, dst string) error {
cmd := exec.Command("7z/7z.exe", "a", "-mx=1", "-v5g", dst, frm)
//cmd.Args = []string{"a",dst,frm};
//cmd.Stdin = strings.NewReader("some input")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
logger.Println("执行7zip压缩命令错误: " + err.Error())
//logger.Fatal(err)
return err
}
logger.Println("in all caps: %s\n", out.String())
return nil
}
// invoke xcopy to copy files changed since the given date
func xcopy(frm, dst, time string) error {
frm = strings.Replace(frm, "/", "\\", -1)
dst = strings.Replace(dst, "/", "\\", -1)
cmd := exec.Command("xcopy", frm, dst, "/s", "/e", "/y", "/d:"+time)
//cmd.Args = []string{"a",dst,frm};
//cmd.Stdin = strings.NewReader("some input")
//var out bytes.Buffer
//cmd.Stdout = &out
err := cmd.Run()
if err != nil {
logger.Println("执行xcopy压缩命令错误: " + err.Error())
//logger.Fatal(err)
return err
}
//logger.Printf("in all caps: %q\n", out.String())
return nil
}
func execu(name string) error {
dir, err := getCurrentDirectory()
if err != nil {
logger.Println("获取当前目录失败" + err.Error())
return err
}
cmd := exec.Command("cmd", "/C", dir+"\\"+name)
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
logger.Println(err.Error() + ": " + stderr.String())
return err
}
//logger.Println("Result: " + out.String())
return nil
}
//
//// 参数frm可以是文件或目录,不会给dst添加.zip扩展名
//func compress(frm, dst string) error {
// buf := bytes.NewBuffer(make([]byte, 0, 10*1024*1024)) // 创建一个读写缓冲
// myzip := zip.NewWriter(buf) // 用压缩器包装该缓冲
// // 用Walk方法来将所有目录下的文件写入zip
// err := filepath.Walk(frm, func(path string, info os.FileInfo, err error) error {
// var file []byte
// if err != nil {
// return filepath.SkipDir
// }
// header, err := zip.FileInfoHeader(info) // 转换为zip格式的文件信息
// if err != nil {
// return filepath.SkipDir
// }
// header.Name, _ = filepath.Rel(filepath.Dir(frm), path)
// if !info.IsDir() {
// // 确定采用的压缩算法(这个是内建注册的deflate)
// header.Method = 8
// file, err = ioutil.ReadFile(path) // 获取文件内容
// if err != nil {
// return filepath.SkipDir
// }
// } else {
// file = nil
// }
// // 上面的部分如果出错都返回filepath.SkipDir
// // 下面的部分如果出错都直接返回该错误
// // 目的是尽可能的压缩目录下的文件,同时保证zip文件格式正确
// w, err := myzip.CreateHeader(header) // 创建一条记录并写入文件信息
// if err != nil {
// return err
// }
// _, err = w.Write(file) // 非目录文件会写入数据,目录不会写入数据
// if err != nil { // 因为目录的内容可能会修改
// return err // 最关键的是我不知道咋获得目录文件的内容
// }
// return nil
// })
// if err != nil {
// return err
// }
// myzip.Close() // 关闭压缩器,让压缩器缓冲中的数据写入buf
// file, err := os.Create(dst) // 建立zip文件
// if err != nil {
// return err
// }
// defer file.Close()
// _, err = buf.WriteTo(file) // 将buf中的数据写入文件
// if err != nil {
// return err
// }
// return nil
//}
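//A hypothetical, self-contained sketch (not the original implementation) of the
//archive/zip walk described in the comments above. It streams each file into the
//archive instead of buffering the whole zip in memory; zipDir is an assumed name and
//the "archive/zip" import is not part of this file's import block.
//
//  func zipDir(frm, dst string) error {
//      out, err := os.Create(dst)
//      if err != nil {
//          return err
//      }
//      defer out.Close()
//      zw := zip.NewWriter(out)
//      defer zw.Close()
//      return filepath.Walk(frm, func(path string, info os.FileInfo, err error) error {
//          if err != nil {
//              return err
//          }
//          header, err := zip.FileInfoHeader(info)
//          if err != nil {
//              return err
//          }
//          header.Name, _ = filepath.Rel(filepath.Dir(frm), path)
//          if info.IsDir() {
//              header.Name += "/" // directories get an entry but no data
//              _, err = zw.CreateHeader(header)
//              return err
//          }
//          header.Method = zip.Deflate // deflate, as in the commented code (Method = 8)
//          w, err := zw.CreateHeader(header)
//          if err != nil {
//              return err
//          }
//          src, err := os.Open(path)
//          if err != nil {
//              return err
//          }
//          defer src.Close()
//          _, err = io.Copy(w, src)
//          return err
//      })
//  }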
func ftpUploadFile(ftpserver, ftpuser, pw, localFile, remoteSavePath, saveName string) error {
ftp, err := ftp.Connect(ftpserver)
if err != nil {
logger.Println(err)
return nil
}
err = ftp.Login(ftpuser, pw)
if err != nil {
logger.Println(err)
return nil
}
//
//err = ftp.Delete(remoteSavePath +"/"+ saveName)
//
//if err != nil {
// logger.Println("删除文件失败")
//}
err = ftp.ChangeDir(remoteSavePath)
if err != nil {
logger.Println(err)
//return nil
err = ftp.MakeDir(remoteSavePath)
if err != nil {
logger.Println(err)
return nil
}
ftp.ChangeDir(remoteSavePath)
}
dir, err := ftp.CurrentDir()
if err != nil {
logger.Println(err)
return nil
}
logger.Println(dir)
file, err := os.Open(localFile)
if err != nil {
logger.Println(err)
return err
}
defer file.Close()
err = ftp.Stor(saveName, file)
if err != nil {
//ftp.Delete(saveName)
logger.Println(err)
return err
}
ftp.Logout()
ftp.Quit()
logger.Println("success upload file:", localFile)
return err
}
func get_external(FileName string) string {
data := FileName
if data != "" {
return data
}
resp, err := http.Get("http://myexternalip.com/raw")
if err != nil {
return ""
}
defer resp.Body.Close()
content, err := ioutil.ReadAll(resp.Body)
if err != nil {
// fall back to the intranet IP when the response body cannot be read
data = GetIntranetIp()
} else {
data = string(content)
}
//buf := new(bytes.Buffer)
//buf.ReadFrom(resp.Body)
//s := buf.String()
data = strings.Replace(data, ".", "-", -1)
data = strings.Replace(data, ":", "-", -1)
return data
}
func GetIntranetIp() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
logger.Println(err)
}
for _, address := range addrs {
// 检查ip地址判断是否回环地址
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
//logger.Println("ip:", ipnet.IP.String())
}
}
}
return ""
}
func getCurrentDirectory() (string, error) {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return "", err
}
return dir, nil
}
func TemplateSaveFile(tlpath, savepath string, datas interface{}) error {
tmpl, err := TemplateInit(tlpath)
if err != nil {
logger.Println("加载模板:" + tlpath + err.Error())
return err
}
data, err := TemplateExecute(tmpl, datas)
if err != nil {
logger.Println("生成模板:" + tlpath + err.Error())
return err
}
f, err := os.Create(savepath) //创建文件
if err != nil {
logger.Println("打开:" + savepath + err.Error())
return err
}
defer f.Close()
n, err := io.WriteString(f, data) //写入文件(字符串)
logger.Println("写入 %d 个字节n", n)
if err != nil {
logger.Println("生成:" + savepath + err.Error())
return err
}
return nil
}
func TemplateInit(templatePath string) (*template.Template, error) {
content, err := ioutil.ReadFile(templatePath)
if err != nil {
return nil, err
}
tmpl, err := template.New("test").Parse(string(content))
if err != nil {
logger.Println("file error: %v", err)
return nil, err
}
return tmpl, nil
}
func TemplateExecute(tmpl *template.Template, data interface{}) (string, error) {
buf := new(bytes.Buffer)
err := tmpl.Execute(buf, data)
if err != nil {
logger.Println("tmplate error: %v", err)
return "", err
}
return buf.String(), nil
}
| string{
"Dir": dir,
"OracleBakPath": OracleBakPath, | conditional_block |
Main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/alert0/backsyn/logger"
"github.com/jlaffaye/ftp"
)
var backFilePath string = "back.json"
////指纹集合文件名称
var hashFileName string = "hash.zlbf"
//var PATH = "";
//生成sql文件
var ORACLEBAKSQLPATHTL string = "oracle/backdirsql.tl"
var ORACLEBAKSQLPATH string = "bat/backdir.sql"
//生成执行sql文件
var ORACLEBAKPATHTL string = "oracle/backdir.tl"
var ORACLEBAKPATH string = "bat/backdir.bat"
//生成执行备份文件
var ORACLEBAKBATPATHTL string = "oracle/oracle.tl"
var ORACLEBAKBATPATH string = "bat/oracle.bat"
//定时任务
var SCHTASKSPATHTL string = "oracle/schtasks.tl"
var SCHTASKSPATH string = "bat/schtasks.bat"
//start.bat
var STARTPATHTL string = "oracle/start.tl"
var STARTPATH string = "bat/start.bat"
//设置文件
type Backinfo struct {
TargetPath string
BackPath []string
FtpIp string
FtpUserName string
FtpPassWord string
OracleBakPath string
OracleURL string
BackFileName string
}
func main() {
//dir2 , err2 := getCurrentDirectory()
//if err2 != nil {
// logger.Println("获取当前目录失败" + err2.Error() )
//
//}
//
//logger.Println("文件目录: " + dir2 )
//
info, err := readBackInfoContent()
if err != nil {
logger.Println("读取配置文件错误文件内容错误: " + err.Error())
}
//initHashfile()
if !checkFileIsExist(ORACLEBAKSQLPATH) {
initbak(info)
}
if len(os.Args) > 1 {
cmd := os.Args[1]
logger.Println("cmd " + cmd)
switch cmd {
case "o":
BakOracleBat(info.OracleBakPath)
case "f":
BakFiles(info)
default:
//logger.Println("删除指纹文件错误文件内容错误: " + err.Error())
BakOracleBat(info.OracleBakPath)
BakFiles(info)
}
} else {
BakOracleBat(info.OracleBakPath)
BakFiles(info)
}
os.Exit(0)
}
//
func initHashfile() error {
t := time.Now()
if 1 == t.Day() { //第一天
err := os.Remove(hashFileName) //删除文件test.txt
if err != nil {
logger.Println("删除指纹文件错误文件内容错误: " + err.Error())
}
}
//if err := createHashFile(); err != nil {
// logger.Println("读取指纹文件错误文件内容错误: " + err.Error())
// return err
//}
return nil
}
func initbak(info Backinfo) error {
OracleBakPath := strings.Replace(info.OracleBakPath, "/", "\\", -1)
var err = TemplateSaveFile(ORACLEBAKSQLPATHTL, ORACLEBAKSQLPATH, OracleBakPath)
if err != nil {
logger.Println("生成oracledir.sql 失败" + err.Error())
}
dir, err1 := getCurrentDirectory()
if err1 != nil {
logger.Println("获取当前目录失败" + err1.Error())
}
//logger.Println("ssssssssssss" + dir)
oracledir := map[string]string{
"Dir": dir,
"OracleBakPath": OracleBakPath,
}
err = TemplateSaveFile(ORACLEBAKPATHTL, ORACLEBAKPATH, oracledir)
if err != nil {
logger.Println("生成oracledir.bat 失败" + err.Error())
}
var oracledatatmp []string = strings.Split(info.OracleURL, "@")
if len(oracledatatmp) < 3 {
logger.Println("读取oracle配置信息失败")
return fmt.Errorf("OracleURL 格式应为 用户名@密码@数据库名")
}
oracleddata := map[string]string{
"OracleBakPath": OracleBakPath,
"UserName": oracledatatmp[0],
"PassWord": oracledatatmp[1],
"DBName": oracledatatmp[2],
}
err = TemplateSaveFile(ORACLEBAKBATPATHTL, ORACLEBAKBATPATH, oracleddata)
if err != nil {
logger.Println("生成oracle.bat 失败" + err.Error())
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
baktime := fmt.Sprintf("0%d:%d%d", r.Intn(6), r.Intn(5), r.Intn(9))
logger.Println(baktime)
schtasks := map[string]string{
"dir": dir,
"time": baktime,
}
err = TemplateSaveFile(SCHTASKSPATHTL, SCHTASKSPATH, schtasks)
if err != nil {
logger.Println("生成schtasks.bat 失败" + err.Error())
}
err = TemplateSaveFile(STARTPATHTL, STARTPATH, dir)
if err != nil {
logger.Println("生成start.bat 失败" + err.Error())
}
err = execu(SCHTASKSPATH)
if err != nil {
logger.Println("运行schtasks.bat 失败" + err.Error())
return err
}
return nil
}
//oracleback
func BakOracleBat(oraclepath string) error {
dir, err := getCurrentDirectory()
if err != nil {
logger.Println("获取当前目录失败" + err.Error())
return err
}
if !checkFileIsExist(filepath.Join(dir, oraclepath)) {
err := execu(ORACLEBAKPATH)
if err != nil {
logger.Println("运行文件失败" + ORACLEBAKPATH + err.Error())
return err
}
}
err = execu(ORACLEBAKBATPATH)
if err != nil {
logger.Println("运行文件失败" + ORACLEBAKBATPATH + err.Error())
return err
}
return nil
}
func BakFiles(info Backinfo) error {
var xcplasttime = time.Now().AddDate(0, 0, -1).Format("01-02-2006")
var lasttime = time.Now().Format("2006-01-02")
var lastmoth = time.Now().Format("2006-01")
if !checkFileIsExist(hashFileName) {
if err := createHashFile(); err != nil {
logger.Println("读取指纹文件错误文件内容错误: " + err.Error())
//return err
}
xcplasttime = "01-02-2006"
}
if err := tarpath(info, lasttime, xcplasttime); err != nil {
logger.Println("复制文件失败" + err.Error())
}
if err := zipfiles(info.TargetPath, lasttime); err != nil {
logger.Println("压缩文件失败" + err.Error())
}
var remoteSavePath = lastmoth + "^" + strings.Replace(get_external(info.BackFileName), ".", "-", -1)
var oracledatatmp []string = strings.Split(info.OracleURL, "@")
files, _ := ioutil.ReadDir(info.TargetPath)
for _, file := range files {
if file.IsDir() {
continue
} else {
logger.Println(file.Name())
ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, filepath.Join(info.TargetPath, file.Name()), remoteSavePath, oracledatatmp[0]+file.Name())
}
}
//var localFile = filepath.Join(info.TargetPath, lasttime+".7z")
//
//var oracledatatmp []string = strings.Split(info.OracleURL, "@")
//
//
//logger.Println("压缩文件", remoteSavePath, lasttime+".7z")
//
//var err = ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, localFile, remoteSavePath, lasttime+oracledatatmp[0]+".7z")
//
//if err != nil {
// logger.Println("上传ftp文件失败" + err.Error())
// //ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, localFile, remoteSavePath, lasttime+oracledatatmp[0]+".7z")
//}
os.RemoveAll(info.TargetPath)
//return err
return nil
}
//读取back.json的配置文件
func readBackInfoContent() (Backinfo, error) {
file, err := os.Open(backFilePath)
if err != nil {
logger.Println("读取指纹文件内容错误: " + err.Error())
}
defer func() {
err := file.Close()
if err != nil {
logger.Println("close指纹文件 " + backFilePath + " 失败: " + err.Error())
}
}()
jsonContent, err := ioutil.ReadAll(file)
if err != nil {
logger.Println("读取指纹文件内容错误: " + err.Error())
}
//content := make(&backinfo)
var backinfo Backinfo
err = json.Unmarshal(jsonContent, &backinfo)
if err != nil {
logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
}
logger.Println(backinfo.BackPath[0])
return backinfo, err
}
//
//创建指纹文件
func createHashFile() error {
defer func() {
if err_p := recover(); err_p != nil {
logger.Println("createHashFile模块出错")
}
}()
var hashFilePath = filepath.Join(hashFileName)
if !checkFileIsExist(hashFilePath) {
err := ioutil.WriteFile(hashFilePath, []byte("{}"), 0777)
if err != nil {
logger.Println("创建指纹文件失败: " + err.Error())
return err
}
}
return nil
}
/**
* 判断文件是否存在 存在返回 true 不存在返回false
*/
func checkFileIsExist(filename string) bool {
var exist = true
if _, err := os.Stat(filename); os.IsNotExist(err) {
exist = false
}
return exist
}
//func backpath(list []string ,target string ) error {
//
// var hashMapContent ,err = readFileContent() // 读取指纹
//
// if(err != nil) {
// return err
// }
//
//
// for _, value := range list {
// //var targetPath = ""
// filepath.Walk(value, func(path string, f os.FileInfo, err error) error {
//
// var partPath, _ = filepath.Rel(value, path)
//
// var targetPath = filepath.Join(target, partPath)
//
// //path:原始文件地址,targetPath:备份文件地址
// //每个path都需要去比对md5文件中做比对,判断文件是否被修改过
// //如果文件是个目录则不写入指纹文件
// if f.IsDir() {
// copyFile(path, targetPath)
// } else {
// md5 := makeFileMd5(path) //获取文件md5
// isUpdate := comparedFileMd5(hashMapContent, md5, path)
// //如果修改过则复制文件,并更新md5文件
// if isUpdate {
// copyFile(path, targetPath)
// }
// //如果没有修改过则不执行任何操作
// }
// return nil
// })
// }
//
// var hashFilePath = filepath.Join(hashFileName)
// writeFileContent(hashMapContent, hashFilePath)
// //释放读取的指纹文件内存
// hashMapContent = nil
//
// return nil
//
//}
//
func tarpath(backinfo Backinfo, lasttime, time string) error {
//var hashMapContent, err = readFileContent() // 读取指纹
//
//if err != nil {
// return err
//}
for _, value := range backinfo.BackPath {
////var targetPath = ""
//filepath.Walk(value, func(path string, f os.FileInfo, err error) error {
//
var err = xcopy(value, backinfo.TargetPath+"/"+lasttime+"/", time)
if err != nil {
return err
}
logger.Println("执行xcopy: " + value + backinfo.TargetPath + "/" + lasttime + "/")
//var partPath, _ = filepath.Rel(value, path)
//
//var targetPath = filepath.Join(backinfo.TargetPath, time, partPath)
//
////path:原始文件地址,targetPath:备份文件地址
////每个path都需要去比对md5文件中做比对,判断文件是否被修改过
////如果文件是个目录则不写入指纹文件
//if f.IsDir() {
// copyFile(path, targetPath)
//} else {
// md5 := makeFileMd5(path) //获取文件md5
// isUpdate := comparedFileMd5(hashMapContent, md5, path)
// //如果修改过则复制文件,并更新md5文件
// if isUpdate {
// copyFile(path, targetPath)
// }
// //如果没有修改过则不执行任何操作
//}
//return nil
//})
}
//var hashFilePath = filepath.Join(hashFileName)
//err = writeFileContent(hashMapContent, hashFilePath)
//if err != nil {
// logger.Println("写入指纹文件失败" + err.Error())
//return err
//
//}
////释放读取的指纹文件内存
//hashMapContent = nil
return nil
}
func zipfiles(targetPath string, time string) error {
//压缩文件
var zip = filepath.Join(targetPath, time+".7z")
var ziptargetPath = filepath.Join(targetPath, time)
var err = compress7zip(ziptargetPath, zip)
if err != nil {
return err
}
////压缩文件
//var hashFilePath = filepath.Join(hashFileName)
//err = compress7zip(hashFilePath, ziptargetPath)
//
//if err != nil {
// return err
//}
return nil
}
//
//func copyFile(basePath, targetPath string) {
// defer func() {
// if err_p := recover(); err_p != nil {
// logger.Println("copyFile模块出错")
// }
// }()
//
// baseStat, err := os.Stat(basePath)
// if err != nil {
// logger.Panicln("需要备份的文件检测失败,文件出现问题,无法复制")
// return
// }
// //targetStat, err := os.Stat(targetPath)
// _, err = os.Stat(targetPath)
// if err != nil {
// if os.IsNotExist(err) {
// //如果目标文件不存在
// if baseStat.IsDir() {
// //如果缺失的是一个空目录
// errMkDir := os.MkdirAll(targetPath, 0777)
// if errMkDir != nil {
// logger.Println("创建目录 " + targetPath + " 失败")
// }
// } else {
// //如果缺失的是一个文件,则复制文件
// copyFileContent(basePath, targetPath)
// }
// } else {
// return
// }
// } else {
// //如果目标文件存在
// if baseStat.IsDir() {
// //如果是一个空目录
// } else {
// //如果是一个文件,则复制文件
// copyFileContent(basePath, targetPath)
// }
// }
//
//}
//
////复制文件内容
//func copyFileContent(basePath, targetPath string) {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("copyFileContent模块出错")
// }
// }()
//
// baseFile, err := os.Open(basePath)
// if err != nil {
// logger.Println("读取文件 " + basePath + " 失败")
// return
// }
// defer func() {
// err := baseFile.Close()
// if err != nil {
// logger.Println("close文件 " + basePath + " 失败: " + err.Error())
// }
// }()
// targetFile, err := os.Create(targetPath)
// if err != nil {
// logger.Println("创建文件 " + targetPath + " 失败: " + err.Error())
// return
// }
// defer func() {
// err := targetFile.Close()
// if err != nil {
// logger.Println("close文件 " + targetPath + " 失败: " + err.Error())
// }
// }()
// copyData, err := io.Copy(targetFile, baseFile)
// if err != nil {
// logger.Println("复制文件文件 " + basePath + " 失败: " + err.Error())
// }
// logger.Println("正在复制文件: " + basePath + " 大小为: " + strconv.FormatInt(copyData, 10))
//}
//
////读取整个指纹文件到内存
//func readFileContent() (*map[string]uint32, error) {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("readFileContent模块出错")
// }
// }()
// var hashFilePath = filepath.Join(hashFileName)
//
// file, err := os.Open(hashFilePath)
//
// if err != nil {
// logger.Println("读取指纹文件内容错误: " + err.Error())
// }
//
// defer func() {
// err := file.Close()
// if err != nil {
// logger.Println("close指纹文件 " + hashFilePath + " 失败: " + err.Error())
// }
// }()
//
// jsonContent, err := ioutil.ReadAll(file)
//
// if err != nil {
// logger.Println("读取指纹文件内容错误: " + err.Error())
// }
// content := make(map[string]uint32)
// err = json.Unmarshal(jsonContent, &content)
// if err != nil {
// logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// }
// return &content, err
//}
//
////生成文件的md5
//func makeFileMd5(filePath string) uint32 {
//
// defer func() {
// if err_p := recover(); err_p != nil {
// logger.Println("makeFileMd5模块出错")
// }
// }()
//
// b, err := ioutil.ReadFile(filePath)
//
// if err != nil {
//
// logger.Println("makefilemd5读取文件失败: " + err.Error())
//
// //return 0
// //_, ok := err.(*os.PathError)
// //if ok {
// // logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// //} else {
// // logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// //}
// }
//
// return adler32.Checksum(b)
//}
//
////比对指纹文件中的md5和新读取文件的md5
//func comparedFileMd5(mapContent *map[string]uint32, md5 uint32, path string) bool {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("comparedFileMd5模块出错")
// }
// }()
//
// if contentMd5, ok := (*mapContent)[path]; ok {
// //如果md5存在,且不相同,则代表文件更新过,更新md5值,并且复制文件
// if md5 != contentMd5 {
// (*mapContent)[path] = md5
// return true
// } else {
// return false
// }
// } else {
// //如果md5不存在,则写入新的path
// (*mapContent)[path] = md5
// return true
// }
//}
//
////写入指纹文件
//func writeFileContent(mapContent *map[string]uint32, path string) error {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("writeFileContent模块出错")
// }
// }()
//
// jsonContent, err := json.Marshal(*mapContent)
//
// if err != nil {
// logger.Println("指纹文件 json Marshal 失败: " + err.Error())
// return err
// }
// err = ioutil.WriteFile(path, jsonContent, 0777)
// if err != nil {
// logger.Println("写入指纹文件失败: " + err.Error())
// return err
// }
// return nil
//}
//调用7zip压缩
func compress7zip(frm, dst string) error {
cmd := exec.Command("7z/7z.exe", "a", "-mx=1", "-v5g", dst, frm)
//cmd.Args = []string{"a",dst,frm};
//cmd.Stdin = strings.NewReader("some input")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
logger.Println("执行7zip压缩命令错误: " + err.Error())
//logger.Fatal(err)
return err
}
logger.Println("in all caps: %s\n", out.String())
return nil
}
//调用xcopy按日期增量复制文件
func xcopy(frm, dst, time string) error {
frm = strings.Replace(frm, "/", "\\", -1)
dst = strings.Replace(dst, "/", "\\", -1)
cmd := exec.Command("xcopy", frm, dst, "/s", "/e", "/y", "/d:"+time)
//cmd.Args = []string{"a",dst,frm};
//cmd.Stdin = strings.NewReader("some input")
//var out bytes.Buffer
//cmd.Stdout = &out
err := cmd.Run()
if err != nil {
logger.Println("执行xcopy压缩命令错误: " + err.Error())
//logger.Fatal(err)
return err
}
//logger.Printf("in all caps: %q\n", out.String())
return nil
}
func execu(name string) error {
dir, err := getCurrentDirectory()
if err != nil {
logger.Println("获取当前目录失败" + err.Error())
return err
}
cmd := exec.Command("cmd", "/C", dir+"\\"+name)
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
logger.Println(err.Error() + ": " + stderr.String())
return err
}
//logger.Println("Result: " + out.String())
return nil
}
//
//// 参数frm可以是文件或目录,不会给dst添加.zip扩展名
//func compress(frm, dst string) error {
// buf := bytes.NewBuffer(make([]byte, 0, 10*1024*1024)) // 创建一个读写缓冲
// myzip := zip.NewWriter(buf) // 用压缩器包装该缓冲
// // 用Walk方法来将所有目录下的文件写入zip
// err := filepath.Walk(frm, func( | info os.FileInfo, err error) error {
// var file []byte
// if err != nil {
// return filepath.SkipDir
// }
// header, err := zip.FileInfoHeader(info) // 转换为zip格式的文件信息
// if err != nil {
// return filepath.SkipDir
// }
// header.Name, _ = filepath.Rel(filepath.Dir(frm), path)
// if !info.IsDir() {
// // 确定采用的压缩算法(这个是内建注册的deflate)
// header.Method = 8
// file, err = ioutil.ReadFile(path) // 获取文件内容
// if err != nil {
// return filepath.SkipDir
// }
// } else {
// file = nil
// }
// // 上面的部分如果出错都返回filepath.SkipDir
// // 下面的部分如果出错都直接返回该错误
// // 目的是尽可能的压缩目录下的文件,同时保证zip文件格式正确
// w, err := myzip.CreateHeader(header) // 创建一条记录并写入文件信息
// if err != nil {
// return err
// }
// _, err = w.Write(file) // 非目录文件会写入数据,目录不会写入数据
// if err != nil { // 因为目录的内容可能会修改
// return err // 最关键的是我不知道咋获得目录文件的内容
// }
// return nil
// })
// if err != nil {
// return err
// }
// myzip.Close() // 关闭压缩器,让压缩器缓冲中的数据写入buf
// file, err := os.Create(dst) // 建立zip文件
// if err != nil {
// return err
// }
// defer file.Close()
// _, err = buf.WriteTo(file) // 将buf中的数据写入文件
// if err != nil {
// return err
// }
// return nil
//}
func ftpUploadFile(ftpserver, ftpuser, pw, localFile, remoteSavePath, saveName string) error {
ftp, err := ftp.Connect(ftpserver)
if err != nil {
logger.Println(err)
return nil
}
err = ftp.Login(ftpuser, pw)
if err != nil {
logger.Println(err)
return nil
}
//
//err = ftp.Delete(remoteSavePath +"/"+ saveName)
//
//if err != nil {
// logger.Println("删除文件失败")
//}
err = ftp.ChangeDir(remoteSavePath)
if err != nil {
logger.Println(err)
//return nil
err = ftp.MakeDir(remoteSavePath)
if err != nil {
logger.Println(err)
return nil
}
ftp.ChangeDir(remoteSavePath)
}
dir, err := ftp.CurrentDir()
if err != nil {
logger.Println(err)
return nil
}
logger.Println(dir)
file, err := os.Open(localFile)
if err != nil {
logger.Println(err)
return err
}
defer file.Close()
err = ftp.Stor(saveName, file)
if err != nil {
//ftp.Delete(saveName)
logger.Println(err)
return err
}
ftp.Logout()
ftp.Quit()
logger.Println("success upload file:", localFile)
return err
}
func get_external(FileName string) string {
data := FileName
if data != "" {
return data
}
resp, err := http.Get("http://myexternalip.com/raw")
if err != nil {
return ""
}
defer resp.Body.Close()
content, err := ioutil.ReadAll(resp.Body)
if err != nil {
// fall back to the intranet IP when the response body cannot be read
data = GetIntranetIp()
} else {
data = string(content)
}
//buf := new(bytes.Buffer)
//buf.ReadFrom(resp.Body)
//s := buf.String()
data = strings.Replace(data, ".", "-", -1)
data = strings.Replace(data, ":", "-", -1)
return data
}
func GetIntranetIp() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
logger.Println(err)
}
for _, address := range addrs {
// 检查ip地址判断是否回环地址
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
//logger.Println("ip:", ipnet.IP.String())
}
}
}
return ""
}
func getCurrentDirectory() (string, error) {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return "", err
}
return dir, nil
}
func TemplateSaveFile(tlpath, savepath string, datas interface{}) error {
tmpl, err := TemplateInit(tlpath)
if err != nil {
logger.Println("加载模板:" + tlpath + err.Error())
return err
}
data, err := TemplateExecute(tmpl, datas)
if err != nil {
logger.Println("生成模板:" + tlpath + err.Error())
return err
}
f, err := os.Create(savepath) //创建文件
if err != nil {
logger.Println("打开:" + savepath + err.Error())
return err
}
defer f.Close()
n, err := io.WriteString(f, data) //写入文件(字符串)
logger.Println("写入 %d 个字节n", n)
if err != nil {
logger.Println("生成:" + savepath + err.Error())
return err
}
return nil
}
func TemplateInit(templatePath string) (*template.Template, error) {
content, err := ioutil.ReadFile(templatePath)
if err != nil {
return nil, err
}
tmpl, err := template.New("test").Parse(string(content))
if err != nil {
logger.Println("file error: %v", err)
return nil, err
}
return tmpl, nil
}
func TemplateExecute(tmpl *template.Template, data interface{}) (string, error) {
buf := new(bytes.Buffer)
err := tmpl.Execute(buf, data)
if err != nil {
logger.Println("tmplate error: %v", err)
return "", err
}
return buf.String(), nil
}
| path string, | identifier_name |
Main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/alert0/backsyn/logger"
"github.com/jlaffaye/ftp"
)
var backFilePath string = "back.json"
////指纹集合文件名称
var hashFileName string = "hash.zlbf"
//var PATH = "";
//生成sql文件
var ORACLEBAKSQLPATHTL string = "oracle/backdirsql.tl"
var ORACLEBAKSQLPATH string = "bat/backdir.sql"
//生成执行sql文件
var ORACLEBAKPATHTL string = "oracle/backdir.tl"
var ORACLEBAKPATH string = "bat/backdir.bat"
//生成执行备份文件
var ORACLEBAKBATPATHTL string = "oracle/oracle.tl"
var ORACLEBAKBATPATH string = "bat/oracle.bat"
//定时任务
var SCHTASKSPATHTL string = "oracle/schtasks.tl"
var SCHTASKSPATH string = "bat/schtasks.bat"
//start.bat
var STARTPATHTL string = "oracle/start.tl"
var STARTPATH string = "bat/start.bat"
//设置文件
type Backinfo struct {
TargetPath string
BackPath []string
FtpIp string
FtpUserName string
FtpPassWord string
OracleBakPath string
OracleURL string
BackFileName string
}
func main() {
//dir2 , err2 := getCurrentDirectory()
//if err2 != nil {
// logger.Println("获取当前目录失败" + err2.Error() )
//
//}
//
//logger.Println("文件目录: " + dir2 )
//
info, err := readBackInfoContent()
if err != nil {
logger.Println("读取配置文件错误文件内容错误: " + err.Error())
}
//initHashfile()
if !checkFileIsExist(ORACLEBAKSQLPATH) {
initbak(info)
}
if len(os.Args) > 1 {
cmd := os.Args[1]
logger.Println("cmd " + cmd)
switch cmd {
case "o":
BakOracleBat(info.OracleBakPath)
case "f":
BakFiles(info)
default:
//logger.Println("删除指纹文件错误文件内容错误: " + err.Error())
BakOracleBat(info.OracleBakPath)
BakFiles(info)
}
} else {
BakOracleBat(info.OracleBakPath)
BakFiles(info)
}
os.Exit(0)
}
//
func initHashfile() error {
t := time.Now()
if 1 == t.Day() { //第一天
err := os.Remove(hashFileName) //删除文件test.txt
if err != nil {
logger.Println("删除指纹文件错误文件内容错误: " + err.Error())
}
}
//if err := createHashFile(); err != nil {
// logger.Println("读取指纹文件错误文件内容错误: " + err.Error())
// return err
//}
return nil
}
func initbak(info Backinfo) error {
OracleBakPath := strings.Replace(info.OracleBakPath, "/", "\\", -1)
var err = TemplateSaveFile(ORACLEBAKSQLPATHTL, ORACLEBAKSQLPATH, OracleBakPath)
if err != nil {
logger.Println("生成oracledir.sql 失败" + err.Error())
}
dir, err1 := getCurrentDirectory()
if err1 != nil {
logger.Println("获取当前目录失败" + err1.Error())
}
//logger.Println("ssssssssssss" + dir)
oracledir := map[string]string{
"Dir": dir,
"OracleBakPath": OracleBakPath,
}
err = TemplateSaveFile(ORACLEBAKPATHTL, ORACLEBAKPATH, oracledir)
if err != nil {
logger.Println("生成oracledir.bat 失败" + err.Error())
}
var oracledatatmp []string = strings.Split(info.OracleURL, "@")
if len(oracledatatmp) < 3 {
logger.Println("读取oracle配置信息失败")
return fmt.Errorf("OracleURL 格式应为 用户名@密码@数据库名")
}
oracleddata := map[string]string{
"OracleBakPath": OracleBakPath,
"UserName": oracledatatmp[0],
"PassWord": oracledatatmp[1],
"DBName": oracledatatmp[2],
}
err = TemplateSaveFile(ORACLEBAKBATPATHTL, ORACLEBAKBATPATH, oracleddata)
if err != nil {
logger.Println("生成oracle.bat 失败" + err.Error())
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
baktime := fmt.Sprintf("0%d:%d%d", r.Intn(6), r.Intn(5), r.Intn(9))
logger.Println(baktime)
schtasks := map[string]string{
"dir": dir,
"time": baktime,
}
err = TemplateSaveFile(SCHTASKSPATHTL, SCHTASKSPATH, schtasks)
if err != nil {
logger.Println("生成schtasks.bat 失败" + err.Error())
}
err = TemplateSaveFile(STARTPATHTL, STARTPATH, dir)
if err != nil {
logger.Println("生成start.bat 失败" + err.Error())
}
err = execu(SCHTASKSPATH)
if err != nil {
logger.Println("运行schtasks.bat 失败" + err.Error())
return err
}
return nil
}
//oracleback
func BakOracleBat(oraclepath string) error {
dir, err := getCurrentDirectory()
if err != nil {
logger.Println("获取当前目录失败" + err.Error())
return err
}
if !checkFileIsExist(filepath.Join(dir, oraclepath)) {
err := execu(ORACLEBAKPATH)
if err != nil {
logger.Println("运行文件失败" + ORACLEBAKPATH + err.Error())
return err
}
}
err = execu(ORACLEBAKBATPATH)
if err != nil {
logger.Println("运行文件失败" + ORACLEBAKBATPATH + err.Error())
return err
}
return nil
}
func BakFiles(info Backinfo) error {
var xcplasttime = time.Now().AddDate(0, 0, -1).Format("01-02-2006")
var lasttime = time.Now().Format("2006-01-02")
var lastmoth = time.Now().Format("2006-01")
if !checkFileIsExist(hashFileName) {
if err := createHashFile(); err != nil {
logger.Println("读取指纹文件错误文件内容错误: " + err.Error())
//return err
}
xcplasttime = "01-02-2006"
}
if err := tarpath(info, lasttime, xcplasttime); err != nil {
logger.Println("复制文件失败" + err.Error())
}
if err := zipfiles(info.TargetPath, lasttime); err != nil {
logger.Println("压缩文件失败" + err.Error())
}
var remoteSavePath = lastmoth + "^" + strings.Replace(get_external(info.BackFileName), ".", "-", -1)
var oracledatatmp []string = strings.Split(info.OracleURL, "@")
files, _ := ioutil.ReadDir(info.TargetPath)
for _, file := range files {
if file.IsDir() {
continue
} else {
logger.Println(file.Name())
ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, filepath.Join(info.TargetPath, file.Name()), remoteSavePath, oracledatatmp[0]+file.Name())
}
}
//var localFile = filepath.Join(info.TargetPath, lasttime+".7z")
//
//var oracledatatmp []string = strings.Split(info.OracleURL, "@")
//
//
//logger.Println("压缩文件", remoteSavePath, lasttime+".7z")
//
//var err = ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, localFile, remoteSavePath, lasttime+oracledatatmp[0]+".7z")
//
//if err != nil {
// logger.Println("上传ftp文件失败" + err.Error())
// //ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, localFile, remoteSavePath, lasttime+oracledatatmp[0]+".7z")
//}
os.RemoveAll(info.TargetPath)
//return err
return nil
}
//读取back.json的配置文件
func readBackInfoContent() (Backinfo, error) {
file, err := os.Open(backFilePath)
if err != nil {
logger.Println("读取指纹文件内容错误: " + err.Error())
}
defer func() {
err := file.Close()
if err != nil {
logger.Println("close指纹文件 " + backFilePath + " 失败: " + err.Error())
}
}()
jsonContent, err := ioutil.ReadAll(file)
if err != nil {
logger.Println("读取指纹文件内容错误: " + err.Error())
}
//content := make(&backinfo)
var backinfo Backinfo
err = json.Unmarshal(jsonContent, &backinfo)
if err != nil {
logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
}
logger.Println(backinfo.BackPath[0])
return backinfo, err
}
//
//创建指纹文件
func createHashFile() error {
defer func() {
if err_p := recover(); err_p != nil {
logger.Println("createHashFile模块出错")
}
}()
var hashFilePath = filepath.Join(hashFileName)
if !checkFileIsExist(hashFilePath) {
err := ioutil.WriteFile(hashFilePath, []byte("{}"), 0777)
if err != nil {
logger.Println("创建指纹文件失败: " + err.Error())
return err
}
}
return nil
}
/**
* 判断文件是否存在 存在返回 true 不存在返回false
*/
func checkFileIsExist(filename string) bool {
var exist = true
if _, err := os.Stat(filename); os.IsNotExist(err) {
exist = false
}
return exist
}
//func backpath(list []string ,target string ) error {
//
// var hashMapContent ,err = readFileContent() // 读取指纹
//
// if(err != nil) {
// return err
// }
//
//
// for _, value := range list {
// //var targetPath = ""
// filepath.Walk(value, func(path string, f os.FileInfo, err error) error {
//
// var partPath, _ = filepath.Rel(value, path)
//
// var targetPath = filepath.Join(target, partPath)
//
// //path:原始文件地址,targetPath:备份文件地址
// //每个path都需要去比对md5文件中做比对,判断文件是否被修改过
// //如果文件是个目录则不写入指纹文件
// if f.IsDir() {
// copyFile(path, targetPath)
// } else {
// md5 := makeFileMd5(path) //获取文件md5
// isUpdate := comparedFileMd5(hashMapContent, md5, path)
// //如果修改过则复制文件,并更新md5文件
// if isUpdate {
// copyFile(path, targetPath)
// }
// //如果没有修改过则不执行任何操作
// }
// return nil
// })
// }
//
// var hashFilePath = filepath.Join(hashFileName)
// writeFileContent(hashMapContent, hashFilePath)
// //释放读取的指纹文件内存
// hashMapContent = nil
//
// return nil
//
//}
//
func tarpath(backinfo Backinfo, lasttime, time string) error {
//var hashMapContent, err = readFileContent() // 读取指纹
//
//if err != nil {
// return err
//}
for _, value := range backinfo.BackPath {
////var targetPath = ""
//filepath.Walk(value, func(path string, f os.FileInfo, err error) error {
//
var err = xcopy(value, backinfo.TargetPath+"/"+lasttime+"/", time)
if err != nil {
return err
}
logger.Println("执行xcopy: " + value + backinfo.TargetPath + "/" + lasttime + "/")
//var partPath, _ = filepath.Rel(value, path)
//
//var targetPath = filepath.Join(backinfo.TargetPath, time, partPath)
//
////path:原始文件地址,targetPath:备份文件地址
////每个path都需要去比对md5文件中做比对,判断文件是否被修改过
////如果文件是个目录则不写入指纹文件
//if f.IsDir() {
// copyFile(path, targetPath)
//} else {
// md5 := makeFileMd5(path) //获取文件md5
// isUpdate := comparedFileMd5(hashMapContent, md5, path)
// //如果修改过则复制文件,并更新md5文件
// if isUpdate {
// copyFile(path, targetPath)
// }
// //如果没有修改过则不执行任何操作
//}
//return nil
//})
}
//var hashFilePath = filepath.Join(hashFileName)
//err = writeFileContent(hashMapContent, hashFilePath)
//if err != nil {
// logger.Println("写入指纹文件失败" + err.Error())
//return err
//
//}
////释放读取的指纹文件内存
//hashMapContent = nil
return nil
}
func zipfiles(targetPath string, time string) error {
//压缩文件
var zip = filepath.Join(targetPath, time+".7z")
var ziptargetPath = filepath.Join(targetPath, time)
var err = compress7zip(ziptargetPath, zip)
if err != nil {
return err
}
////压缩文件
//var hashFilePath = filepath.Join(hashFileName)
//err = compress7zip(hashFilePath, ziptargetPath)
//
//if err != nil {
// return err
//}
return nil
}
//
//func copyFile(basePath, targetPath string) {
// defer func() {
// if err_p := recover(); err_p != nil {
// logger.Println("copyFile模块出错")
// }
// }()
//
// baseStat, err := os.Stat(basePath)
// if err != nil {
// logger.Panicln("需要备份的文件检测失败,文件出现问题,无法复制")
// return
// }
// //targetStat, err := os.Stat(targetPath)
// _, err = os.Stat(targetPath)
// if err != nil {
// if os.IsNotExist(err) {
// //如果目标文件不存在
// if baseStat.IsDir() {
// //如果缺失的是一个空目录
// errMkDir := os.MkdirAll(targetPath, 0777)
// if errMkDir != nil {
// logger.Println("创建目录 " + targetPath + " 失败")
// }
// } else {
// //如果缺失的是一个文件,则复制文件
// copyFileContent(basePath, targetPath)
// }
// } else {
// return
// }
// } else {
// //如果目标文件存在
// if baseStat.IsDir() {
// //如果是一个空目录
// } else {
// //如果是一个文件,则复制文件
// copyFileContent(basePath, targetPath)
// }
// }
//
//}
//
////复制文件内容
//func copyFileContent(basePath, targetPath string) {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("copyFileContent模块出错")
// }
// }()
//
// baseFile, err := os.Open(basePath)
// if err != nil {
// logger.Println("读取文件 " + basePath + " 失败")
// return
// }
// defer func() {
// err := baseFile.Close()
// if err != nil {
// logger.Println("close文件 " + basePath + " 失败: " + err.Error())
// }
// }()
// targetFile, err := os.Create(targetPath)
// if err != nil {
// logger.Println("创建文件 " + targetPath + " 失败: " + err.Error())
// return
// }
// defer func() {
// err := targetFile.Close()
// if err != nil {
// logger.Println("close文件 " + targetPath + " 失败: " + err.Error())
// }
// }()
// copyData, err := io.Copy(targetFile, baseFile)
// if err != nil {
// logger.Println("复制文件文件 " + basePath + " 失败: " + err.Error())
// }
// logger.Println("正在复制文件: " + basePath + " 大小为: " + strconv.FormatInt(copyData, 10))
//}
//
////读取整个指纹文件到内存
//func readFileContent() (*map[string]uint32, error) {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("readFileContent模块出错")
// }
// }()
// var hashFilePath = filepath.Join(hashFileName)
//
// file, err := os.Open(hashFilePath)
//
// if err != nil {
// logger.Println("读取指纹文件内容错误: " + err.Error())
// }
//
// defer func() {
// err := file.Close()
// if err != nil {
// logger.Println("close指纹文件 " + hashFilePath + " 失败: " + err.Error())
// }
// }()
//
// jsonContent, err := ioutil.ReadAll(file)
//
// if err != nil {
// logger.Println("读取指纹文件内容错误: " + err.Error())
// }
// content := make(map[string]uint32)
// err = json.Unmarshal(jsonContent, &content)
// if err != nil {
// logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// }
// return &content, err
//}
//
////生成文件的md5
//func makeFileMd5(filePath string) uint32 {
//
// defer func() {
// if err_p := recover(); err_p != nil {
// logger.Println("makeFileMd5模块出错")
// }
// }()
//
// b, err := ioutil.ReadFile(filePath)
//
// if err != nil {
//
// logger.Println("makefilemd5读取文件失败: " + err.Error())
//
// //return 0
// //_, ok := err.(*os.PathError)
// //if ok {
// // logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// //} else {
// // logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// //}
// }
//
// return adler32.Checksum(b)
//}
//
////比对指纹文件中的md5和新读取文件的md5
//func comparedFileMd5(mapContent *map[string]uint32, md5 uint32, path string) bool {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("comparedFileMd5模块出错")
// }
// }()
//
// if contentMd5, ok := (*mapContent)[path]; ok {
// //如果md5存在,且不相同,则代表文件更新过,更新md5值,并且复制文件
// if md5 != contentMd5 {
// (*mapContent)[path] = md5
// return true
// } else {
// return false
// }
// } else {
// //如果md5不存在,则写入新的path
// (*mapContent)[path] = md5
// return true
// }
//}
//
////写入指纹文件
//func writeFileContent(mapContent *map[string]uint32, path string) error {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("writeFileContent模块出错")
// }
// }()
//
// jsonContent, err := json.Marshal(*mapContent)
//
// if err != nil {
// logger.Println("指纹文件 json Marshal 失败: " + err.Error())
// return err
// }
// err = ioutil.WriteFile(path, jsonContent, 0777)
// if err != nil {
// logger.Println("写入指纹文件失败: " + err.Error())
// return err
// }
// return nil
//}
| //cmd.Args = []string{"a",dst,frm};
//cmd.Stdin = strings.NewReader("some input")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
logger.Println("执行7zip压缩命令错误: " + err.Error())
//logger.Fatal(err)
return err
}
logger.Println("in all caps: %s\n", out.String())
return nil
}
//调用xcopy按日期增量复制文件
func xcopy(frm, dst, time string) error {
frm = strings.Replace(frm, "/", "\\", -1)
dst = strings.Replace(dst, "/", "\\", -1)
cmd := exec.Command("xcopy", frm, dst, "/s", "/e", "/y", "/d:"+time)
//cmd.Args = []string{"a",dst,frm};
//cmd.Stdin = strings.NewReader("some input")
//var out bytes.Buffer
//cmd.Stdout = &out
err := cmd.Run()
if err != nil {
logger.Println("执行xcopy压缩命令错误: " + err.Error())
//logger.Fatal(err)
return err
}
//logger.Printf("in all caps: %q\n", out.String())
return nil
}
func execu(name string) error {
dir, err := getCurrentDirectory()
if err != nil {
logger.Println("获取当前目录失败" + err.Error())
return err
}
cmd := exec.Command("cmd", "/C", dir+"\\"+name)
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
logger.Println(err.Error() + ": " + stderr.String())
return err
}
//logger.Println("Result: " + out.String())
return nil
}
//
//// 参数frm可以是文件或目录,不会给dst添加.zip扩展名
//func compress(frm, dst string) error {
// buf := bytes.NewBuffer(make([]byte, 0, 10*1024*1024)) // 创建一个读写缓冲
// myzip := zip.NewWriter(buf) // 用压缩器包装该缓冲
// // 用Walk方法来将所有目录下的文件写入zip
// err := filepath.Walk(frm, func(path string, info os.FileInfo, err error) error {
// var file []byte
// if err != nil {
// return filepath.SkipDir
// }
// header, err := zip.FileInfoHeader(info) // 转换为zip格式的文件信息
// if err != nil {
// return filepath.SkipDir
// }
// header.Name, _ = filepath.Rel(filepath.Dir(frm), path)
// if !info.IsDir() {
// // 确定采用的压缩算法(这个是内建注册的deflate)
// header.Method = 8
// file, err = ioutil.ReadFile(path) // 获取文件内容
// if err != nil {
// return filepath.SkipDir
// }
// } else {
// file = nil
// }
// // 上面的部分如果出错都返回filepath.SkipDir
// // 下面的部分如果出错都直接返回该错误
// // 目的是尽可能的压缩目录下的文件,同时保证zip文件格式正确
// w, err := myzip.CreateHeader(header) // 创建一条记录并写入文件信息
// if err != nil {
// return err
// }
// _, err = w.Write(file) // 非目录文件会写入数据,目录不会写入数据
// if err != nil { // 因为目录的内容可能会修改
// return err // 最关键的是我不知道咋获得目录文件的内容
// }
// return nil
// })
// if err != nil {
// return err
// }
// myzip.Close() // 关闭压缩器,让压缩器缓冲中的数据写入buf
// file, err := os.Create(dst) // 建立zip文件
// if err != nil {
// return err
// }
// defer file.Close()
// _, err = buf.WriteTo(file) // 将buf中的数据写入文件
// if err != nil {
// return err
// }
// return nil
//}
func ftpUploadFile(ftpserver, ftpuser, pw, localFile, remoteSavePath, saveName string) error {
ftp, err := ftp.Connect(ftpserver)
if err != nil {
logger.Println(err)
return nil
}
err = ftp.Login(ftpuser, pw)
if err != nil {
logger.Println(err)
return nil
}
//
//err = ftp.Delete(remoteSavePath +"/"+ saveName)
//
//if err != nil {
// logger.Println("删除文件失败")
//}
err = ftp.ChangeDir(remoteSavePath)
if err != nil {
logger.Println(err)
//return nil
err = ftp.MakeDir(remoteSavePath)
if err != nil {
logger.Println(err)
return nil
}
ftp.ChangeDir(remoteSavePath)
}
dir, err := ftp.CurrentDir()
if err != nil {
logger.Println(err)
return nil
}
logger.Println(dir)
file, err := os.Open(localFile)
if err != nil {
logger.Println(err)
return err
}
defer file.Close()
err = ftp.Stor(saveName, file)
if err != nil {
//ftp.Delete(saveName)
logger.Println(err)
return err
}
ftp.Logout()
ftp.Quit()
logger.Println("success upload file:", localFile)
return err
}
func get_external(FileName string) string {
data := FileName
if data != "" {
return data
}
resp, err := http.Get("http://myexternalip.com/raw")
if err != nil {
return ""
}
defer resp.Body.Close()
content, err := ioutil.ReadAll(resp.Body)
if err != nil {
// fall back to the intranet IP when the response body cannot be read
data = GetIntranetIp()
} else {
data = string(content)
}
//buf := new(bytes.Buffer)
//buf.ReadFrom(resp.Body)
//s := buf.String()
data = strings.Replace(data, ".", "-", -1)
data = strings.Replace(data, ":", "-", -1)
return data
}
func GetIntranetIp() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
logger.Println(err)
}
for _, address := range addrs {
// 检查ip地址判断是否回环地址
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
//logger.Println("ip:", ipnet.IP.String())
}
}
}
return ""
}
func getCurrentDirectory() (string, error) {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return "", err
}
return dir, nil
}
func TemplateSaveFile(tlpath, savepath string, datas interface{}) error {
tmpl, err := TemplateInit(tlpath)
if err != nil {
logger.Println("加载模板:" + tlpath + err.Error())
return err
}
data, err := TemplateExecute(tmpl, datas)
if err != nil {
logger.Println("生成模板:" + tlpath + err.Error())
return err
}
f, err := os.Create(savepath) //创建文件
if err != nil {
logger.Println("打开:" + savepath + err.Error())
return err
}
defer f.Close()
n, err := io.WriteString(f, data) //写入文件(字符串)
logger.Println("写入 %d 个字节n", n)
if err != nil {
logger.Println("生成:" + savepath + err.Error())
return err
}
return nil
}
func TemplateInit(templatePath string) (*template.Template, error) {
content, err := ioutil.ReadFile(templatePath)
if err != nil {
return nil, err
}
tmpl, err := template.New("test").Parse(string(content))
if err != nil {
logger.Println("file error: %v", err)
return nil, err
}
return tmpl, nil
}
func TemplateExecute(tmpl *template.Template, data interface{}) (string, error) {
buf := new(bytes.Buffer)
err := tmpl.Execute(buf, data)
if err != nil {
logger.Println("tmplate error: %v", err)
return "", err
}
return buf.String(), nil
} | //调用7zip压缩
func compress7zip(frm, dst string) error {
cmd := exec.Command("7z/7z.exe", "a", "-mx=1", "-v5g", dst, frm) | random_line_split |
Main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/alert0/backsyn/logger"
"github.com/jlaffaye/ftp"
)
var backFilePath string = "back.json"
////指纹集合文件名称
var hashFileName string = "hash.zlbf"
//var PATH = "";
//生成sql文件
var ORACLEBAKSQLPATHTL string = "oracle/backdirsql.tl"
var ORACLEBAKSQLPATH string = "bat/backdir.sql"
//生成执行sql文件
var ORACLEBAKPATHTL string = "oracle/backdir.tl"
var ORACLEBAKPATH string = "bat/backdir.bat"
//生成执行备份文件
var ORACLEBAKBATPATHTL string = "oracle/oracle.tl"
var ORACLEBAKBATPATH string = "bat/oracle.bat"
//定时任务
var SCHTASKSPATHTL string = "oracle/schtasks.tl"
var SCHTASKSPATH string = "bat/schtasks.bat"
//start.bat
var STARTPATHTL string = "oracle/start.tl"
var STARTPATH string = "bat/start.bat"
//设置文件
type Backinfo struct {
TargetPath string
BackPath []string
FtpIp string
FtpUserName string
FtpPassWord string
OracleBakPath string
OracleURL string
BackFileName string
}
func main() {
//dir2 , err2 := getCurrentDirectory()
//if err2 != nil {
// logger.Println("获取当前目录失败" + err2.Error() )
//
//}
//
//logger.Println("文件目录: " + dir2 )
//
info, err := readBackInfoContent()
if err != nil {
logger.Println("读取配置文件错误文件内容错误: " + err.Error())
}
//initHashfile()
if !checkFileIsExist(ORACLEBAKSQLPATH) {
initbak(info)
}
if len(os.Args) > 1 {
cmd := os.Args[1]
logger.Println("cmd " + cmd)
switch cmd {
case "o":
BakOracleBat(info.OracleBakPath)
case "f":
BakFiles(info)
default:
//logger.Println("删除指纹文件错误文件内容错误: " + err.Error())
BakOracleBat(info.OracleBakPath)
BakFiles(info)
}
} else {
BakOracleBat(info.OracleBakPath)
BakFiles(info)
}
os.Exit(0)
}
//
func initHashfile() error {
t := time.Now()
if 1 == t.Day() { //第一天
err := os.Remove(hashFileName) //删除文件test.txt
if err != nil {
logger.Println("删除指纹文件错误文件内容错误: " + err.Error())
}
}
//if err := createHashFile(); err != nil {
// logger.Println("读取指纹文件错误文件内容错误: " + err.Error())
// return err
//}
return nil
}
func initbak(info Backinfo) error {
OracleBakPath := strings.Replace(info.OracleBakPath, "/", "\\", -1)
var err = TemplateSaveFile(ORACLEBAKSQLPATHTL, ORACLEBAKSQLPATH, OracleBakPath)
if err != nil {
logger.Println("生成oracledir.sql 失败" + err.Error())
}
dir, err1 := getCurrentDirectory()
if err1 != nil {
logger.Println("获取当前目录失败" + err1.Error())
}
//logger.Println("ssssssssssss" + dir)
oracledir := map[string]string{
"Dir": dir,
"OracleBakPath": OracleBakPath,
}
err = TemplateSaveFile(ORACLEBAKPATHTL, ORACLEBAKPATH, oracledir)
if err != nil {
logger.Println("生成oracledir.bat 失败" + err.Error())
}
var oracledatatmp []string = strings.Split(info.OracleURL, "@")
if len(oracledatatmp) < 3 {
logger.Println("读取oracle配置信息失败")
return fmt.Errorf("OracleURL 格式应为 用户名@密码@数据库名")
}
oracleddata := map[string]string{
"OracleBakPath": OracleBakPath,
"UserName": oracledatatmp[0],
"PassWord": oracledatatmp[1],
"DBName": oracledatatmp[2],
}
err = TemplateSaveFile(ORACLEBAKBATPATHTL, ORACLEBAKBATPATH, oracleddata)
if err != nil {
logger.Println("生成oracle.bat 失败" + err.Error())
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
baktime := fmt.Sprintf("0%d:%d%d", r.Intn(6), r.Intn(5), r.Intn(9))
logger.Println(baktime)
schtasks := map[string]string{
"dir": dir,
"time": baktime,
}
err = TemplateSaveFile(SCHTASKSPATHTL, SCHTASKSPATH, schtasks)
if err != nil {
logger.Println("生成schtasks.bat 失败" + err.Error())
}
err = TemplateSaveFile(STARTPATHTL, STARTPATH, dir)
if err != nil {
logger.Println("生成start.bat 失败" + err.Error())
}
err = execu(SCHTASKSPATH)
if err != nil {
logger.Println("运行schtasks.bat 失败" + err.Error())
return err
}
return nil
}
//oracleback
func BakOracleBat(oraclepath string) error {
dir, err := getCurrentDirectory()
if err != nil {
logger.Println("获取当前目录失败" + err.Error())
return err
}
if !checkFileIsExist(filepath.Join(dir, oraclepath)) {
err := execu(ORACLEBAKPATH)
if err != nil {
logger.Println("运行文件失败" + ORACLEBAKPATH + err.Error())
return err
}
}
err = ex | n err
}
xcplasttime = "01-02-2006"
}
if err := tarpath(info, lasttime, xcplasttime); err != nil {
logger.Println("复制文件失败" + err.Error())
}
if err := zipfiles(info.TargetPath, lasttime); err != nil {
logger.Println("压缩文件失败" + err.Error())
}
var remoteSavePath = lastmoth + "^" + strings.Replace(get_external(info.BackFileName), ".", "-", -1)
var oracledatatmp []string = strings.Split(info.OracleURL, "@")
files, _ := ioutil.ReadDir(info.TargetPath)
for _, file := range files {
if file.IsDir() {
continue
} else {
logger.Println(file.Name())
ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, filepath.Join(info.TargetPath, file.Name()), remoteSavePath, oracledatatmp[0]+file.Name())
}
}
//var localFile = filepath.Join(info.TargetPath, lasttime+".7z")
//
//var oracledatatmp []string = strings.Split(info.OracleURL, "@")
//
//
//logger.Println("压缩文件", remoteSavePath, lasttime+".7z")
//
//var err = ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, localFile, remoteSavePath, lasttime+oracledatatmp[0]+".7z")
//
//if err != nil {
// logger.Println("上传ftp文件失败" + err.Error())
// //ftpUploadFile(info.FtpIp, info.FtpUserName, info.FtpPassWord, localFile, remoteSavePath, lasttime+oracledatatmp[0]+".7z")
//}
os.RemoveAll(info.TargetPath)
//return err
return nil
}
//读取back.json的配置文件
func readBackInfoContent() (Backinfo, error) {
file, err := os.Open(backFilePath)
if err != nil {
logger.Println("读取指纹文件内容错误: " + err.Error())
}
defer func() {
err := file.Close()
if err != nil {
logger.Println("close指纹文件 " + backFilePath + " 失败: " + err.Error())
}
}()
jsonContent, err := ioutil.ReadAll(file)
if err != nil {
logger.Println("读取指纹文件内容错误: " + err.Error())
}
//content := make(&backinfo)
var backinfo Backinfo
err = json.Unmarshal(jsonContent, &backinfo)
if err != nil {
logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
}
logger.Println(backinfo.BackPath[0])
return backinfo, err
}
//
//创建指纹文件
func createHashFile() error {
defer func() {
if err_p := recover(); err_p != nil {
logger.Println("createHashFile模块出错")
}
}()
var hashFilePath = filepath.Join(hashFileName)
if !checkFileIsExist(hashFilePath) {
err := ioutil.WriteFile(hashFilePath, []byte("{}"), 0777)
if err != nil {
logger.Println("创建指纹文件失败: " + err.Error())
return err
}
}
return nil
}
/**
* 判断文件是否存在 存在返回 true 不存在返回false
*/
func checkFileIsExist(filename string) bool {
var exist = true
if _, err := os.Stat(filename); os.IsNotExist(err) {
exist = false
}
return exist
}
//func backpath(list []string ,target string ) error {
//
// var hashMapContent ,err = readFileContent() // 读取指纹
//
// if(err != nil) {
// return err
// }
//
//
// for _, value := range list {
// //var targetPath = ""
// filepath.Walk(value, func(path string, f os.FileInfo, err error) error {
//
// var partPath, _ = filepath.Rel(value, path)
//
// var targetPath = filepath.Join(target, partPath)
//
// //path:原始文件地址,targetPath:备份文件地址
// //每个path都需要去比对md5文件中做比对,判断文件是否被修改过
// //如果文件是个目录则不写入指纹文件
// if f.IsDir() {
// copyFile(path, targetPath)
// } else {
// md5 := makeFileMd5(path) //获取文件md5
// isUpdate := comparedFileMd5(hashMapContent, md5, path)
// //如果修改过则复制文件,并更新md5文件
// if isUpdate {
// copyFile(path, targetPath)
// }
// //如果没有修改过则不执行任何操作
// }
// return nil
// })
// }
//
// var hashFilePath = filepath.Join(hashFileName)
// writeFileContent(hashMapContent, hashFilePath)
// //释放读取的指纹文件内存
// hashMapContent = nil
//
// return nil
//
//}
//
func tarpath(backinfo Backinfo, lasttime, time string) error {
//var hashMapContent, err = readFileContent() // 读取指纹
//
//if err != nil {
// return err
//}
for _, value := range backinfo.BackPath {
////var targetPath = ""
//filepath.Walk(value, func(path string, f os.FileInfo, err error) error {
//
var err = xcopy(value, backinfo.TargetPath+"/"+lasttime+"/", time)
if err != nil {
return err
}
logger.Println("执行xcopy: " + value + backinfo.TargetPath + "/" + lasttime + "/")
//var partPath, _ = filepath.Rel(value, path)
//
//var targetPath = filepath.Join(backinfo.TargetPath, time, partPath)
//
////path:原始文件地址,targetPath:备份文件地址
////每个path都需要去比对md5文件中做比对,判断文件是否被修改过
////如果文件是个目录则不写入指纹文件
//if f.IsDir() {
// copyFile(path, targetPath)
//} else {
// md5 := makeFileMd5(path) //获取文件md5
// isUpdate := comparedFileMd5(hashMapContent, md5, path)
// //如果修改过则复制文件,并更新md5文件
// if isUpdate {
// copyFile(path, targetPath)
// }
// //如果没有修改过则不执行任何操作
//}
//return nil
//})
}
//var hashFilePath = filepath.Join(hashFileName)
//err = writeFileContent(hashMapContent, hashFilePath)
//if err != nil {
// logger.Println("写入指纹文件失败" + err.Error())
//return err
//
//}
////释放读取的指纹文件内存
//hashMapContent = nil
return nil
}
func zipfiles(targetPath string, time string) error {
//压缩文件
var zip = filepath.Join(targetPath, time+".7z")
var ziptargetPath = filepath.Join(targetPath, time)
var err = compress7zip(ziptargetPath, zip)
if err != nil {
return err
}
////压缩文件
//var hashFilePath = filepath.Join(hashFileName)
//err = compress7zip(hashFilePath, ziptargetPath)
//
//if err != nil {
// return err
//}
return nil
}
//
//func copyFile(basePath, targetPath string) {
// defer func() {
// if err_p := recover(); err_p != nil {
// logger.Println("copyFile模块出错")
// }
// }()
//
// baseStat, err := os.Stat(basePath)
// if err != nil {
// logger.Panicln("需要备份的文件检测失败,文件出现问题,无法复制")
// return
// }
// //targetStat, err := os.Stat(targetPath)
// _, err = os.Stat(targetPath)
// if err != nil {
// if os.IsNotExist(err) {
// //如果目标文件不存在
// if baseStat.IsDir() {
// //如果缺失的是一个空目录
// errMkDir := os.MkdirAll(targetPath, 0777)
// if errMkDir != nil {
// logger.Println("创建目录 " + targetPath + " 失败")
// }
// } else {
// //如果缺失的是一个文件,则复制文件
// copyFileContent(basePath, targetPath)
// }
// } else {
// return
// }
// } else {
// //如果目标文件存在
// if baseStat.IsDir() {
// //如果是一个空目录
// } else {
// //如果是一个文件,则复制文件
// copyFileContent(basePath, targetPath)
// }
// }
//
//}
//
////复制文件内容
//func copyFileContent(basePath, targetPath string) {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("copyFileContent模块出错")
// }
// }()
//
// baseFile, err := os.Open(basePath)
// if err != nil {
// logger.Println("读取文件 " + basePath + " 失败")
// return
// }
// defer func() {
// err := baseFile.Close()
// if err != nil {
// logger.Println("close文件 " + basePath + " 失败: " + err.Error())
// }
// }()
// targetFile, err := os.Create(targetPath)
// if err != nil {
// logger.Println("创建文件 " + targetPath + " 失败: " + err.Error())
// return
// }
// defer func() {
// err := targetFile.Close()
// if err != nil {
// logger.Println("close文件 " + targetPath + " 失败: " + err.Error())
// }
// }()
// copyData, err := io.Copy(targetFile, baseFile)
// if err != nil {
// logger.Println("复制文件文件 " + basePath + " 失败: " + err.Error())
// }
// logger.Println("正在复制文件: " + basePath + " 大小为: " + strconv.FormatInt(copyData, 10))
//}
//
////读取整个指纹文件到内存
//func readFileContent() (*map[string]uint32, error) {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("readFileContent模块出错")
// }
// }()
// var hashFilePath = filepath.Join(hashFileName)
//
// file, err := os.Open(hashFilePath)
//
// if err != nil {
// logger.Println("读取指纹文件内容错误: " + err.Error())
// }
//
// defer func() {
// err := file.Close()
// if err != nil {
// logger.Println("close指纹文件 " + hashFilePath + " 失败: " + err.Error())
// }
// }()
//
// jsonContent, err := ioutil.ReadAll(file)
//
// if err != nil {
// logger.Println("读取指纹文件内容错误: " + err.Error())
// }
// content := make(map[string]uint32)
// err = json.Unmarshal(jsonContent, &content)
// if err != nil {
// logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// }
// return &content, err
//}
//
////生成文件的md5
//func makeFileMd5(filePath string) uint32 {
//
// defer func() {
// if err_p := recover(); err_p != nil {
// logger.Println("makeFileMd5模块出错")
// }
// }()
//
// b, err := ioutil.ReadFile(filePath)
//
// if err != nil {
//
// logger.Println("makefilemd5读取文件失败: " + err.Error())
//
// //return 0
// //_, ok := err.(*os.PathError)
// //if ok {
// // logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// //} else {
// // logger.Println("指纹文件 json Unmarshal 失败: " + err.Error())
// //}
// }
//
// return adler32.Checksum(b)
//}
//
////比对指纹文件中的md5和新读取文件的md5
//func comparedFileMd5(mapContent *map[string]uint32, md5 uint32, path string) bool {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("comparedFileMd5模块出错")
// }
// }()
//
// if contentMd5, ok := (*mapContent)[path]; ok {
// //如果md5存在,且不相同,则代表文件更新过,更新md5值,并且复制文件
// if md5 != contentMd5 {
// (*mapContent)[path] = md5
// return true
// } else {
// return false
// }
// } else {
// //如果md5不存在,则写入新的path
// (*mapContent)[path] = md5
// return true
// }
//}
//
////写入指纹文件
//func writeFileContent(mapContent *map[string]uint32, path string) error {
// defer func() {
// if err := recover(); err != nil {
// logger.Println("writeFileContent模块出错")
// }
// }()
//
// jsonContent, err := json.Marshal(*mapContent)
//
// if err != nil {
// logger.Println("指纹文件 json Marshal 失败: " + err.Error())
// return err
// }
// err = ioutil.WriteFile(path, jsonContent, 0777)
// if err != nil {
// logger.Println("写入指纹文件失败: " + err.Error())
// return err
// }
// return nil
//}
//调用7zip压缩
func compress7zip(frm, dst string) error {
cmd := exec.Command("7z/7z.exe", "a", "-mx=1", "-v5g", dst, frm)
//cmd.Args = []string{"a",dst,frm};
//cmd.Stdin = strings.NewReader("some input")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
logger.Println("执行7zip压缩命令错误: " + err.Error())
//logger.Fatal(err)
return err
}
logger.Println("in all caps: %s\n", out.String())
return nil
}
//调用xcopy按日期增量复制文件
func xcopy(frm, dst, time string) error {
frm = strings.Replace(frm, "/", "\\", -1)
dst = strings.Replace(dst, "/", "\\", -1)
cmd := exec.Command("xcopy", frm, dst, "/s", "/e", "/y", "/d:"+time)
//cmd.Args = []string{"a",dst,frm};
//cmd.Stdin = strings.NewReader("some input")
//var out bytes.Buffer
//cmd.Stdout = &out
err := cmd.Run()
if err != nil {
logger.Println("执行xcopy压缩命令错误: " + err.Error())
//logger.Fatal(err)
return err
}
//logger.Printf("in all caps: %q\n", out.String())
return nil
}
func execu(name string) error {
dir, err := getCurrentDirectory()
if err != nil {
logger.Println("获取当前目录失败" + err.Error())
return err
}
cmd := exec.Command("cmd", "/C", dir+"\\"+name)
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
logger.Println(err.Error() + ": " + stderr.String())
return err
}
//logger.Println("Result: " + out.String())
return nil
}
//
//// 参数frm可以是文件或目录,不会给dst添加.zip扩展名
//func compress(frm, dst string) error {
// buf := bytes.NewBuffer(make([]byte, 0, 10*1024*1024)) // 创建一个读写缓冲
// myzip := zip.NewWriter(buf) // 用压缩器包装该缓冲
// // 用Walk方法来将所有目录下的文件写入zip
// err := filepath.Walk(frm, func(path string, info os.FileInfo, err error) error {
// var file []byte
// if err != nil {
// return filepath.SkipDir
// }
// header, err := zip.FileInfoHeader(info) // 转换为zip格式的文件信息
// if err != nil {
// return filepath.SkipDir
// }
// header.Name, _ = filepath.Rel(filepath.Dir(frm), path)
// if !info.IsDir() {
//			// choose the compression method (8 = the built-in deflate)
//			header.Method = 8
//			file, err = ioutil.ReadFile(path) // read the file contents
// if err != nil {
// return filepath.SkipDir
// }
// } else {
// file = nil
// }
//		// Errors above return filepath.SkipDir; errors below are returned directly.
//		// The goal is to compress as much of the directory as possible while
//		// still producing a valid zip file.
//		w, err := myzip.CreateHeader(header) // create an entry and write the file info
// if err != nil {
// return err
// }
//		_, err = w.Write(file) // regular files get their data written; directories do not,
//		if err != nil { // because a directory's contents may change
//			return err // and there is no obvious way to read a directory's contents anyway
// }
// return nil
// })
// if err != nil {
// return err
// }
//	myzip.Close() // close the zip writer so its buffered data is flushed into buf
//	file, err := os.Create(dst) // create the zip file
// if err != nil {
// return err
// }
// defer file.Close()
//	_, err = buf.WriteTo(file) // write the buffered data into the file
// if err != nil {
// return err
// }
// return nil
//}
func ftpUploadFile(ftpserver, ftpuser, pw, localFile, remoteSavePath, saveName string) error {
ftp, err := ftp.Connect(ftpserver)
if err != nil {
logger.Println(err)
		return err
}
err = ftp.Login(ftpuser, pw)
if err != nil {
logger.Println(err)
		return err
}
//
//err = ftp.Delete(remoteSavePath +"/"+ saveName)
//
//if err != nil {
//	logger.Println("failed to delete the file")
//}
err = ftp.ChangeDir(remoteSavePath)
if err != nil {
logger.Println(err)
//return nil
err = ftp.MakeDir(remoteSavePath)
if err != nil {
logger.Println(err)
			return err
}
ftp.ChangeDir(remoteSavePath)
}
dir, err := ftp.CurrentDir()
if err != nil {
logger.Println(err)
		return err
}
logger.Println(dir)
	file, err := os.Open(localFile)
	if err != nil {
		logger.Println(err)
		return err
	}
defer file.Close()
err = ftp.Stor(saveName, file)
if err != nil {
//ftp.Delete(saveName)
logger.Println(err)
return err
}
ftp.Logout()
ftp.Quit()
logger.Println("success upload file:", localFile)
return err
}
func get_external(FileName string) string {
data := FileName
if data != "" {
return data
}
resp, err := http.Get("http://myexternalip.com/raw")
if err != nil {
return ""
}
defer resp.Body.Close()
	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// fall back to the intranet IP when the external lookup cannot be read
		data = GetIntranetIp()
	} else {
		data = string(content)
	}
	//buf := new(bytes.Buffer)
	//buf.ReadFrom(resp.Body)
	//s := buf.String()
	data = strings.Replace(data, ".", "-", -1)
data = strings.Replace(data, ":", "-", -1)
return data
}
func GetIntranetIp() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
logger.Println(err)
}
for _, address := range addrs {
		// check whether the IP address is a loopback address
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
//logger.Println("ip:", ipnet.IP.String())
}
}
}
return ""
}
func getCurrentDirectory() (string, error) {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return "", err
}
return dir, nil
}
func TemplateSaveFile(tlpath, savepath string, datas interface{}) error {
tmpl, err := TemplateInit(tlpath)
if err != nil {
logger.Println("加载模板:" + tlpath + err.Error())
return err
}
data, err := TemplateExecute(tmpl, datas)
if err != nil {
logger.Println("生成模板:" + tlpath + err.Error())
return err
}
	f, err := os.Create(savepath) // create the output file
if err != nil {
logger.Println("打开:" + savepath + err.Error())
return err
}
defer f.Close()
	n, err := io.WriteString(f, data) // write the string to the file
	logger.Printf("wrote %d bytes\n", n)
if err != nil {
logger.Println("生成:" + savepath + err.Error())
return err
}
return nil
}
func TemplateInit(templatePath string) (*template.Template, error) {
content, err := ioutil.ReadFile(templatePath)
if err != nil {
return nil, err
}
tmpl, err := template.New("test").Parse(string(content))
if err != nil {
logger.Println("file error: %v", err)
return nil, err
}
return tmpl, nil
}
func TemplateExecute(tmpl *template.Template, data interface{}) (string, error) {
buf := new(bytes.Buffer)
err := tmpl.Execute(buf, data)
if err != nil {
logger.Println("tmplate error: %v", err)
return "", err
}
return buf.String(), nil
}
| ecu(ORACLEBAKBATPATH)
if err != nil {
logger.Println("运行文件失败" + ORACLEBAKBATPATH + err.Error())
return err
}
return nil
}
func BakFiles(info Backinfo) error {
var xcplasttime = time.Now().AddDate(0, 0, -1).Format("01-02-2006")
var lasttime = time.Now().Format("2006-01-02")
var lastmoth = time.Now().Format("2006-01")
if !checkFileIsExist(hashFileName) {
if err := createHashFile(); err != nil {
logger.Println("读取指纹文件错误文件内容错误: " + err.Error())
//retur | identifier_body |
spatial_ornstein_uhlenbeck.py | #!/usr/bin/env python
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2016, The Karenina Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "0.0.1-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "[email protected]"
__status__ = "Development"
import karenina.visualization
from karenina.experiment import Experiment
from optparse import OptionParser
from optparse import OptionGroup
from os.path import join,isdir,realpath,dirname
import os
from pkg_resources import resource_filename
from os import makedirs
import pandas as pd
def make_option_parser():
"""Return an optparse OptionParser object"""
parser = OptionParser(usage = "%prog -o ./simulation_results",
description = "This script simulates microbiome " +
"change over time using Ornstein-Uhlenbeck (OU) models. These are " +
"similar to Brownian motion models, with the exception that they " +
"include reversion to a mean. Output is a tab-delimited data table " +
"and figures.",
version = __version__)
required_options = OptionGroup(parser, "Required options")
required_options.add_option('-o','--output', type="string",
help='the output folder for the simulation results')
parser.add_option_group(required_options)
optional_options = OptionGroup(parser, "Optional options")
optional_options.add_option('--pert_file_path',\
default = os.path.abspath(resource_filename('karenina.data','set_xyz_lambda_zero.tsv')),\
type = "string",\
help = 'file path to a perturbation file specifying parameters for' +
' the simulation results [default: %default]')
optional_options.add_option('--treatment_names',\
default="control,destabilizing_treatment",type="string",\
help="Comma seperated list of treatment named [default:%default]")
optional_options.add_option('-n','--n_individuals',\
default="35,35",type="string",\
help='Comma-separated number of individuals to simulate per treatment.'+\
'Note: This value must be enclosed in quotes. Example: "35,35". [default: %default]')
optional_options.add_option('-t', '--n_timepoints',default=10, type="int",\
help='Number of timepoints to simulate. (One number, which is the ' +
'same for all treatments) [default: %default]')
optional_options.add_option('-p','--perturbation_timepoint',\
default=5,type="int",\
help='Timepoint at which to apply a perturbation. Must be less than ' +
'--n_timepoints [default: %default]')
optional_options.add_option('-d','--perturbation_duration',\
default=100,type="int",\
help='Duration that the perturbation lasts. [default: %default]')
optional_options.add_option('--interindividual_variation',
default=0.01,type="float",help='Starting variability between ' +
'individuals. [default: %default]')
optional_options.add_option('--delta',default=0.25,type="float",
help='Starting delta parameter for Brownian motion and ' +
'Ornstein-Uhlenbeck processes. A higher number indicates more ' +
'variability over time. [default: %default]')
optional_options.add_option('-l','--L',default=0.20,type="float",
help='Starting lambda parameter for Ornstein-Uhlenbeck processes. A ' +
        'higher number indicates a greater tendency to revert to the mean ' +
'value. [default: %default]')
optional_options.add_option('--fixed_start_pos',default=None,type="string",
help='Starting x,y,z position for all points, as comma separated ' +
'floating point values, e.g. 0.0,0.1,0.2. If not supplied, starting ' +
'positions will be randomized based on the interindividual_variation ' +
'parameter [default: %default]')
optional_options.add_option('-v','--verbose', action="store_true", dest="verbose", default=False,
help='-v, allows for verbose output' +
' [default: %default]')
parser.add_option_group(optional_options)
return parser
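# Illustrative sketch (not part of the original module) of the Ornstein-Uhlenbeck
# update that the options above parameterize, assuming an attractor position mu,
# reversion strength lambda_ (--L), step size delta (--delta), and a unit timestep;
# the exact discretization used by karenina.experiment may differ:
#
#     x_next = x + lambda_ * (mu - x) + delta * numpy.random.normal(0, 1)
#
# With lambda_ = 0 this reduces to plain Brownian motion, which is what the bundled
# set_xyz_lambda_zero.tsv perturbation switches the simulation to.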
def check_perturbation_timepoint(perturbation_timepoint,n_timepoints):
"""
Raise ValueError if perturbation_timepoint is < 0 or >n_timepoints
:param perturbation_timepoint: defined timepoint for perturbation application
:param n_timepoints: number of timepoints
"""
if perturbation_timepoint and perturbation_timepoint >= n_timepoints:
raise ValueError("Perturbation timepoint must be before the last timepoint")
if perturbation_timepoint < 0:
raise ValueError("Perturbation timepoint must be positive")
def ensure_exists(output_dir):
"""
Ensure that output_dir exists
:param output_dir: path to output directory
"""
try:
makedirs(output_dir)
except OSError:
if not isdir(output_dir):
raise
def write_options_to_log(log, opts):
"""
Writes user's input options to log file
:param log: log filename
:param opts: options
"""
logfile = open(join(opts.output, log),"w+")
logfile_header = "#Karenina Simulation Logfile\n"
logfile.write(logfile_header)
logfile.write("Output folder: %s\n" %(str(opts.output)))
logfile.write("Treatment names: " + (str(opts.treatment_names)) + "\n")
n_individuals_line = "Number of individuals: %s\n"\
%(str(opts.n_individuals))
logfile.write(n_individuals_line)
logfile.write("Number of timepoints: " + (str(opts.n_timepoints)) + "\n")
logfile.write("Perturbation timepoint: " +
(str(opts.perturbation_timepoint)) + "\n")
logfile.write("Perturbation duration: " +
(str(opts.perturbation_duration)) + "\n")
logfile.write("Interindividual variation: " +
(str(opts.interindividual_variation)) + "\n")
logfile.write("Delta: " + (str(opts.delta)) + "\n")
logfile.write("Lambda: " + (str(opts.L)) + "\n")
logfile.write("Fixed starting position: " + (str(opts.fixed_start_pos)) +
"\n")
logfile.close()
def parse_perturbation_file(pert_file_path, perturbation_timepoint,perturbation_duration):
"""
Return a list of perturbations
infile -- a .tsv file describing one perturbation per line
assume input file is correctly formatted (no warnings if not)
    NOTE: each perturbation should be in the format:
set_xyz_lambda_low = {"start":opts.perturbation_timepoint,
"end":opts.perturbation_timepoint + opts.perturbation_duration,
"params":{"lambda":0.005}, "update_mode":"replace", "axes":["x","y","z"]}
:param pert_file_path: perturbation file path
:param perturbation_timepoint: timepoint to apply perturbation
:param perturbation_duration: duration of perturbation
:return: perturbation list parsed from pert file contents
"""
perturbations_list = []
if (pert_file_path != None):
|
else:
set_xyz_lambda_zero = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration,\
"params":{"lambda":0.000},"update_mode":"replace","axes":["x","y","z"]}
perturbations_list.append(set_xyz_lambda_zero)
return perturbations_list
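# Illustrative example (hypothetical values) of a perturbation table accepted by
# parse_perturbation_file above; the file is tab-separated, and each data row becomes
# one perturbation dict whose "start"/"end" come from the command-line options:
#
#     params          values      update_mode   axes
#     lambda          0.005       replace       x,y,z
#     lambda,delta    0.002,0.3   replace       x,y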
def main():
parser = make_option_parser()
opts, args = parser.parse_args()
if opts.output is None:
parser.print_help()
exit()
write_options_to_log("log.txt", opts)
verbose = opts.verbose
#Check timepoints
check_perturbation_timepoint(opts.perturbation_timepoint,opts.n_timepoints)
#Set the base parameters for microbiome change over time
#in unperturbed individuals.
individual_base_params = {"lambda":opts.L,"delta":opts.delta,\
"interindividual_variation":opts.interindividual_variation}
if opts.fixed_start_pos:
try:
x,y,z = map(float,opts.fixed_start_pos.split(","))
individual_base_params['x']=x
individual_base_params['y']=y
individual_base_params['z']=z
except:
print ("Supplied value for fixed start position after parsing:",opts.fixed_start_pos)
raise ValueError('Problem with --fixed_start_pos. Got %s Please supply x,y,z values in the range (-1,1) separated by commas and enclosed in quotes. Example: "0.1,-0.2,0.3"'% opts.fixed_start_pos)
#Set up the treatments to be applied
perturbations = parse_perturbation_file(opts.pert_file_path,\
opts.perturbation_timepoint, opts.perturbation_duration)
treatments = [[], perturbations]
treatment_names = opts.treatment_names.split(",")
if verbose:
print("Raw number of individuals from user:",opts.n_individuals)
print("n_individuals",opts.n_individuals.split(','))
n_individuals = list(map(int,opts.n_individuals.split(",")))
if verbose:
print ("**Experiment Design**")
print ("treatments:",treatment_names)
print ("n_individuals:",n_individuals)
print ("interindividual_variation",opts.interindividual_variation)
print ("treatment_effects:",treatments)
print ("individual_base_params:",individual_base_params)
experiment = Experiment(treatment_names,n_individuals,opts.n_timepoints,\
individual_base_params,treatments,opts.interindividual_variation, verbose)
experiment.simulate_timesteps(0,opts.n_timepoints, verbose)
experiment.write_to_movie_file(opts.output, verbose)
if __name__ == "__main__":
main()
| df = pd.read_csv(pert_file_path, sep = "\t")
headers_list = list(df)
for index, row in df.iterrows():
a_perturbation = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration}
required_headers_checker = {"params" : False, "values" : False,
"update_mode" : False, "axes" : False}
for header in headers_list:
header_lowercase = header.lower()
if header_lowercase in ("parameter", "parameters", "param",\
"params"):
required_headers_checker["params"] = True
params = row[header].split(",")
elif header_lowercase in ("value", "values", "val", "vals"):
required_headers_checker["values"] = True
values = str(row[header]).split(",")
elif header_lowercase in ("update_mode", "update_modes",\
"update mode", "update modes"):
required_headers_checker["update_mode"] = True
update_mode = row[header]
elif header_lowercase in ("axes", "axis"):
required_headers_checker["axes"] = True
axes = row[header].split(",")
else:
raise ValueError("Could not identify header name in " + \
"perturbations file")
missing_headers_error_message = ""
for each_checker in required_headers_checker:
if required_headers_checker[each_checker] == False:
missing_headers_error_message += each_checker + " "
if missing_headers_error_message != "":
missing_headers_error_message = "Missing the following " +\
"header(s): " + missing_headers_error_message
raise ValueError(missing_headers_error_message)
if len(params) != len(values):
raise ValueError("Number of parameters does not match the " + \
"number of values")
a_perturbation["params"] = {}
for idx, single_param in enumerate(params):
a_perturbation["params"][single_param] = float(values[idx])
a_perturbation["update_mode"] = update_mode
a_perturbation["axes"] = axes
perturbations_list.append(a_perturbation) | conditional_block |
spatial_ornstein_uhlenbeck.py | #!/usr/bin/env python
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2016, The Karenina Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "0.0.1-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "[email protected]"
__status__ = "Development"
import karenina.visualization
from karenina.experiment import Experiment
from optparse import OptionParser
from optparse import OptionGroup
from os.path import join,isdir,realpath,dirname
import os
from pkg_resources import resource_filename
from os import makedirs
import pandas as pd
def make_option_parser():
"""Return an optparse OptionParser object"""
parser = OptionParser(usage = "%prog -o ./simulation_results",
description = "This script simulates microbiome " +
"change over time using Ornstein-Uhlenbeck (OU) models. These are " +
"similar to Brownian motion models, with the exception that they " +
"include reversion to a mean. Output is a tab-delimited data table " +
"and figures.",
version = __version__)
required_options = OptionGroup(parser, "Required options")
required_options.add_option('-o','--output', type="string",
help='the output folder for the simulation results')
parser.add_option_group(required_options)
optional_options = OptionGroup(parser, "Optional options")
optional_options.add_option('--pert_file_path',\
default = os.path.abspath(resource_filename('karenina.data','set_xyz_lambda_zero.tsv')),\
type = "string",\
help = 'file path to a perturbation file specifying parameters for' +
' the simulation results [default: %default]')
optional_options.add_option('--treatment_names',\
default="control,destabilizing_treatment",type="string",\
help="Comma seperated list of treatment named [default:%default]")
optional_options.add_option('-n','--n_individuals',\
default="35,35",type="string",\
help='Comma-separated number of individuals to simulate per treatment.'+\
'Note: This value must be enclosed in quotes. Example: "35,35". [default: %default]')
optional_options.add_option('-t', '--n_timepoints',default=10, type="int",\
help='Number of timepoints to simulate. (One number, which is the ' +
'same for all treatments) [default: %default]')
optional_options.add_option('-p','--perturbation_timepoint',\
default=5,type="int",\
help='Timepoint at which to apply a perturbation. Must be less than ' +
'--n_timepoints [default: %default]')
optional_options.add_option('-d','--perturbation_duration',\
default=100,type="int",\
help='Duration that the perturbation lasts. [default: %default]')
optional_options.add_option('--interindividual_variation',
default=0.01,type="float",help='Starting variability between ' +
'individuals. [default: %default]')
optional_options.add_option('--delta',default=0.25,type="float",
help='Starting delta parameter for Brownian motion and ' +
'Ornstein-Uhlenbeck processes. A higher number indicates more ' +
'variability over time. [default: %default]')
optional_options.add_option('-l','--L',default=0.20,type="float",
help='Starting lambda parameter for Ornstein-Uhlenbeck processes. A ' +
        'higher number indicates a greater tendency to revert to the mean ' +
'value. [default: %default]')
optional_options.add_option('--fixed_start_pos',default=None,type="string",
help='Starting x,y,z position for all points, as comma separated ' +
'floating point values, e.g. 0.0,0.1,0.2. If not supplied, starting ' +
'positions will be randomized based on the interindividual_variation ' +
'parameter [default: %default]')
optional_options.add_option('-v','--verbose', action="store_true", dest="verbose", default=False,
help='-v, allows for verbose output' +
' [default: %default]')
parser.add_option_group(optional_options)
return parser
def check_perturbation_timepoint(perturbation_timepoint,n_timepoints):
|
def ensure_exists(output_dir):
"""
Ensure that output_dir exists
:param output_dir: path to output directory
"""
try:
makedirs(output_dir)
except OSError:
if not isdir(output_dir):
raise
def write_options_to_log(log, opts):
"""
Writes user's input options to log file
:param log: log filename
:param opts: options
"""
logfile = open(join(opts.output, log),"w+")
logfile_header = "#Karenina Simulation Logfile\n"
logfile.write(logfile_header)
logfile.write("Output folder: %s\n" %(str(opts.output)))
logfile.write("Treatment names: " + (str(opts.treatment_names)) + "\n")
n_individuals_line = "Number of individuals: %s\n"\
%(str(opts.n_individuals))
logfile.write(n_individuals_line)
logfile.write("Number of timepoints: " + (str(opts.n_timepoints)) + "\n")
logfile.write("Perturbation timepoint: " +
(str(opts.perturbation_timepoint)) + "\n")
logfile.write("Perturbation duration: " +
(str(opts.perturbation_duration)) + "\n")
logfile.write("Interindividual variation: " +
(str(opts.interindividual_variation)) + "\n")
logfile.write("Delta: " + (str(opts.delta)) + "\n")
logfile.write("Lambda: " + (str(opts.L)) + "\n")
logfile.write("Fixed starting position: " + (str(opts.fixed_start_pos)) +
"\n")
logfile.close()
def parse_perturbation_file(pert_file_path, perturbation_timepoint,perturbation_duration):
"""
Return a list of perturbations
infile -- a .tsv file describing one perturbation per line
assume input file is correctly formatted (no warnings if not)
    NOTE: each perturbation should be in the format:
set_xyz_lambda_low = {"start":opts.perturbation_timepoint,
"end":opts.perturbation_timepoint + opts.perturbation_duration,
"params":{"lambda":0.005}, "update_mode":"replace", "axes":["x","y","z"]}
:param pert_file_path: perturbation file path
:param perturbation_timepoint: timepoint to apply perturbation
:param perturbation_duration: duration of perturbation
:return: perturbation list parsed from pert file contents
"""
perturbations_list = []
if (pert_file_path != None):
df = pd.read_csv(pert_file_path, sep = "\t")
headers_list = list(df)
for index, row in df.iterrows():
a_perturbation = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration}
required_headers_checker = {"params" : False, "values" : False,
"update_mode" : False, "axes" : False}
for header in headers_list:
header_lowercase = header.lower()
if header_lowercase in ("parameter", "parameters", "param",\
"params"):
required_headers_checker["params"] = True
params = row[header].split(",")
elif header_lowercase in ("value", "values", "val", "vals"):
required_headers_checker["values"] = True
values = str(row[header]).split(",")
elif header_lowercase in ("update_mode", "update_modes",\
"update mode", "update modes"):
required_headers_checker["update_mode"] = True
update_mode = row[header]
elif header_lowercase in ("axes", "axis"):
required_headers_checker["axes"] = True
axes = row[header].split(",")
else:
raise ValueError("Could not identify header name in " + \
"perturbations file")
missing_headers_error_message = ""
for each_checker in required_headers_checker:
if required_headers_checker[each_checker] == False:
missing_headers_error_message += each_checker + " "
if missing_headers_error_message != "":
missing_headers_error_message = "Missing the following " +\
"header(s): " + missing_headers_error_message
raise ValueError(missing_headers_error_message)
if len(params) != len(values):
raise ValueError("Number of parameters does not match the " + \
"number of values")
a_perturbation["params"] = {}
for idx, single_param in enumerate(params):
a_perturbation["params"][single_param] = float(values[idx])
a_perturbation["update_mode"] = update_mode
a_perturbation["axes"] = axes
perturbations_list.append(a_perturbation)
else:
set_xyz_lambda_zero = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration,\
"params":{"lambda":0.000},"update_mode":"replace","axes":["x","y","z"]}
perturbations_list.append(set_xyz_lambda_zero)
return perturbations_list
def main():
parser = make_option_parser()
opts, args = parser.parse_args()
if opts.output is None:
parser.print_help()
exit()
write_options_to_log("log.txt", opts)
verbose = opts.verbose
#Check timepoints
check_perturbation_timepoint(opts.perturbation_timepoint,opts.n_timepoints)
#Set the base parameters for microbiome change over time
#in unperturbed individuals.
individual_base_params = {"lambda":opts.L,"delta":opts.delta,\
"interindividual_variation":opts.interindividual_variation}
if opts.fixed_start_pos:
try:
x,y,z = map(float,opts.fixed_start_pos.split(","))
individual_base_params['x']=x
individual_base_params['y']=y
individual_base_params['z']=z
except:
print ("Supplied value for fixed start position after parsing:",opts.fixed_start_pos)
raise ValueError('Problem with --fixed_start_pos. Got %s Please supply x,y,z values in the range (-1,1) separated by commas and enclosed in quotes. Example: "0.1,-0.2,0.3"'% opts.fixed_start_pos)
#Set up the treatments to be applied
perturbations = parse_perturbation_file(opts.pert_file_path,\
opts.perturbation_timepoint, opts.perturbation_duration)
treatments = [[], perturbations]
treatment_names = opts.treatment_names.split(",")
if verbose:
print("Raw number of individuals from user:",opts.n_individuals)
print("n_individuals",opts.n_individuals.split(','))
n_individuals = list(map(int,opts.n_individuals.split(",")))
if verbose:
print ("**Experiment Design**")
print ("treatments:",treatment_names)
print ("n_individuals:",n_individuals)
print ("interindividual_variation",opts.interindividual_variation)
print ("treatment_effects:",treatments)
print ("individual_base_params:",individual_base_params)
experiment = Experiment(treatment_names,n_individuals,opts.n_timepoints,\
individual_base_params,treatments,opts.interindividual_variation, verbose)
experiment.simulate_timesteps(0,opts.n_timepoints, verbose)
experiment.write_to_movie_file(opts.output, verbose)
if __name__ == "__main__":
main()
| """
Raise ValueError if perturbation_timepoint is < 0 or >n_timepoints
:param perturbation_timepoint: defined timepoint for perturbation application
:param n_timepoints: number of timepoints
"""
if perturbation_timepoint and perturbation_timepoint >= n_timepoints:
raise ValueError("Perturbation timepoint must be before the last timepoint")
if perturbation_timepoint < 0:
raise ValueError("Perturbation timepoint must be positive") | identifier_body |
spatial_ornstein_uhlenbeck.py | #!/usr/bin/env python
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2016, The Karenina Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "0.0.1-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "[email protected]"
__status__ = "Development"
import karenina.visualization
from karenina.experiment import Experiment
from optparse import OptionParser
from optparse import OptionGroup
from os.path import join,isdir,realpath,dirname
import os
from pkg_resources import resource_filename
from os import makedirs
import pandas as pd
def make_option_parser():
"""Return an optparse OptionParser object"""
parser = OptionParser(usage = "%prog -o ./simulation_results",
description = "This script simulates microbiome " +
"change over time using Ornstein-Uhlenbeck (OU) models. These are " +
"similar to Brownian motion models, with the exception that they " +
"include reversion to a mean. Output is a tab-delimited data table " +
"and figures.",
version = __version__)
required_options = OptionGroup(parser, "Required options")
required_options.add_option('-o','--output', type="string",
help='the output folder for the simulation results')
parser.add_option_group(required_options)
optional_options = OptionGroup(parser, "Optional options")
optional_options.add_option('--pert_file_path',\
default = os.path.abspath(resource_filename('karenina.data','set_xyz_lambda_zero.tsv')),\
type = "string",\
help = 'file path to a perturbation file specifying parameters for' +
' the simulation results [default: %default]')
optional_options.add_option('--treatment_names',\
default="control,destabilizing_treatment",type="string",\
help="Comma seperated list of treatment named [default:%default]")
optional_options.add_option('-n','--n_individuals',\
default="35,35",type="string",\
help='Comma-separated number of individuals to simulate per treatment.'+\
'Note: This value must be enclosed in quotes. Example: "35,35". [default: %default]')
optional_options.add_option('-t', '--n_timepoints',default=10, type="int",\
help='Number of timepoints to simulate. (One number, which is the ' +
'same for all treatments) [default: %default]')
optional_options.add_option('-p','--perturbation_timepoint',\
default=5,type="int",\
help='Timepoint at which to apply a perturbation. Must be less than ' +
'--n_timepoints [default: %default]')
optional_options.add_option('-d','--perturbation_duration',\
default=100,type="int",\
help='Duration that the perturbation lasts. [default: %default]')
optional_options.add_option('--interindividual_variation',
default=0.01,type="float",help='Starting variability between ' +
'individuals. [default: %default]')
optional_options.add_option('--delta',default=0.25,type="float",
help='Starting delta parameter for Brownian motion and ' +
'Ornstein-Uhlenbeck processes. A higher number indicates more ' +
'variability over time. [default: %default]')
optional_options.add_option('-l','--L',default=0.20,type="float",
help='Starting lambda parameter for Ornstein-Uhlenbeck processes. A ' +
        'higher number indicates a greater tendency to revert to the mean ' +
'value. [default: %default]')
optional_options.add_option('--fixed_start_pos',default=None,type="string",
help='Starting x,y,z position for all points, as comma separated ' +
'floating point values, e.g. 0.0,0.1,0.2. If not supplied, starting ' +
'positions will be randomized based on the interindividual_variation ' +
'parameter [default: %default]')
optional_options.add_option('-v','--verbose', action="store_true", dest="verbose", default=False,
help='-v, allows for verbose output' +
' [default: %default]')
parser.add_option_group(optional_options)
return parser
def check_perturbation_timepoint(perturbation_timepoint,n_timepoints):
"""
Raise ValueError if perturbation_timepoint is < 0 or >n_timepoints
:param perturbation_timepoint: defined timepoint for perturbation application
:param n_timepoints: number of timepoints
"""
if perturbation_timepoint and perturbation_timepoint >= n_timepoints:
raise ValueError("Perturbation timepoint must be before the last timepoint")
if perturbation_timepoint < 0:
raise ValueError("Perturbation timepoint must be positive")
def ensure_exists(output_dir):
"""
Ensure that output_dir exists
:param output_dir: path to output directory
"""
try:
makedirs(output_dir)
except OSError:
if not isdir(output_dir):
raise
def | (log, opts):
"""
Writes user's input options to log file
:param log: log filename
:param opts: options
"""
logfile = open(join(opts.output, log),"w+")
logfile_header = "#Karenina Simulation Logfile\n"
logfile.write(logfile_header)
logfile.write("Output folder: %s\n" %(str(opts.output)))
logfile.write("Treatment names: " + (str(opts.treatment_names)) + "\n")
n_individuals_line = "Number of individuals: %s\n"\
%(str(opts.n_individuals))
logfile.write(n_individuals_line)
logfile.write("Number of timepoints: " + (str(opts.n_timepoints)) + "\n")
logfile.write("Perturbation timepoint: " +
(str(opts.perturbation_timepoint)) + "\n")
logfile.write("Perturbation duration: " +
(str(opts.perturbation_duration)) + "\n")
logfile.write("Interindividual variation: " +
(str(opts.interindividual_variation)) + "\n")
logfile.write("Delta: " + (str(opts.delta)) + "\n")
logfile.write("Lambda: " + (str(opts.L)) + "\n")
logfile.write("Fixed starting position: " + (str(opts.fixed_start_pos)) +
"\n")
logfile.close()
def parse_perturbation_file(pert_file_path, perturbation_timepoint,perturbation_duration):
"""
Return a list of perturbations
infile -- a .tsv file describing one perturbation per line
assume input file is correctly formatted (no warnings if not)
    NOTE: each perturbation should be in the format:
set_xyz_lambda_low = {"start":opts.perturbation_timepoint,
"end":opts.perturbation_timepoint + opts.perturbation_duration,
"params":{"lambda":0.005}, "update_mode":"replace", "axes":["x","y","z"]}
:param pert_file_path: perturbation file path
:param perturbation_timepoint: timepoint to apply perturbation
:param perturbation_duration: duration of perturbation
:return: perturbation list parsed from pert file contents
"""
perturbations_list = []
if (pert_file_path != None):
df = pd.read_csv(pert_file_path, sep = "\t")
headers_list = list(df)
for index, row in df.iterrows():
a_perturbation = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration}
required_headers_checker = {"params" : False, "values" : False,
"update_mode" : False, "axes" : False}
for header in headers_list:
header_lowercase = header.lower()
if header_lowercase in ("parameter", "parameters", "param",\
"params"):
required_headers_checker["params"] = True
params = row[header].split(",")
elif header_lowercase in ("value", "values", "val", "vals"):
required_headers_checker["values"] = True
values = str(row[header]).split(",")
elif header_lowercase in ("update_mode", "update_modes",\
"update mode", "update modes"):
required_headers_checker["update_mode"] = True
update_mode = row[header]
elif header_lowercase in ("axes", "axis"):
required_headers_checker["axes"] = True
axes = row[header].split(",")
else:
raise ValueError("Could not identify header name in " + \
"perturbations file")
missing_headers_error_message = ""
for each_checker in required_headers_checker:
if required_headers_checker[each_checker] == False:
missing_headers_error_message += each_checker + " "
if missing_headers_error_message != "":
missing_headers_error_message = "Missing the following " +\
"header(s): " + missing_headers_error_message
raise ValueError(missing_headers_error_message)
if len(params) != len(values):
raise ValueError("Number of parameters does not match the " + \
"number of values")
a_perturbation["params"] = {}
for idx, single_param in enumerate(params):
a_perturbation["params"][single_param] = float(values[idx])
a_perturbation["update_mode"] = update_mode
a_perturbation["axes"] = axes
perturbations_list.append(a_perturbation)
else:
set_xyz_lambda_zero = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration,\
"params":{"lambda":0.000},"update_mode":"replace","axes":["x","y","z"]}
perturbations_list.append(set_xyz_lambda_zero)
return perturbations_list
def main():
parser = make_option_parser()
opts, args = parser.parse_args()
if opts.output is None:
parser.print_help()
exit()
write_options_to_log("log.txt", opts)
verbose = opts.verbose
#Check timepoints
check_perturbation_timepoint(opts.perturbation_timepoint,opts.n_timepoints)
#Set the base parameters for microbiome change over time
#in unperturbed individuals.
individual_base_params = {"lambda":opts.L,"delta":opts.delta,\
"interindividual_variation":opts.interindividual_variation}
if opts.fixed_start_pos:
try:
x,y,z = map(float,opts.fixed_start_pos.split(","))
individual_base_params['x']=x
individual_base_params['y']=y
individual_base_params['z']=z
except:
print ("Supplied value for fixed start position after parsing:",opts.fixed_start_pos)
raise ValueError('Problem with --fixed_start_pos. Got %s Please supply x,y,z values in the range (-1,1) separated by commas and enclosed in quotes. Example: "0.1,-0.2,0.3"'% opts.fixed_start_pos)
#Set up the treatments to be applied
perturbations = parse_perturbation_file(opts.pert_file_path,\
opts.perturbation_timepoint, opts.perturbation_duration)
treatments = [[], perturbations]
treatment_names = opts.treatment_names.split(",")
if verbose:
print("Raw number of individuals from user:",opts.n_individuals)
print("n_individuals",opts.n_individuals.split(','))
n_individuals = list(map(int,opts.n_individuals.split(",")))
if verbose:
print ("**Experiment Design**")
print ("treatments:",treatment_names)
print ("n_individuals:",n_individuals)
print ("interindividual_variation",opts.interindividual_variation)
print ("treatment_effects:",treatments)
print ("individual_base_params:",individual_base_params)
experiment = Experiment(treatment_names,n_individuals,opts.n_timepoints,\
individual_base_params,treatments,opts.interindividual_variation, verbose)
experiment.simulate_timesteps(0,opts.n_timepoints, verbose)
experiment.write_to_movie_file(opts.output, verbose)
if __name__ == "__main__":
main()
| write_options_to_log | identifier_name |
spatial_ornstein_uhlenbeck.py | #!/usr/bin/env python
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2016, The Karenina Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "0.0.1-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "[email protected]"
__status__ = "Development"
import karenina.visualization
from karenina.experiment import Experiment
from optparse import OptionParser
from optparse import OptionGroup
from os.path import join,isdir,realpath,dirname
import os
from pkg_resources import resource_filename
from os import makedirs
import pandas as pd
def make_option_parser():
"""Return an optparse OptionParser object"""
parser = OptionParser(usage = "%prog -o ./simulation_results",
description = "This script simulates microbiome " +
"change over time using Ornstein-Uhlenbeck (OU) models. These are " +
"similar to Brownian motion models, with the exception that they " +
"include reversion to a mean. Output is a tab-delimited data table " +
"and figures.",
version = __version__)
required_options = OptionGroup(parser, "Required options")
required_options.add_option('-o','--output', type="string",
help='the output folder for the simulation results')
parser.add_option_group(required_options)
optional_options = OptionGroup(parser, "Optional options")
optional_options.add_option('--pert_file_path',\
default = os.path.abspath(resource_filename('karenina.data','set_xyz_lambda_zero.tsv')),\
type = "string",\
help = 'file path to a perturbation file specifying parameters for' +
' the simulation results [default: %default]')
optional_options.add_option('--treatment_names',\
default="control,destabilizing_treatment",type="string",\
help="Comma seperated list of treatment named [default:%default]")
optional_options.add_option('-n','--n_individuals',\
default="35,35",type="string",\
help='Comma-separated number of individuals to simulate per treatment.'+\
'Note: This value must be enclosed in quotes. Example: "35,35". [default: %default]')
optional_options.add_option('-t', '--n_timepoints',default=10, type="int",\
help='Number of timepoints to simulate. (One number, which is the ' +
'same for all treatments) [default: %default]')
optional_options.add_option('-p','--perturbation_timepoint',\
default=5,type="int",\
help='Timepoint at which to apply a perturbation. Must be less than ' +
'--n_timepoints [default: %default]')
optional_options.add_option('-d','--perturbation_duration',\
default=100,type="int",\
help='Duration that the perturbation lasts. [default: %default]')
optional_options.add_option('--interindividual_variation',
default=0.01,type="float",help='Starting variability between ' +
'individuals. [default: %default]')
optional_options.add_option('--delta',default=0.25,type="float",
help='Starting delta parameter for Brownian motion and ' +
'Ornstein-Uhlenbeck processes. A higher number indicates more ' +
'variability over time. [default: %default]')
optional_options.add_option('-l','--L',default=0.20,type="float",
help='Starting lambda parameter for Ornstein-Uhlenbeck processes. A ' +
        'higher number indicates a greater tendency to revert to the mean ' +
'value. [default: %default]')
optional_options.add_option('--fixed_start_pos',default=None,type="string",
help='Starting x,y,z position for all points, as comma separated ' +
'floating point values, e.g. 0.0,0.1,0.2. If not supplied, starting ' +
'positions will be randomized based on the interindividual_variation ' +
'parameter [default: %default]')
optional_options.add_option('-v','--verbose', action="store_true", dest="verbose", default=False,
help='-v, allows for verbose output' +
' [default: %default]')
parser.add_option_group(optional_options)
return parser
def check_perturbation_timepoint(perturbation_timepoint,n_timepoints):
"""
Raise ValueError if perturbation_timepoint is < 0 or >n_timepoints
:param perturbation_timepoint: defined timepoint for perturbation application
:param n_timepoints: number of timepoints
"""
if perturbation_timepoint and perturbation_timepoint >= n_timepoints:
raise ValueError("Perturbation timepoint must be before the last timepoint")
if perturbation_timepoint < 0:
raise ValueError("Perturbation timepoint must be positive")
def ensure_exists(output_dir):
"""
Ensure that output_dir exists
:param output_dir: path to output directory
"""
try:
makedirs(output_dir)
except OSError:
if not isdir(output_dir):
raise
def write_options_to_log(log, opts):
"""
Writes user's input options to log file
:param log: log filename
:param opts: options
"""
logfile = open(join(opts.output, log),"w+")
logfile_header = "#Karenina Simulation Logfile\n"
logfile.write(logfile_header)
logfile.write("Output folder: %s\n" %(str(opts.output)))
logfile.write("Treatment names: " + (str(opts.treatment_names)) + "\n")
n_individuals_line = "Number of individuals: %s\n"\
%(str(opts.n_individuals))
logfile.write(n_individuals_line)
logfile.write("Number of timepoints: " + (str(opts.n_timepoints)) + "\n")
logfile.write("Perturbation timepoint: " +
(str(opts.perturbation_timepoint)) + "\n")
logfile.write("Perturbation duration: " +
(str(opts.perturbation_duration)) + "\n")
logfile.write("Interindividual variation: " +
(str(opts.interindividual_variation)) + "\n")
logfile.write("Delta: " + (str(opts.delta)) + "\n")
logfile.write("Lambda: " + (str(opts.L)) + "\n")
logfile.write("Fixed starting position: " + (str(opts.fixed_start_pos)) +
"\n")
logfile.close()
def parse_perturbation_file(pert_file_path, perturbation_timepoint,perturbation_duration):
"""
Return a list of perturbations
infile -- a .tsv file describing one perturbation per line
assume input file is correctly formatted (no warnings if not)
    NOTE: each perturbation should be in the format:
set_xyz_lambda_low = {"start":opts.perturbation_timepoint,
"end":opts.perturbation_timepoint + opts.perturbation_duration,
"params":{"lambda":0.005}, "update_mode":"replace", "axes":["x","y","z"]}
:param pert_file_path: perturbation file path
:param perturbation_timepoint: timepoint to apply perturbation
:param perturbation_duration: duration of perturbation
:return: perturbation list parsed from pert file contents
"""
perturbations_list = []
if (pert_file_path != None):
df = pd.read_csv(pert_file_path, sep = "\t")
headers_list = list(df)
for index, row in df.iterrows():
a_perturbation = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration}
required_headers_checker = {"params" : False, "values" : False,
"update_mode" : False, "axes" : False}
for header in headers_list:
header_lowercase = header.lower()
if header_lowercase in ("parameter", "parameters", "param",\
"params"):
required_headers_checker["params"] = True
params = row[header].split(",")
elif header_lowercase in ("value", "values", "val", "vals"):
required_headers_checker["values"] = True
values = str(row[header]).split(",")
elif header_lowercase in ("update_mode", "update_modes",\
"update mode", "update modes"):
required_headers_checker["update_mode"] = True
update_mode = row[header]
elif header_lowercase in ("axes", "axis"):
required_headers_checker["axes"] = True
axes = row[header].split(",")
else:
raise ValueError("Could not identify header name in " + \
"perturbations file")
missing_headers_error_message = ""
for each_checker in required_headers_checker:
if required_headers_checker[each_checker] == False:
missing_headers_error_message += each_checker + " "
if missing_headers_error_message != "":
missing_headers_error_message = "Missing the following " +\
"header(s): " + missing_headers_error_message
raise ValueError(missing_headers_error_message)
if len(params) != len(values):
raise ValueError("Number of parameters does not match the " + \
"number of values")
a_perturbation["params"] = {}
for idx, single_param in enumerate(params):
a_perturbation["params"][single_param] = float(values[idx])
a_perturbation["update_mode"] = update_mode
a_perturbation["axes"] = axes
perturbations_list.append(a_perturbation)
else:
set_xyz_lambda_zero = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration,\
"params":{"lambda":0.000},"update_mode":"replace","axes":["x","y","z"]}
perturbations_list.append(set_xyz_lambda_zero)
return perturbations_list
def main():
parser = make_option_parser()
opts, args = parser.parse_args()
if opts.output is None:
parser.print_help()
exit()
write_options_to_log("log.txt", opts)
verbose = opts.verbose
#Check timepoints
check_perturbation_timepoint(opts.perturbation_timepoint,opts.n_timepoints)
#Set the base parameters for microbiome change over time
#in unperturbed individuals.
individual_base_params = {"lambda":opts.L,"delta":opts.delta,\
"interindividual_variation":opts.interindividual_variation}
if opts.fixed_start_pos:
try:
x,y,z = map(float,opts.fixed_start_pos.split(","))
individual_base_params['x']=x
individual_base_params['y']=y
individual_base_params['z']=z
except:
print ("Supplied value for fixed start position after parsing:",opts.fixed_start_pos)
raise ValueError('Problem with --fixed_start_pos. Got %s Please supply x,y,z values in the range (-1,1) separated by commas and enclosed in quotes. Example: "0.1,-0.2,0.3"'% opts.fixed_start_pos)
| perturbations = parse_perturbation_file(opts.pert_file_path,\
opts.perturbation_timepoint, opts.perturbation_duration)
treatments = [[], perturbations]
treatment_names = opts.treatment_names.split(",")
if verbose:
print("Raw number of individuals from user:",opts.n_individuals)
print("n_individuals",opts.n_individuals.split(','))
n_individuals = list(map(int,opts.n_individuals.split(",")))
if verbose:
print ("**Experiment Design**")
print ("treatments:",treatment_names)
print ("n_individuals:",n_individuals)
print ("interindividual_variation",opts.interindividual_variation)
print ("treatment_effects:",treatments)
print ("individual_base_params:",individual_base_params)
experiment = Experiment(treatment_names,n_individuals,opts.n_timepoints,\
individual_base_params,treatments,opts.interindividual_variation, verbose)
experiment.simulate_timesteps(0,opts.n_timepoints, verbose)
experiment.write_to_movie_file(opts.output, verbose)
if __name__ == "__main__":
main() | #Set up the treatments to be applied
| random_line_split |
config_unix.go | // Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package lib
import (
"os"
"path/filepath"
"reflect"
"github.com/urfave/cli"
"launchpad.net/go-xdg/v0"
)
const (
platformName = "CUPS"
defaultConfigFilename = "gcp-cups-connector.config.json"
)
type Config struct {
// Enable local discovery and printing.
LocalPrintingEnable bool `json:"local_printing_enable"`
// Enable cloud discovery and printing.
CloudPrintingEnable bool `json:"cloud_printing_enable"`
// Associated with root account. XMPP credential.
XMPPJID string `json:"xmpp_jid,omitempty"`
// Associated with robot account. Used for acquiring OAuth access tokens.
RobotRefreshToken string `json:"robot_refresh_token,omitempty"`
// Associated with user account. Used for sharing GCP printers; may be omitted.
UserRefreshToken string `json:"user_refresh_token,omitempty"`
// Scope (user, group, domain) to share printers with.
ShareScope string `json:"share_scope,omitempty"`
// User-chosen name of this proxy. Should be unique per Google user account.
ProxyName string `json:"proxy_name,omitempty"`
// XMPP server FQDN.
XMPPServer string `json:"xmpp_server,omitempty"`
// XMPP server port number.
XMPPPort uint16 `json:"xmpp_port,omitempty"`
// XMPP ping timeout (give up waiting after this time).
// TODO: Rename with "gcp_" removed.
XMPPPingTimeout string `json:"gcp_xmpp_ping_timeout,omitempty"`
// XMPP ping interval (time between ping attempts).
// TODO: Rename with "gcp_" removed.
// TODO: Rename with "_default" removed.
XMPPPingInterval string `json:"gcp_xmpp_ping_interval_default,omitempty"`
// GCP API URL prefix.
GCPBaseURL string `json:"gcp_base_url,omitempty"`
// OAuth2 client ID (not unique per client).
GCPOAuthClientID string `json:"gcp_oauth_client_id,omitempty"`
// OAuth2 client secret (not unique per client).
GCPOAuthClientSecret string `json:"gcp_oauth_client_secret,omitempty"`
// OAuth2 auth URL.
GCPOAuthAuthURL string `json:"gcp_oauth_auth_url,omitempty"`
// OAuth2 token URL.
GCPOAuthTokenURL string `json:"gcp_oauth_token_url,omitempty"`
// Maximum quantity of jobs (data) to download concurrently.
GCPMaxConcurrentDownloads uint `json:"gcp_max_concurrent_downloads,omitempty"`
// CUPS job queue size, must be greater than zero.
// TODO: rename without cups_ prefix
NativeJobQueueSize uint `json:"cups_job_queue_size,omitempty"`
// Interval (eg 10s, 1m) between CUPS printer state polls.
// TODO: rename without cups_ prefix
NativePrinterPollInterval string `json:"cups_printer_poll_interval,omitempty"`
// Use the full username ([email protected]) in job.
// TODO: rename without cups_ prefix
CUPSJobFullUsername *bool `json:"cups_job_full_username,omitempty"`
// Add the job ID to the beginning of the job title. Useful for debugging.
PrefixJobIDToJobTitle *bool `json:"prefix_job_id_to_job_title,omitempty"`
// Prefix for all GCP printers hosted by this connector.
DisplayNamePrefix string `json:"display_name_prefix,omitempty"`
// Ignore printers with native names.
PrinterBlacklist []string `json:"printer_blacklist,omitempty"`
// Allow printers with native names.
PrinterWhitelist []string `json:"printer_whitelist,omitempty"`
// Least severity to log.
LogLevel string `json:"log_level"`
// Local only: HTTP API port range, low.
LocalPortLow uint16 `json:"local_port_low,omitempty"`
// Local only: HTTP API port range, high.
LocalPortHigh uint16 `json:"local_port_high,omitempty"`
// CUPS only: Where to place log file.
LogFileName string `json:"log_file_name"`
// CUPS only: Maximum log file size.
LogFileMaxMegabytes uint `json:"log_file_max_megabytes,omitempty"`
// CUPS only: Maximum log file quantity.
LogMaxFiles uint `json:"log_max_files,omitempty"`
// CUPS only: Log to the systemd journal instead of to files?
LogToJournal *bool `json:"log_to_journal,omitempty"`
// CUPS only: Filename of unix socket for connector-check to talk to connector.
MonitorSocketFilename string `json:"monitor_socket_filename,omitempty"`
// CUPS only: Maximum quantity of open CUPS connections.
CUPSMaxConnections uint `json:"cups_max_connections,omitempty"`
// CUPS only: timeout for opening a new connection.
CUPSConnectTimeout string `json:"cups_connect_timeout,omitempty"`
// CUPS only: printer attributes to copy to GCP.
CUPSPrinterAttributes []string `json:"cups_printer_attributes,omitempty"`
// CUPS only: non-standard PPD options to add as GCP vendor capabilities.
CUPSVendorPPDOptions []string `json:"cups_vendor_ppd_options,omitempty"`
// CUPS only: ignore printers with make/model 'Local Raw Printer'.
CUPSIgnoreRawPrinters *bool `json:"cups_ignore_raw_printers,omitempty"`
// CUPS only: ignore printers with make/model 'Local Printer Class'.
CUPSIgnoreClassPrinters *bool `json:"cups_ignore_class_printers,omitempty"`
// CUPS only: copy the CUPS printer's printer-info attribute to the GCP printer's defaultDisplayName.
// TODO: rename with cups_ prefix
CUPSCopyPrinterInfoToDisplayName *bool `json:"copy_printer_info_to_display_name,omitempty"`
}
// DefaultConfig represents reasonable default values for Config fields.
// Omitted Config fields are omitted on purpose; they are unique per
// connector instance.
var DefaultConfig = Config{
LocalPrintingEnable: true,
CloudPrintingEnable: false,
XMPPServer: "talk.google.com",
XMPPPort: 443,
XMPPPingTimeout: "5s",
XMPPPingInterval: "2m",
GCPBaseURL: "https://www.google.com/cloudprint/",
GCPOAuthClientID: "539833558011-35iq8btpgas80nrs3o7mv99hm95d4dv6.apps.googleusercontent.com",
GCPOAuthClientSecret: "V9BfPOvdiYuw12hDx5Y5nR0a",
GCPOAuthAuthURL: "https://accounts.google.com/o/oauth2/auth",
GCPOAuthTokenURL: "https://accounts.google.com/o/oauth2/token",
GCPMaxConcurrentDownloads: 5,
NativeJobQueueSize: 3,
NativePrinterPollInterval: "1m",
PrefixJobIDToJobTitle: PointerToBool(false),
DisplayNamePrefix: "",
PrinterBlacklist: []string{},
PrinterWhitelist: []string{},
LogLevel: "INFO",
LocalPortLow: 26000,
LocalPortHigh: 26999,
LogFileName: "/tmp/cloud-print-connector",
LogFileMaxMegabytes: 1,
LogMaxFiles: 3,
LogToJournal: PointerToBool(false),
MonitorSocketFilename: "/tmp/cloud-print-connector-monitor.sock",
CUPSMaxConnections: 50,
CUPSConnectTimeout: "5s",
CUPSPrinterAttributes: []string{
"cups-version",
"device-uri",
"document-format-supported",
"print-color-mode-default",
"print-color-mode-supported",
"printer-name",
"printer-info",
"printer-location",
"printer-make-and-model",
"printer-state",
"printer-state-reasons",
"printer-uuid",
"marker-names",
"marker-types",
"marker-levels",
"copies-default",
"copies-supported",
"number-up-default",
"number-up-supported",
"orientation-requested-default",
"orientation-requested-supported",
"pdf-versions-supported",
},
CUPSJobFullUsername: PointerToBool(false),
CUPSIgnoreRawPrinters: PointerToBool(true),
CUPSIgnoreClassPrinters: PointerToBool(true),
CUPSCopyPrinterInfoToDisplayName: PointerToBool(true),
}
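// An illustrative, hypothetical gcp-cups-connector.config.json that leans on these
// defaults might contain only a handful of keys, for example:
//
//	{
//	  "local_printing_enable": true,
//	  "cloud_printing_enable": false,
//	  "log_level": "INFO",
//	  "log_file_name": "/tmp/cloud-print-connector"
//	}
//
// Backfill (below) fills any key omitted from such a file with its DefaultConfig value.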
// getConfigFilename gets the absolute filename of the config file specified by
// the ConfigFilename flag, and whether it exists.
//
// If the (relative or absolute) ConfigFilename exists, then it is returned.
// If the ConfigFilename exists in a valid XDG path, then it is returned.
// If neither of those exist, the (relative or absolute) ConfigFilename is returned.
func getConfigFilename(context *cli.Context) (string, bool) {
cf := context.GlobalString("config-filename")
if filepath.IsAbs(cf) {
// Absolute path specified; user knows what they want.
_, err := os.Stat(cf)
return cf, err == nil
}
absCF, err := filepath.Abs(cf)
if err != nil {
// syscall failure; treat as if file doesn't exist.
return cf, false
}
if _, err := os.Stat(absCF); err == nil {
// File exists on relative path.
return absCF, true
}
if xdgCF, err := xdg.Config.Find(cf); err == nil {
// File exists in an XDG directory.
return xdgCF, true
}
// Default to relative path. This is probably what the user expects if
// it wasn't found anywhere else.
return absCF, false
}
// Backfill returns a copy of this config with all missing keys set to default values.
func (c *Config) | (configMap map[string]interface{}) *Config {
b := *c.commonBackfill(configMap)
if _, exists := configMap["log_file_name"]; !exists {
b.LogFileName = DefaultConfig.LogFileName
}
if _, exists := configMap["log_file_max_megabytes"]; !exists {
b.LogFileMaxMegabytes = DefaultConfig.LogFileMaxMegabytes
}
if _, exists := configMap["log_max_files"]; !exists {
b.LogMaxFiles = DefaultConfig.LogMaxFiles
}
if _, exists := configMap["log_to_journal"]; !exists {
b.LogToJournal = DefaultConfig.LogToJournal
}
if _, exists := configMap["monitor_socket_filename"]; !exists {
b.MonitorSocketFilename = DefaultConfig.MonitorSocketFilename
}
if _, exists := configMap["cups_max_connections"]; !exists {
b.CUPSMaxConnections = DefaultConfig.CUPSMaxConnections
}
if _, exists := configMap["cups_connect_timeout"]; !exists {
b.CUPSConnectTimeout = DefaultConfig.CUPSConnectTimeout
}
if _, exists := configMap["cups_printer_attributes"]; !exists {
b.CUPSPrinterAttributes = DefaultConfig.CUPSPrinterAttributes
} else {
// Make sure all required attributes are present.
s := make(map[string]struct{}, len(b.CUPSPrinterAttributes))
for _, a := range b.CUPSPrinterAttributes {
s[a] = struct{}{}
}
for _, a := range DefaultConfig.CUPSPrinterAttributes {
if _, exists := s[a]; !exists {
b.CUPSPrinterAttributes = append(b.CUPSPrinterAttributes, a)
}
}
}
if _, exists := configMap["cups_job_full_username"]; !exists {
b.CUPSJobFullUsername = DefaultConfig.CUPSJobFullUsername
}
if _, exists := configMap["cups_ignore_raw_printers"]; !exists {
b.CUPSIgnoreRawPrinters = DefaultConfig.CUPSIgnoreRawPrinters
}
if _, exists := configMap["cups_ignore_class_printers"]; !exists {
b.CUPSIgnoreClassPrinters = DefaultConfig.CUPSIgnoreClassPrinters
}
if _, exists := configMap["copy_printer_info_to_display_name"]; !exists {
b.CUPSCopyPrinterInfoToDisplayName = DefaultConfig.CUPSCopyPrinterInfoToDisplayName
}
return &b
}
// Sparse returns a copy of this config with obvious values removed.
func (c *Config) Sparse(context *cli.Context) *Config {
s := *c.commonSparse(context)
if !context.IsSet("log-file-max-megabytes") &&
s.LogFileMaxMegabytes == DefaultConfig.LogFileMaxMegabytes {
s.LogFileMaxMegabytes = 0
}
if !context.IsSet("log-max-files") &&
s.LogMaxFiles == DefaultConfig.LogMaxFiles {
s.LogMaxFiles = 0
}
if !context.IsSet("log-to-journal") &&
reflect.DeepEqual(s.LogToJournal, DefaultConfig.LogToJournal) {
s.LogToJournal = nil
}
if !context.IsSet("monitor-socket-filename") &&
s.MonitorSocketFilename == DefaultConfig.MonitorSocketFilename {
s.MonitorSocketFilename = ""
}
if !context.IsSet("cups-max-connections") &&
s.CUPSMaxConnections == DefaultConfig.CUPSMaxConnections {
s.CUPSMaxConnections = 0
}
if !context.IsSet("cups-connect-timeout") &&
s.CUPSConnectTimeout == DefaultConfig.CUPSConnectTimeout {
s.CUPSConnectTimeout = ""
}
if reflect.DeepEqual(s.CUPSPrinterAttributes, DefaultConfig.CUPSPrinterAttributes) {
s.CUPSPrinterAttributes = nil
}
if !context.IsSet("cups-job-full-username") &&
reflect.DeepEqual(s.CUPSJobFullUsername, DefaultConfig.CUPSJobFullUsername) {
s.CUPSJobFullUsername = nil
}
if !context.IsSet("cups-ignore-raw-printers") &&
reflect.DeepEqual(s.CUPSIgnoreRawPrinters, DefaultConfig.CUPSIgnoreRawPrinters) {
s.CUPSIgnoreRawPrinters = nil
}
if !context.IsSet("cups-ignore-class-printers") &&
reflect.DeepEqual(s.CUPSIgnoreClassPrinters, DefaultConfig.CUPSIgnoreClassPrinters) {
s.CUPSIgnoreClassPrinters = nil
}
if !context.IsSet("copy-printer-info-to-display-name") &&
reflect.DeepEqual(s.CUPSCopyPrinterInfoToDisplayName, DefaultConfig.CUPSCopyPrinterInfoToDisplayName) {
s.CUPSCopyPrinterInfoToDisplayName = nil
}
return &s
}
| Backfill | identifier_name |
config_unix.go | // Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package lib
import (
"os"
"path/filepath"
"reflect"
"github.com/urfave/cli"
"launchpad.net/go-xdg/v0"
)
const (
platformName = "CUPS"
defaultConfigFilename = "gcp-cups-connector.config.json"
)
type Config struct {
// Enable local discovery and printing. | CloudPrintingEnable bool `json:"cloud_printing_enable"`
// Associated with root account. XMPP credential.
XMPPJID string `json:"xmpp_jid,omitempty"`
// Associated with robot account. Used for acquiring OAuth access tokens.
RobotRefreshToken string `json:"robot_refresh_token,omitempty"`
// Associated with user account. Used for sharing GCP printers; may be omitted.
UserRefreshToken string `json:"user_refresh_token,omitempty"`
// Scope (user, group, domain) to share printers with.
ShareScope string `json:"share_scope,omitempty"`
// User-chosen name of this proxy. Should be unique per Google user account.
ProxyName string `json:"proxy_name,omitempty"`
// XMPP server FQDN.
XMPPServer string `json:"xmpp_server,omitempty"`
// XMPP server port number.
XMPPPort uint16 `json:"xmpp_port,omitempty"`
// XMPP ping timeout (give up waiting after this time).
// TODO: Rename with "gcp_" removed.
XMPPPingTimeout string `json:"gcp_xmpp_ping_timeout,omitempty"`
// XMPP ping interval (time between ping attempts).
// TODO: Rename with "gcp_" removed.
// TODO: Rename with "_default" removed.
XMPPPingInterval string `json:"gcp_xmpp_ping_interval_default,omitempty"`
// GCP API URL prefix.
GCPBaseURL string `json:"gcp_base_url,omitempty"`
// OAuth2 client ID (not unique per client).
GCPOAuthClientID string `json:"gcp_oauth_client_id,omitempty"`
// OAuth2 client secret (not unique per client).
GCPOAuthClientSecret string `json:"gcp_oauth_client_secret,omitempty"`
// OAuth2 auth URL.
GCPOAuthAuthURL string `json:"gcp_oauth_auth_url,omitempty"`
// OAuth2 token URL.
GCPOAuthTokenURL string `json:"gcp_oauth_token_url,omitempty"`
// Maximum quantity of jobs (data) to download concurrently.
GCPMaxConcurrentDownloads uint `json:"gcp_max_concurrent_downloads,omitempty"`
// CUPS job queue size, must be greater than zero.
// TODO: rename without cups_ prefix
NativeJobQueueSize uint `json:"cups_job_queue_size,omitempty"`
// Interval (eg 10s, 1m) between CUPS printer state polls.
// TODO: rename without cups_ prefix
NativePrinterPollInterval string `json:"cups_printer_poll_interval,omitempty"`
// Use the full username ([email protected]) in job.
// TODO: rename without cups_ prefix
CUPSJobFullUsername *bool `json:"cups_job_full_username,omitempty"`
// Add the job ID to the beginning of the job title. Useful for debugging.
PrefixJobIDToJobTitle *bool `json:"prefix_job_id_to_job_title,omitempty"`
// Prefix for all GCP printers hosted by this connector.
DisplayNamePrefix string `json:"display_name_prefix,omitempty"`
// Ignore printers with native names.
PrinterBlacklist []string `json:"printer_blacklist,omitempty"`
// Allow printers with native names.
PrinterWhitelist []string `json:"printer_whitelist,omitempty"`
// Least severity to log.
LogLevel string `json:"log_level"`
// Local only: HTTP API port range, low.
LocalPortLow uint16 `json:"local_port_low,omitempty"`
// Local only: HTTP API port range, high.
LocalPortHigh uint16 `json:"local_port_high,omitempty"`
// CUPS only: Where to place log file.
LogFileName string `json:"log_file_name"`
// CUPS only: Maximum log file size.
LogFileMaxMegabytes uint `json:"log_file_max_megabytes,omitempty"`
// CUPS only: Maximum log file quantity.
LogMaxFiles uint `json:"log_max_files,omitempty"`
// CUPS only: Log to the systemd journal instead of to files?
LogToJournal *bool `json:"log_to_journal,omitempty"`
// CUPS only: Filename of unix socket for connector-check to talk to connector.
MonitorSocketFilename string `json:"monitor_socket_filename,omitempty"`
// CUPS only: Maximum quantity of open CUPS connections.
CUPSMaxConnections uint `json:"cups_max_connections,omitempty"`
// CUPS only: timeout for opening a new connection.
CUPSConnectTimeout string `json:"cups_connect_timeout,omitempty"`
// CUPS only: printer attributes to copy to GCP.
CUPSPrinterAttributes []string `json:"cups_printer_attributes,omitempty"`
// CUPS only: non-standard PPD options to add as GCP vendor capabilities.
CUPSVendorPPDOptions []string `json:"cups_vendor_ppd_options,omitempty"`
// CUPS only: ignore printers with make/model 'Local Raw Printer'.
CUPSIgnoreRawPrinters *bool `json:"cups_ignore_raw_printers,omitempty"`
// CUPS only: ignore printers with make/model 'Local Printer Class'.
CUPSIgnoreClassPrinters *bool `json:"cups_ignore_class_printers,omitempty"`
// CUPS only: copy the CUPS printer's printer-info attribute to the GCP printer's defaultDisplayName.
// TODO: rename with cups_ prefix
CUPSCopyPrinterInfoToDisplayName *bool `json:"copy_printer_info_to_display_name,omitempty"`
}
// DefaultConfig represents reasonable default values for Config fields.
// Omitted Config fields are omitted on purpose; they are unique per
// connector instance.
var DefaultConfig = Config{
LocalPrintingEnable: true,
CloudPrintingEnable: false,
XMPPServer: "talk.google.com",
XMPPPort: 443,
XMPPPingTimeout: "5s",
XMPPPingInterval: "2m",
GCPBaseURL: "https://www.google.com/cloudprint/",
GCPOAuthClientID: "539833558011-35iq8btpgas80nrs3o7mv99hm95d4dv6.apps.googleusercontent.com",
GCPOAuthClientSecret: "V9BfPOvdiYuw12hDx5Y5nR0a",
GCPOAuthAuthURL: "https://accounts.google.com/o/oauth2/auth",
GCPOAuthTokenURL: "https://accounts.google.com/o/oauth2/token",
GCPMaxConcurrentDownloads: 5,
NativeJobQueueSize: 3,
NativePrinterPollInterval: "1m",
PrefixJobIDToJobTitle: PointerToBool(false),
DisplayNamePrefix: "",
PrinterBlacklist: []string{},
PrinterWhitelist: []string{},
LogLevel: "INFO",
LocalPortLow: 26000,
LocalPortHigh: 26999,
LogFileName: "/tmp/cloud-print-connector",
LogFileMaxMegabytes: 1,
LogMaxFiles: 3,
LogToJournal: PointerToBool(false),
MonitorSocketFilename: "/tmp/cloud-print-connector-monitor.sock",
CUPSMaxConnections: 50,
CUPSConnectTimeout: "5s",
CUPSPrinterAttributes: []string{
"cups-version",
"device-uri",
"document-format-supported",
"print-color-mode-default",
"print-color-mode-supported",
"printer-name",
"printer-info",
"printer-location",
"printer-make-and-model",
"printer-state",
"printer-state-reasons",
"printer-uuid",
"marker-names",
"marker-types",
"marker-levels",
"copies-default",
"copies-supported",
"number-up-default",
"number-up-supported",
"orientation-requested-default",
"orientation-requested-supported",
"pdf-versions-supported",
},
CUPSJobFullUsername: PointerToBool(false),
CUPSIgnoreRawPrinters: PointerToBool(true),
CUPSIgnoreClassPrinters: PointerToBool(true),
CUPSCopyPrinterInfoToDisplayName: PointerToBool(true),
}
// getConfigFilename gets the absolute filename of the config file specified by
// the ConfigFilename flag, and whether it exists.
//
// If the (relative or absolute) ConfigFilename exists, then it is returned.
// If the ConfigFilename exists in a valid XDG path, then it is returned.
// If neither of those exist, the (relative or absolute) ConfigFilename is returned.
func getConfigFilename(context *cli.Context) (string, bool) {
cf := context.GlobalString("config-filename")
if filepath.IsAbs(cf) {
// Absolute path specified; user knows what they want.
_, err := os.Stat(cf)
return cf, err == nil
}
absCF, err := filepath.Abs(cf)
if err != nil {
// syscall failure; treat as if file doesn't exist.
return cf, false
}
if _, err := os.Stat(absCF); err == nil {
// File exists on relative path.
return absCF, true
}
if xdgCF, err := xdg.Config.Find(cf); err == nil {
// File exists in an XDG directory.
return xdgCF, true
}
// Default to relative path. This is probably what the user expects if
// it wasn't found anywhere else.
return absCF, false
}
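// Illustrative sketch of the lookup order above, assuming a hypothetical
// "--config-filename my.config.json" flag value (paths are placeholders):
//
//   1. "my.config.json" is not absolute, so its absolute form is tried first,
//      e.g. "/home/user/my.config.json".
//   2. If that does not exist, an XDG config path such as
//      "$XDG_CONFIG_HOME/my.config.json" is tried via xdg.Config.Find.
//   3. If neither exists, the absolute form is returned with exists == false.
//
//   filename, exists := getConfigFilename(context)
//   if !exists {
//       // fall back to DefaultConfig or prompt the user
//   }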
// Backfill returns a copy of this config with all missing keys set to default values.
func (c *Config) Backfill(configMap map[string]interface{}) *Config {
b := *c.commonBackfill(configMap)
if _, exists := configMap["log_file_name"]; !exists {
b.LogFileName = DefaultConfig.LogFileName
}
if _, exists := configMap["log_file_max_megabytes"]; !exists {
b.LogFileMaxMegabytes = DefaultConfig.LogFileMaxMegabytes
}
if _, exists := configMap["log_max_files"]; !exists {
b.LogMaxFiles = DefaultConfig.LogMaxFiles
}
if _, exists := configMap["log_to_journal"]; !exists {
b.LogToJournal = DefaultConfig.LogToJournal
}
if _, exists := configMap["monitor_socket_filename"]; !exists {
b.MonitorSocketFilename = DefaultConfig.MonitorSocketFilename
}
if _, exists := configMap["cups_max_connections"]; !exists {
b.CUPSMaxConnections = DefaultConfig.CUPSMaxConnections
}
if _, exists := configMap["cups_connect_timeout"]; !exists {
b.CUPSConnectTimeout = DefaultConfig.CUPSConnectTimeout
}
if _, exists := configMap["cups_printer_attributes"]; !exists {
b.CUPSPrinterAttributes = DefaultConfig.CUPSPrinterAttributes
} else {
// Make sure all required attributes are present.
s := make(map[string]struct{}, len(b.CUPSPrinterAttributes))
for _, a := range b.CUPSPrinterAttributes {
s[a] = struct{}{}
}
for _, a := range DefaultConfig.CUPSPrinterAttributes {
if _, exists := s[a]; !exists {
b.CUPSPrinterAttributes = append(b.CUPSPrinterAttributes, a)
}
}
}
if _, exists := configMap["cups_job_full_username"]; !exists {
b.CUPSJobFullUsername = DefaultConfig.CUPSJobFullUsername
}
if _, exists := configMap["cups_ignore_raw_printers"]; !exists {
b.CUPSIgnoreRawPrinters = DefaultConfig.CUPSIgnoreRawPrinters
}
if _, exists := configMap["cups_ignore_class_printers"]; !exists {
b.CUPSIgnoreClassPrinters = DefaultConfig.CUPSIgnoreClassPrinters
}
if _, exists := configMap["copy_printer_info_to_display_name"]; !exists {
b.CUPSCopyPrinterInfoToDisplayName = DefaultConfig.CUPSCopyPrinterInfoToDisplayName
}
return &b
}
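// Hedged usage sketch for Backfill: the caller is expected to unmarshal the raw
// config file twice, once into a Config and once into a generic map, so that
// Backfill can tell which keys were actually present. Variable names below are
// illustrative only.
//
//   var config Config
//   var configMap map[string]interface{}
//   _ = json.Unmarshal(raw, &config)
//   _ = json.Unmarshal(raw, &configMap)
//   config = *config.Backfill(configMap)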
// Sparse returns a copy of this config with obvious values removed.
func (c *Config) Sparse(context *cli.Context) *Config {
s := *c.commonSparse(context)
if !context.IsSet("log-file-max-megabytes") &&
s.LogFileMaxMegabytes == DefaultConfig.LogFileMaxMegabytes {
s.LogFileMaxMegabytes = 0
}
if !context.IsSet("log-max-files") &&
s.LogMaxFiles == DefaultConfig.LogMaxFiles {
s.LogMaxFiles = 0
}
if !context.IsSet("log-to-journal") &&
reflect.DeepEqual(s.LogToJournal, DefaultConfig.LogToJournal) {
s.LogToJournal = nil
}
if !context.IsSet("monitor-socket-filename") &&
s.MonitorSocketFilename == DefaultConfig.MonitorSocketFilename {
s.MonitorSocketFilename = ""
}
if !context.IsSet("cups-max-connections") &&
s.CUPSMaxConnections == DefaultConfig.CUPSMaxConnections {
s.CUPSMaxConnections = 0
}
if !context.IsSet("cups-connect-timeout") &&
s.CUPSConnectTimeout == DefaultConfig.CUPSConnectTimeout {
s.CUPSConnectTimeout = ""
}
if reflect.DeepEqual(s.CUPSPrinterAttributes, DefaultConfig.CUPSPrinterAttributes) {
s.CUPSPrinterAttributes = nil
}
if !context.IsSet("cups-job-full-username") &&
reflect.DeepEqual(s.CUPSJobFullUsername, DefaultConfig.CUPSJobFullUsername) {
s.CUPSJobFullUsername = nil
}
if !context.IsSet("cups-ignore-raw-printers") &&
reflect.DeepEqual(s.CUPSIgnoreRawPrinters, DefaultConfig.CUPSIgnoreRawPrinters) {
s.CUPSIgnoreRawPrinters = nil
}
if !context.IsSet("cups-ignore-class-printers") &&
reflect.DeepEqual(s.CUPSIgnoreClassPrinters, DefaultConfig.CUPSIgnoreClassPrinters) {
s.CUPSIgnoreClassPrinters = nil
}
if !context.IsSet("copy-printer-info-to-display-name") &&
reflect.DeepEqual(s.CUPSCopyPrinterInfoToDisplayName, DefaultConfig.CUPSCopyPrinterInfoToDisplayName) {
s.CUPSCopyPrinterInfoToDisplayName = nil
}
return &s
} | LocalPrintingEnable bool `json:"local_printing_enable"`
// Enable cloud discovery and printing. | random_line_split |
config_unix.go | // Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package lib
import (
"os"
"path/filepath"
"reflect"
"github.com/urfave/cli"
"launchpad.net/go-xdg/v0"
)
const (
platformName = "CUPS"
defaultConfigFilename = "gcp-cups-connector.config.json"
)
type Config struct {
// Enable local discovery and printing.
LocalPrintingEnable bool `json:"local_printing_enable"`
// Enable cloud discovery and printing.
CloudPrintingEnable bool `json:"cloud_printing_enable"`
// Associated with root account. XMPP credential.
XMPPJID string `json:"xmpp_jid,omitempty"`
// Associated with robot account. Used for acquiring OAuth access tokens.
RobotRefreshToken string `json:"robot_refresh_token,omitempty"`
// Associated with user account. Used for sharing GCP printers; may be omitted.
UserRefreshToken string `json:"user_refresh_token,omitempty"`
// Scope (user, group, domain) to share printers with.
ShareScope string `json:"share_scope,omitempty"`
// User-chosen name of this proxy. Should be unique per Google user account.
ProxyName string `json:"proxy_name,omitempty"`
// XMPP server FQDN.
XMPPServer string `json:"xmpp_server,omitempty"`
// XMPP server port number.
XMPPPort uint16 `json:"xmpp_port,omitempty"`
// XMPP ping timeout (give up waiting after this time).
// TODO: Rename with "gcp_" removed.
XMPPPingTimeout string `json:"gcp_xmpp_ping_timeout,omitempty"`
// XMPP ping interval (time between ping attempts).
// TODO: Rename with "gcp_" removed.
// TODO: Rename with "_default" removed.
XMPPPingInterval string `json:"gcp_xmpp_ping_interval_default,omitempty"`
// GCP API URL prefix.
GCPBaseURL string `json:"gcp_base_url,omitempty"`
// OAuth2 client ID (not unique per client).
GCPOAuthClientID string `json:"gcp_oauth_client_id,omitempty"`
// OAuth2 client secret (not unique per client).
GCPOAuthClientSecret string `json:"gcp_oauth_client_secret,omitempty"`
// OAuth2 auth URL.
GCPOAuthAuthURL string `json:"gcp_oauth_auth_url,omitempty"`
// OAuth2 token URL.
GCPOAuthTokenURL string `json:"gcp_oauth_token_url,omitempty"`
// Maximum quantity of jobs (data) to download concurrently.
GCPMaxConcurrentDownloads uint `json:"gcp_max_concurrent_downloads,omitempty"`
// CUPS job queue size, must be greater than zero.
// TODO: rename without cups_ prefix
NativeJobQueueSize uint `json:"cups_job_queue_size,omitempty"`
// Interval (eg 10s, 1m) between CUPS printer state polls.
// TODO: rename without cups_ prefix
NativePrinterPollInterval string `json:"cups_printer_poll_interval,omitempty"`
// Use the full username ([email protected]) in job.
// TODO: rename without cups_ prefix
CUPSJobFullUsername *bool `json:"cups_job_full_username,omitempty"`
// Add the job ID to the beginning of the job title. Useful for debugging.
PrefixJobIDToJobTitle *bool `json:"prefix_job_id_to_job_title,omitempty"`
// Prefix for all GCP printers hosted by this connector.
DisplayNamePrefix string `json:"display_name_prefix,omitempty"`
// Ignore printers with native names.
PrinterBlacklist []string `json:"printer_blacklist,omitempty"`
// Allow printers with native names.
PrinterWhitelist []string `json:"printer_whitelist,omitempty"`
// Least severity to log.
LogLevel string `json:"log_level"`
// Local only: HTTP API port range, low.
LocalPortLow uint16 `json:"local_port_low,omitempty"`
// Local only: HTTP API port range, high.
LocalPortHigh uint16 `json:"local_port_high,omitempty"`
// CUPS only: Where to place log file.
LogFileName string `json:"log_file_name"`
// CUPS only: Maximum log file size.
LogFileMaxMegabytes uint `json:"log_file_max_megabytes,omitempty"`
// CUPS only: Maximum log file quantity.
LogMaxFiles uint `json:"log_max_files,omitempty"`
// CUPS only: Log to the systemd journal instead of to files?
LogToJournal *bool `json:"log_to_journal,omitempty"`
// CUPS only: Filename of unix socket for connector-check to talk to connector.
MonitorSocketFilename string `json:"monitor_socket_filename,omitempty"`
// CUPS only: Maximum quantity of open CUPS connections.
CUPSMaxConnections uint `json:"cups_max_connections,omitempty"`
// CUPS only: timeout for opening a new connection.
CUPSConnectTimeout string `json:"cups_connect_timeout,omitempty"`
// CUPS only: printer attributes to copy to GCP.
CUPSPrinterAttributes []string `json:"cups_printer_attributes,omitempty"`
// CUPS only: non-standard PPD options to add as GCP vendor capabilities.
CUPSVendorPPDOptions []string `json:"cups_vendor_ppd_options,omitempty"`
// CUPS only: ignore printers with make/model 'Local Raw Printer'.
CUPSIgnoreRawPrinters *bool `json:"cups_ignore_raw_printers,omitempty"`
// CUPS only: ignore printers with make/model 'Local Printer Class'.
CUPSIgnoreClassPrinters *bool `json:"cups_ignore_class_printers,omitempty"`
// CUPS only: copy the CUPS printer's printer-info attribute to the GCP printer's defaultDisplayName.
// TODO: rename with cups_ prefix
CUPSCopyPrinterInfoToDisplayName *bool `json:"copy_printer_info_to_display_name,omitempty"`
}
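// A minimal example of the on-disk JSON this struct maps to, using the json
// tags above; the values are placeholders, not recommended settings:
//
//   {
//     "local_printing_enable": true,
//     "cloud_printing_enable": false,
//     "proxy_name": "example-proxy",
//     "log_level": "INFO",
//     "log_file_name": "/tmp/cloud-print-connector"
//   }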
// DefaultConfig represents reasonable default values for Config fields.
// Omitted Config fields are omitted on purpose; they are unique per
// connector instance.
var DefaultConfig = Config{
LocalPrintingEnable: true,
CloudPrintingEnable: false,
XMPPServer: "talk.google.com",
XMPPPort: 443,
XMPPPingTimeout: "5s",
XMPPPingInterval: "2m",
GCPBaseURL: "https://www.google.com/cloudprint/",
GCPOAuthClientID: "539833558011-35iq8btpgas80nrs3o7mv99hm95d4dv6.apps.googleusercontent.com",
GCPOAuthClientSecret: "V9BfPOvdiYuw12hDx5Y5nR0a",
GCPOAuthAuthURL: "https://accounts.google.com/o/oauth2/auth",
GCPOAuthTokenURL: "https://accounts.google.com/o/oauth2/token",
GCPMaxConcurrentDownloads: 5,
NativeJobQueueSize: 3,
NativePrinterPollInterval: "1m",
PrefixJobIDToJobTitle: PointerToBool(false),
DisplayNamePrefix: "",
PrinterBlacklist: []string{},
PrinterWhitelist: []string{},
LogLevel: "INFO",
LocalPortLow: 26000,
LocalPortHigh: 26999,
LogFileName: "/tmp/cloud-print-connector",
LogFileMaxMegabytes: 1,
LogMaxFiles: 3,
LogToJournal: PointerToBool(false),
MonitorSocketFilename: "/tmp/cloud-print-connector-monitor.sock",
CUPSMaxConnections: 50,
CUPSConnectTimeout: "5s",
CUPSPrinterAttributes: []string{
"cups-version",
"device-uri",
"document-format-supported",
"print-color-mode-default",
"print-color-mode-supported",
"printer-name",
"printer-info",
"printer-location",
"printer-make-and-model",
"printer-state",
"printer-state-reasons",
"printer-uuid",
"marker-names",
"marker-types",
"marker-levels",
"copies-default",
"copies-supported",
"number-up-default",
"number-up-supported",
"orientation-requested-default",
"orientation-requested-supported",
"pdf-versions-supported",
},
CUPSJobFullUsername: PointerToBool(false),
CUPSIgnoreRawPrinters: PointerToBool(true),
CUPSIgnoreClassPrinters: PointerToBool(true),
CUPSCopyPrinterInfoToDisplayName: PointerToBool(true),
}
// getConfigFilename gets the absolute filename of the config file specified by
// the ConfigFilename flag, and whether it exists.
//
// If the (relative or absolute) ConfigFilename exists, then it is returned.
// If the ConfigFilename exists in a valid XDG path, then it is returned.
// If neither of those exist, the (relative or absolute) ConfigFilename is returned.
func getConfigFilename(context *cli.Context) (string, bool) {
cf := context.GlobalString("config-filename")
if filepath.IsAbs(cf) {
// Absolute path specified; user knows what they want.
_, err := os.Stat(cf)
return cf, err == nil
}
absCF, err := filepath.Abs(cf)
if err != nil {
// syscall failure; treat as if file doesn't exist.
return cf, false
}
if _, err := os.Stat(absCF); err == nil |
if xdgCF, err := xdg.Config.Find(cf); err == nil {
// File exists in an XDG directory.
return xdgCF, true
}
// Default to relative path. This is probably what the user expects if
// it wasn't found anywhere else.
return absCF, false
}
// Backfill returns a copy of this config with all missing keys set to default values.
func (c *Config) Backfill(configMap map[string]interface{}) *Config {
b := *c.commonBackfill(configMap)
if _, exists := configMap["log_file_name"]; !exists {
b.LogFileName = DefaultConfig.LogFileName
}
if _, exists := configMap["log_file_max_megabytes"]; !exists {
b.LogFileMaxMegabytes = DefaultConfig.LogFileMaxMegabytes
}
if _, exists := configMap["log_max_files"]; !exists {
b.LogMaxFiles = DefaultConfig.LogMaxFiles
}
if _, exists := configMap["log_to_journal"]; !exists {
b.LogToJournal = DefaultConfig.LogToJournal
}
if _, exists := configMap["monitor_socket_filename"]; !exists {
b.MonitorSocketFilename = DefaultConfig.MonitorSocketFilename
}
if _, exists := configMap["cups_max_connections"]; !exists {
b.CUPSMaxConnections = DefaultConfig.CUPSMaxConnections
}
if _, exists := configMap["cups_connect_timeout"]; !exists {
b.CUPSConnectTimeout = DefaultConfig.CUPSConnectTimeout
}
if _, exists := configMap["cups_printer_attributes"]; !exists {
b.CUPSPrinterAttributes = DefaultConfig.CUPSPrinterAttributes
} else {
// Make sure all required attributes are present.
s := make(map[string]struct{}, len(b.CUPSPrinterAttributes))
for _, a := range b.CUPSPrinterAttributes {
s[a] = struct{}{}
}
for _, a := range DefaultConfig.CUPSPrinterAttributes {
if _, exists := s[a]; !exists {
b.CUPSPrinterAttributes = append(b.CUPSPrinterAttributes, a)
}
}
}
if _, exists := configMap["cups_job_full_username"]; !exists {
b.CUPSJobFullUsername = DefaultConfig.CUPSJobFullUsername
}
if _, exists := configMap["cups_ignore_raw_printers"]; !exists {
b.CUPSIgnoreRawPrinters = DefaultConfig.CUPSIgnoreRawPrinters
}
if _, exists := configMap["cups_ignore_class_printers"]; !exists {
b.CUPSIgnoreClassPrinters = DefaultConfig.CUPSIgnoreClassPrinters
}
if _, exists := configMap["copy_printer_info_to_display_name"]; !exists {
b.CUPSCopyPrinterInfoToDisplayName = DefaultConfig.CUPSCopyPrinterInfoToDisplayName
}
return &b
}
// Sparse returns a copy of this config with obvious values removed.
func (c *Config) Sparse(context *cli.Context) *Config {
s := *c.commonSparse(context)
if !context.IsSet("log-file-max-megabytes") &&
s.LogFileMaxMegabytes == DefaultConfig.LogFileMaxMegabytes {
s.LogFileMaxMegabytes = 0
}
if !context.IsSet("log-max-files") &&
s.LogMaxFiles == DefaultConfig.LogMaxFiles {
s.LogMaxFiles = 0
}
if !context.IsSet("log-to-journal") &&
reflect.DeepEqual(s.LogToJournal, DefaultConfig.LogToJournal) {
s.LogToJournal = nil
}
if !context.IsSet("monitor-socket-filename") &&
s.MonitorSocketFilename == DefaultConfig.MonitorSocketFilename {
s.MonitorSocketFilename = ""
}
if !context.IsSet("cups-max-connections") &&
s.CUPSMaxConnections == DefaultConfig.CUPSMaxConnections {
s.CUPSMaxConnections = 0
}
if !context.IsSet("cups-connect-timeout") &&
s.CUPSConnectTimeout == DefaultConfig.CUPSConnectTimeout {
s.CUPSConnectTimeout = ""
}
if reflect.DeepEqual(s.CUPSPrinterAttributes, DefaultConfig.CUPSPrinterAttributes) {
s.CUPSPrinterAttributes = nil
}
if !context.IsSet("cups-job-full-username") &&
reflect.DeepEqual(s.CUPSJobFullUsername, DefaultConfig.CUPSJobFullUsername) {
s.CUPSJobFullUsername = nil
}
if !context.IsSet("cups-ignore-raw-printers") &&
reflect.DeepEqual(s.CUPSIgnoreRawPrinters, DefaultConfig.CUPSIgnoreRawPrinters) {
s.CUPSIgnoreRawPrinters = nil
}
if !context.IsSet("cups-ignore-class-printers") &&
reflect.DeepEqual(s.CUPSIgnoreClassPrinters, DefaultConfig.CUPSIgnoreClassPrinters) {
s.CUPSIgnoreClassPrinters = nil
}
if !context.IsSet("copy-printer-info-to-display-name") &&
reflect.DeepEqual(s.CUPSCopyPrinterInfoToDisplayName, DefaultConfig.CUPSCopyPrinterInfoToDisplayName) {
s.CUPSCopyPrinterInfoToDisplayName = nil
}
return &s
}
| {
// File exists on relative path.
return absCF, true
} | conditional_block |
config_unix.go | // Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package lib
import (
"os"
"path/filepath"
"reflect"
"github.com/urfave/cli"
"launchpad.net/go-xdg/v0"
)
const (
platformName = "CUPS"
defaultConfigFilename = "gcp-cups-connector.config.json"
)
type Config struct {
// Enable local discovery and printing.
LocalPrintingEnable bool `json:"local_printing_enable"`
// Enable cloud discovery and printing.
CloudPrintingEnable bool `json:"cloud_printing_enable"`
// Associated with root account. XMPP credential.
XMPPJID string `json:"xmpp_jid,omitempty"`
// Associated with robot account. Used for acquiring OAuth access tokens.
RobotRefreshToken string `json:"robot_refresh_token,omitempty"`
// Associated with user account. Used for sharing GCP printers; may be omitted.
UserRefreshToken string `json:"user_refresh_token,omitempty"`
// Scope (user, group, domain) to share printers with.
ShareScope string `json:"share_scope,omitempty"`
// User-chosen name of this proxy. Should be unique per Google user account.
ProxyName string `json:"proxy_name,omitempty"`
// XMPP server FQDN.
XMPPServer string `json:"xmpp_server,omitempty"`
// XMPP server port number.
XMPPPort uint16 `json:"xmpp_port,omitempty"`
// XMPP ping timeout (give up waiting after this time).
// TODO: Rename with "gcp_" removed.
XMPPPingTimeout string `json:"gcp_xmpp_ping_timeout,omitempty"`
// XMPP ping interval (time between ping attempts).
// TODO: Rename with "gcp_" removed.
// TODO: Rename with "_default" removed.
XMPPPingInterval string `json:"gcp_xmpp_ping_interval_default,omitempty"`
// GCP API URL prefix.
GCPBaseURL string `json:"gcp_base_url,omitempty"`
// OAuth2 client ID (not unique per client).
GCPOAuthClientID string `json:"gcp_oauth_client_id,omitempty"`
// OAuth2 client secret (not unique per client).
GCPOAuthClientSecret string `json:"gcp_oauth_client_secret,omitempty"`
// OAuth2 auth URL.
GCPOAuthAuthURL string `json:"gcp_oauth_auth_url,omitempty"`
// OAuth2 token URL.
GCPOAuthTokenURL string `json:"gcp_oauth_token_url,omitempty"`
// Maximum quantity of jobs (data) to download concurrently.
GCPMaxConcurrentDownloads uint `json:"gcp_max_concurrent_downloads,omitempty"`
// CUPS job queue size, must be greater than zero.
// TODO: rename without cups_ prefix
NativeJobQueueSize uint `json:"cups_job_queue_size,omitempty"`
// Interval (eg 10s, 1m) between CUPS printer state polls.
// TODO: rename without cups_ prefix
NativePrinterPollInterval string `json:"cups_printer_poll_interval,omitempty"`
// Use the full username ([email protected]) in job.
// TODO: rename without cups_ prefix
CUPSJobFullUsername *bool `json:"cups_job_full_username,omitempty"`
// Add the job ID to the beginning of the job title. Useful for debugging.
PrefixJobIDToJobTitle *bool `json:"prefix_job_id_to_job_title,omitempty"`
// Prefix for all GCP printers hosted by this connector.
DisplayNamePrefix string `json:"display_name_prefix,omitempty"`
// Ignore printers with native names.
PrinterBlacklist []string `json:"printer_blacklist,omitempty"`
// Allow printers with native names.
PrinterWhitelist []string `json:"printer_whitelist,omitempty"`
// Least severity to log.
LogLevel string `json:"log_level"`
// Local only: HTTP API port range, low.
LocalPortLow uint16 `json:"local_port_low,omitempty"`
// Local only: HTTP API port range, high.
LocalPortHigh uint16 `json:"local_port_high,omitempty"`
// CUPS only: Where to place log file.
LogFileName string `json:"log_file_name"`
// CUPS only: Maximum log file size.
LogFileMaxMegabytes uint `json:"log_file_max_megabytes,omitempty"`
// CUPS only: Maximum log file quantity.
LogMaxFiles uint `json:"log_max_files,omitempty"`
// CUPS only: Log to the systemd journal instead of to files?
LogToJournal *bool `json:"log_to_journal,omitempty"`
// CUPS only: Filename of unix socket for connector-check to talk to connector.
MonitorSocketFilename string `json:"monitor_socket_filename,omitempty"`
// CUPS only: Maximum quantity of open CUPS connections.
CUPSMaxConnections uint `json:"cups_max_connections,omitempty"`
// CUPS only: timeout for opening a new connection.
CUPSConnectTimeout string `json:"cups_connect_timeout,omitempty"`
// CUPS only: printer attributes to copy to GCP.
CUPSPrinterAttributes []string `json:"cups_printer_attributes,omitempty"`
// CUPS only: non-standard PPD options to add as GCP vendor capabilities.
CUPSVendorPPDOptions []string `json:"cups_vendor_ppd_options,omitempty"`
// CUPS only: ignore printers with make/model 'Local Raw Printer'.
CUPSIgnoreRawPrinters *bool `json:"cups_ignore_raw_printers,omitempty"`
// CUPS only: ignore printers with make/model 'Local Printer Class'.
CUPSIgnoreClassPrinters *bool `json:"cups_ignore_class_printers,omitempty"`
// CUPS only: copy the CUPS printer's printer-info attribute to the GCP printer's defaultDisplayName.
// TODO: rename with cups_ prefix
CUPSCopyPrinterInfoToDisplayName *bool `json:"copy_printer_info_to_display_name,omitempty"`
}
// DefaultConfig represents reasonable default values for Config fields.
// Omitted Config fields are omitted on purpose; they are unique per
// connector instance.
var DefaultConfig = Config{
LocalPrintingEnable: true,
CloudPrintingEnable: false,
XMPPServer: "talk.google.com",
XMPPPort: 443,
XMPPPingTimeout: "5s",
XMPPPingInterval: "2m",
GCPBaseURL: "https://www.google.com/cloudprint/",
GCPOAuthClientID: "539833558011-35iq8btpgas80nrs3o7mv99hm95d4dv6.apps.googleusercontent.com",
GCPOAuthClientSecret: "V9BfPOvdiYuw12hDx5Y5nR0a",
GCPOAuthAuthURL: "https://accounts.google.com/o/oauth2/auth",
GCPOAuthTokenURL: "https://accounts.google.com/o/oauth2/token",
GCPMaxConcurrentDownloads: 5,
NativeJobQueueSize: 3,
NativePrinterPollInterval: "1m",
PrefixJobIDToJobTitle: PointerToBool(false),
DisplayNamePrefix: "",
PrinterBlacklist: []string{},
PrinterWhitelist: []string{},
LogLevel: "INFO",
LocalPortLow: 26000,
LocalPortHigh: 26999,
LogFileName: "/tmp/cloud-print-connector",
LogFileMaxMegabytes: 1,
LogMaxFiles: 3,
LogToJournal: PointerToBool(false),
MonitorSocketFilename: "/tmp/cloud-print-connector-monitor.sock",
CUPSMaxConnections: 50,
CUPSConnectTimeout: "5s",
CUPSPrinterAttributes: []string{
"cups-version",
"device-uri",
"document-format-supported",
"print-color-mode-default",
"print-color-mode-supported",
"printer-name",
"printer-info",
"printer-location",
"printer-make-and-model",
"printer-state",
"printer-state-reasons",
"printer-uuid",
"marker-names",
"marker-types",
"marker-levels",
"copies-default",
"copies-supported",
"number-up-default",
"number-up-supported",
"orientation-requested-default",
"orientation-requested-supported",
"pdf-versions-supported",
},
CUPSJobFullUsername: PointerToBool(false),
CUPSIgnoreRawPrinters: PointerToBool(true),
CUPSIgnoreClassPrinters: PointerToBool(true),
CUPSCopyPrinterInfoToDisplayName: PointerToBool(true),
}
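// PointerToBool is defined elsewhere in this package; it is presumably a small
// helper along these lines, used so optional booleans can distinguish "unset"
// (nil) from an explicit false:
//
//   func PointerToBool(b bool) *bool {
//       return &b
//   }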
// getConfigFilename gets the absolute filename of the config file specified by
// the ConfigFilename flag, and whether it exists.
//
// If the (relative or absolute) ConfigFilename exists, then it is returned.
// If the ConfigFilename exists in a valid XDG path, then it is returned.
// If neither of those exist, the (relative or absolute) ConfigFilename is returned.
func getConfigFilename(context *cli.Context) (string, bool) {
cf := context.GlobalString("config-filename")
if filepath.IsAbs(cf) {
// Absolute path specified; user knows what they want.
_, err := os.Stat(cf)
return cf, err == nil
}
absCF, err := filepath.Abs(cf)
if err != nil {
// syscall failure; treat as if file doesn't exist.
return cf, false
}
if _, err := os.Stat(absCF); err == nil {
// File exists on relative path.
return absCF, true
}
if xdgCF, err := xdg.Config.Find(cf); err == nil {
// File exists in an XDG directory.
return xdgCF, true
}
// Default to relative path. This is probably what the user expects if
// it wasn't found anywhere else.
return absCF, false
}
// Backfill returns a copy of this config with all missing keys set to default values.
func (c *Config) Backfill(configMap map[string]interface{}) *Config {
b := *c.commonBackfill(configMap)
if _, exists := configMap["log_file_name"]; !exists {
b.LogFileName = DefaultConfig.LogFileName
}
if _, exists := configMap["log_file_max_megabytes"]; !exists {
b.LogFileMaxMegabytes = DefaultConfig.LogFileMaxMegabytes
}
if _, exists := configMap["log_max_files"]; !exists {
b.LogMaxFiles = DefaultConfig.LogMaxFiles
}
if _, exists := configMap["log_to_journal"]; !exists {
b.LogToJournal = DefaultConfig.LogToJournal
}
if _, exists := configMap["monitor_socket_filename"]; !exists {
b.MonitorSocketFilename = DefaultConfig.MonitorSocketFilename
}
if _, exists := configMap["cups_max_connections"]; !exists {
b.CUPSMaxConnections = DefaultConfig.CUPSMaxConnections
}
if _, exists := configMap["cups_connect_timeout"]; !exists {
b.CUPSConnectTimeout = DefaultConfig.CUPSConnectTimeout
}
if _, exists := configMap["cups_printer_attributes"]; !exists {
b.CUPSPrinterAttributes = DefaultConfig.CUPSPrinterAttributes
} else {
// Make sure all required attributes are present.
s := make(map[string]struct{}, len(b.CUPSPrinterAttributes))
for _, a := range b.CUPSPrinterAttributes {
s[a] = struct{}{}
}
for _, a := range DefaultConfig.CUPSPrinterAttributes {
if _, exists := s[a]; !exists {
b.CUPSPrinterAttributes = append(b.CUPSPrinterAttributes, a)
}
}
}
if _, exists := configMap["cups_job_full_username"]; !exists {
b.CUPSJobFullUsername = DefaultConfig.CUPSJobFullUsername
}
if _, exists := configMap["cups_ignore_raw_printers"]; !exists {
b.CUPSIgnoreRawPrinters = DefaultConfig.CUPSIgnoreRawPrinters
}
if _, exists := configMap["cups_ignore_class_printers"]; !exists {
b.CUPSIgnoreClassPrinters = DefaultConfig.CUPSIgnoreClassPrinters
}
if _, exists := configMap["copy_printer_info_to_display_name"]; !exists {
b.CUPSCopyPrinterInfoToDisplayName = DefaultConfig.CUPSCopyPrinterInfoToDisplayName
}
return &b
}
// Sparse returns a copy of this config with obvious values removed.
func (c *Config) Sparse(context *cli.Context) *Config | {
s := *c.commonSparse(context)
if !context.IsSet("log-file-max-megabytes") &&
s.LogFileMaxMegabytes == DefaultConfig.LogFileMaxMegabytes {
s.LogFileMaxMegabytes = 0
}
if !context.IsSet("log-max-files") &&
s.LogMaxFiles == DefaultConfig.LogMaxFiles {
s.LogMaxFiles = 0
}
if !context.IsSet("log-to-journal") &&
reflect.DeepEqual(s.LogToJournal, DefaultConfig.LogToJournal) {
s.LogToJournal = nil
}
if !context.IsSet("monitor-socket-filename") &&
s.MonitorSocketFilename == DefaultConfig.MonitorSocketFilename {
s.MonitorSocketFilename = ""
}
if !context.IsSet("cups-max-connections") &&
s.CUPSMaxConnections == DefaultConfig.CUPSMaxConnections {
s.CUPSMaxConnections = 0
}
if !context.IsSet("cups-connect-timeout") &&
s.CUPSConnectTimeout == DefaultConfig.CUPSConnectTimeout {
s.CUPSConnectTimeout = ""
}
if reflect.DeepEqual(s.CUPSPrinterAttributes, DefaultConfig.CUPSPrinterAttributes) {
s.CUPSPrinterAttributes = nil
}
if !context.IsSet("cups-job-full-username") &&
reflect.DeepEqual(s.CUPSJobFullUsername, DefaultConfig.CUPSJobFullUsername) {
s.CUPSJobFullUsername = nil
}
if !context.IsSet("cups-ignore-raw-printers") &&
reflect.DeepEqual(s.CUPSIgnoreRawPrinters, DefaultConfig.CUPSIgnoreRawPrinters) {
s.CUPSIgnoreRawPrinters = nil
}
if !context.IsSet("cups-ignore-class-printers") &&
reflect.DeepEqual(s.CUPSIgnoreClassPrinters, DefaultConfig.CUPSIgnoreClassPrinters) {
s.CUPSIgnoreClassPrinters = nil
}
if !context.IsSet("copy-printer-info-to-display-name") &&
reflect.DeepEqual(s.CUPSCopyPrinterInfoToDisplayName, DefaultConfig.CUPSCopyPrinterInfoToDisplayName) {
s.CUPSCopyPrinterInfoToDisplayName = nil
}
return &s
} | identifier_body |
|
ias_proxy_server.rs | /*
Copyright 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
*/
extern crate common;
extern crate futures;
extern crate hyper;
extern crate ias_client;
extern crate serde;
extern crate serde_json;
use self::futures::{future, Future};
use self::hyper::{
header::{HeaderMap, HeaderValue},
service::service_fn,
Body, Error, Method, Request, Response, Server, StatusCode,
};
use self::ias_client::{
client_utils,
client_utils::{ClientError, ClientResponse},
ias_client::IasClient,
};
use common::lru_cache::LruCache;
use common::utils::read_binary_file;
use ias_proxy_config::IasProxyConfig;
use std::{borrow::Borrow, net::SocketAddr, str::FromStr, sync::Mutex};
/// type definition for response sent from web server
type ResponseBox = Box<Future<Item = Response<Body>, Error = Error> + Send>;
/// structure defining IAS proxy server
pub struct IasProxyServer {
ias_proxy_ip: String,
ias_proxy_port: String,
ias_client: Box<IasClient>,
}
/// Request body from the client; the proxy server may deserialize the request in order to get the
/// quote if the request is for an attestation verification report.
#[derive(Deserialize)]
struct IasAVRRequestBody {
#[serde(rename = "isvEnclaveQuote")]
isv_enclave_quote: String,
#[serde(rename = "pseManifest")]
pse_manifest: String,
nonce: String,
}
/// ClientResponse decoded information stored in cache
#[derive(Debug, Clone)]
struct IasResponse {
body_string: String,
header_map: HeaderMap,
}
lazy_static! {
static ref sig_rl_cache: Mutex<LruCache<String, IasResponse>> = Mutex::new(LruCache::new(None));
static ref attestation_cache: Mutex<LruCache<String, IasResponse>> =
Mutex::new(LruCache::new(None));
}
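// Hedged sketch of how these caches are used further down: each is a
// Mutex-guarded LruCache keyed by the request path (SigRL) or by the enclave
// quote (AVR). Variable names here are illustrative.
//
//   let mut cache = sig_rl_cache.lock().expect("Error acquiring SigRL cache lock");
//   if let Some(hit) = cache.get(&path) {
//       // serve the cached IasResponse
//   } else {
//       // ask IAS, then cache.set(path, ias_response)
//   }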
const SIG_RL_LINK: &str = "/attestation/sgx/v2/sigrl";
const AVR_LINK: &str = "/attestation/sgx/v2/report";
const IP_PORT_DELIMITER: &str = ":";
const UNKNOWN_ERROR_STATUS_CODE: u16 = 520;
impl IasProxyServer {
/// Create new instance of IasProxyServer
fn new(config: &IasProxyConfig) -> Self {
IasProxyServer {
ias_proxy_ip: config.get_proxy_ip(),
ias_proxy_port: config.get_proxy_port(),
// Construct new IasClient with input config parameters
ias_client: Box::new(IasClient::new(
config.get_ias_url(),
read_binary_file(config.get_spid_cert_file().as_str()),
config.get_password(),
None,
)),
}
}
/// run method to start listening on the identified IP and port
pub fn run(&self) {
// Start the web server on the configured URL
let mut path = String::new();
path.push_str(self.ias_proxy_ip.as_str());
path.push_str(IP_PORT_DELIMITER);
path.push_str(self.ias_proxy_port.as_str());
info!("Proxy server will be started as {}", path);
// Construct socket address, panics if binding fails
let socket_addr: SocketAddr = match SocketAddr::from_str(&path) {
Ok(address_bind_successful) => address_bind_successful,
Err(err) => panic!("Error binding the address: {}", err),
};
info!("Socket binding successful");
// The service closure below requires 'static data, so clone ias_client for it to own.
let ias_client = self.ias_client.clone();
// TODO: Store this server instance and call shutdown
let new_service = move || {
let ias_client = ias_client.clone();
// service_fn() creates a hyper's Service. It accepts a closure for handling the
// request, future response is constructed when request is served.
service_fn(move |req| respond_to_request(req, ias_client.borrow()))
};
// Run proxy server in current thread, serve or panic
hyper::rt::run(Server::bind(&socket_addr).serve(new_service).map_err(|e| {
panic!("Server error: {}", e);
}))
}
/// Stop listening on the port
#[allow(dead_code)]
pub fn stop(&self) {
// TODO: Need to stop the server started and clear the cache
unimplemented!()
}
}
/// Function to construct response by parsing request from IasClient. Accepts the request
/// parameter and reference to IasClient object. First checks if cached content has the response
/// corresponding to the request, if not present go and request IAS, get response, store in
/// cache, construct response back.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn respond_to_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get response parsing request parameters
match *req.method() {
Method::GET => handle_get_request(&req, ias_client_obj),
Method::POST => handle_post_request(req, ias_client_obj),
// Proxy server doesn't support any other request types other than GET and POST.
_ => send_response(StatusCode::NOT_FOUND, None, None),
}
}
/// Handle get request from the proxy, this should only be valid for getting signature revocation
/// list. Proxy server doesn't support other GET requests. See ```response_to_request()``` for
/// detailed description.
fn handle_get_request(req: &Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(SIG_RL_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// Search cache for the signature revocation list
let mut sig_rl_cache_lock = sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock");
let cached = sig_rl_cache_lock.get(&path);
// If there's cache, send it as response, otherwise request from IAS
let response_to_send = match cached {
Some(cached_revocation_list) => Ok(cached_revocation_list.clone()),
None => {
// The request URI path already contains the GID, so it is not passed explicitly here.
let result = ias_client_obj.get_signature_revocation_list(None, Some(path.as_str()));
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock")
.set(path, ias_response);
}
ias_response_result
}
};
match response_to_send {
Ok(ias_response) => {
// Send the response to requester
let mut headers = ias_response.header_map;
let body = Body::from(ias_response.body_string);
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error; ideally this case should not occur. Either the cache is corrupted or
// IAS returned an error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Handle post request from the proxy, this should only be valid for getting attestation
/// verification report. Proxy server doesn't support other POST requests. See
/// ```response_to_request()``` for detailed description.
fn handle_post_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(AVR_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// read json input data
let read_body_result = client_utils::read_body_as_string(req.into_body());
if read_body_result.is_err() {
return send_response(StatusCode::INTERNAL_SERVER_ERROR, None, None);
}
let read_body = read_body_result.unwrap();
let json_body: IasAVRRequestBody = serde_json::from_str(read_body.as_str())
.expect("Error deserializing IAS verification report");
let quote = json_body.isv_enclave_quote;
if quote.is_empty() {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// The quote (isvEnclaveQuote) is present at this point; serve the AVR from the
// attestation cache when possible, otherwise send the request to the actual IAS server
let mut attestation_cache_lock = attestation_cache | // Cache is present, it can be sent
Some(cache_present) => Ok(cache_present.clone()),
// Cache is not present, request from IAS and add to cache
None => {
let result = ias_client_obj.post_verify_attestation(
quote.as_bytes(),
Option::from(json_body.pse_manifest.as_str()),
Option::from(json_body.nonce.as_str()),
);
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
// Store the response to the cache
attestation_cache
.lock()
.expect("Error acquiring AVR cache lock")
.set(quote, ias_response);
}
ias_response_result
}
};
match avr {
Ok(avr_content) => {
// AVR is read, send it to the requester
let body = Body::from(avr_content.body_string);
let mut headers = avr_content.header_map;
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error; ideally this case should not occur. Either the cache is corrupted or
// IAS returned an error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Function to construct ```hyper::Response``` for the supplied input parameters.
/// Accepts http status code and Optional headers, body to be packed in response object.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn send_response(
status_code: StatusCode,
headers: Option<HeaderMap<HeaderValue>>,
body: Option<Body>,
) -> ResponseBox {
// Construct response with empty body, then fill input parameters
let mut response = Response::new(Body::empty());
*response.status_mut() = status_code;
if body.is_some() {
*response.body_mut() = body.unwrap();
};
if headers.is_some() {
*response.headers_mut() = headers.unwrap();
}
Box::new(future::ok(response))
}
/// Function to converts Result<&ClientResponse, ClientError> to Result<IasResponse, ClientError>
/// this is done so to store ClientResponse in LRU cache. ClientResponse cannot be directly
/// stored in cache because it has ```hyper::Body``` which is stream and cannot be cloned.
///
/// return: Result<IasResponse, ClientError>
fn ias_response_from_client_response(
client_response: Result<ClientResponse, ClientError>,
) -> Result<IasResponse, ClientError> {
// Start conversion; the client_response needs to be parsed
match client_response {
Ok(successful_response) => {
// If there's successful response, then read body to string
let body_string_result = client_utils::read_body_as_string(successful_response.body);
// If reading body as string is successful then construct IasResponse
match body_string_result {
Ok(body_read_successfully) => Ok(IasResponse {
body_string: body_read_successfully,
header_map: successful_response.header_map,
}),
// Conversion of body to string failed
Err(body_read_failed) => Err(body_read_failed),
}
}
// ClientError occurred, there's no valid response to convert
Err(error_response) => Err(error_response),
}
}
/// Function to construct ```IasProxyServer``` object with the input proxy configuration file.
/// 'new()' for ```IasProxyServer``` is private, so use this public method to get instance of it.
///
/// return: A ```IasProxyServer``` object
pub fn get_proxy_server(proxy_config: &IasProxyConfig) -> IasProxyServer {
// Read toml config file as input.
// Conversion to struct would have failed if fields in file doesn't match expectation
// So the config map here has all required values set in it.
IasProxyServer::new(proxy_config)
}
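// Hedged usage sketch (it mirrors the unit test below); the config values are
// placeholders, not working credentials:
//
//   let config = IasProxyConfig::new(
//       "127.0.0.1".to_string(),
//       "8000".to_string(),
//       "https://dummy-ias-url".to_string(),
//       "src/tests/dummy_cert.pfx".to_string(),
//       "".to_string(),
//   );
//   let server = get_proxy_server(&config);
//   server.run(); // blocks, serving the /attestation/sgx/v2/* endpoints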
#[cfg(test)]
mod tests {
use self::hyper::header::HeaderName;
use super::*;
#[test]
fn test_get_proxy_server() {
let ias_proxy_config = IasProxyConfig::new(
"127.0.0.1".to_string(),
"8000".to_string(),
"https://dummy-ias-url".to_string(),
"src/tests/dummy_cert.pfx".to_string(),
"".to_string(),
);
// This would also test new function of IasProxyServer
let ias_server = get_proxy_server(&ias_proxy_config);
assert_eq!(ias_server.ias_proxy_ip, "127.0.0.1");
assert_eq!(ias_server.ias_proxy_port, "8000");
}
#[test]
fn test_ias_response_from_client_response() {
let mut header_map = HeaderMap::new();
header_map.insert(
HeaderName::from_static("content-type"),
HeaderValue::from_static("plain/text"),
);
let client_response = Ok(ClientResponse {
body: Body::from("dummy text"),
header_map,
});
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(expected) => assert_eq!(expected.body_string, "dummy text"),
Err(_unexpected) => assert!(false),
};
}
#[test]
fn test_erraneous_ias_response_from_client_response() {
let client_response = Err(ClientError);
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(_unexpected) => assert!(false),
Err(_expected) => assert!(true),
};
}
} | .lock()
.expect("Error acquiring AVR cache lock");
let cached_avr = attestation_cache_lock.get("e);
let avr = match cached_avr { | random_line_split |
ias_proxy_server.rs | /*
Copyright 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
*/
extern crate common;
extern crate futures;
extern crate hyper;
extern crate ias_client;
extern crate serde;
extern crate serde_json;
use self::futures::{future, Future};
use self::hyper::{
header::{HeaderMap, HeaderValue},
service::service_fn,
Body, Error, Method, Request, Response, Server, StatusCode,
};
use self::ias_client::{
client_utils,
client_utils::{ClientError, ClientResponse},
ias_client::IasClient,
};
use common::lru_cache::LruCache;
use common::utils::read_binary_file;
use ias_proxy_config::IasProxyConfig;
use std::{borrow::Borrow, net::SocketAddr, str::FromStr, sync::Mutex};
/// type definition for response sent from web server
type ResponseBox = Box<Future<Item = Response<Body>, Error = Error> + Send>;
/// structure defining IAS proxy server
pub struct IasProxyServer {
ias_proxy_ip: String,
ias_proxy_port: String,
ias_client: Box<IasClient>,
}
/// Request body from the client; the proxy server may deserialize the request in order to get the
/// quote if the request is for an attestation verification report.
#[derive(Deserialize)]
struct IasAVRRequestBody {
#[serde(rename = "isvEnclaveQuote")]
isv_enclave_quote: String,
#[serde(rename = "pseManifest")]
pse_manifest: String,
nonce: String,
}
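// The POST body this struct expects, going by the serde renames above, is
// roughly the following (values are placeholders):
//
//   {
//     "isvEnclaveQuote": "<base64 quote>",
//     "pseManifest": "<manifest>",
//     "nonce": "<nonce>"
//   }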
/// ClientResponse decoded information stored in cache
#[derive(Debug, Clone)]
struct IasResponse {
body_string: String,
header_map: HeaderMap,
}
lazy_static! {
static ref sig_rl_cache: Mutex<LruCache<String, IasResponse>> = Mutex::new(LruCache::new(None));
static ref attestation_cache: Mutex<LruCache<String, IasResponse>> =
Mutex::new(LruCache::new(None));
}
const SIG_RL_LINK: &str = "/attestation/sgx/v2/sigrl";
const AVR_LINK: &str = "/attestation/sgx/v2/report";
const IP_PORT_DELIMITER: &str = ":";
const UNKNOWN_ERROR_STATUS_CODE: u16 = 520;
impl IasProxyServer {
/// Create new instance of IasProxyServer
fn new(config: &IasProxyConfig) -> Self {
IasProxyServer {
ias_proxy_ip: config.get_proxy_ip(),
ias_proxy_port: config.get_proxy_port(),
// Construct new IasClient with input config parameters
ias_client: Box::new(IasClient::new(
config.get_ias_url(),
read_binary_file(config.get_spid_cert_file().as_str()),
config.get_password(),
None,
)),
}
}
/// run method to start listening on the identified IP and port
pub fn run(&self) {
// Start the web server on the configured URL
let mut path = String::new();
path.push_str(self.ias_proxy_ip.as_str());
path.push_str(IP_PORT_DELIMITER);
path.push_str(self.ias_proxy_port.as_str());
info!("Proxy server will be started as {}", path);
// Construct socket address, panics if binding fails
let socket_addr: SocketAddr = match SocketAddr::from_str(&path) {
Ok(address_bind_successful) => address_bind_successful,
Err(err) => panic!("Error binding the address: {}", err),
};
info!("Socket binding successful");
// The service closure below requires 'static data, so clone ias_client for it to own.
let ias_client = self.ias_client.clone();
// TODO: Store this server instance and call shutdown
let new_service = move || {
let ias_client = ias_client.clone();
// service_fn() creates a hyper's Service. It accepts a closure for handling the
// request, future response is constructed when request is served.
service_fn(move |req| respond_to_request(req, ias_client.borrow()))
};
// Run proxy server in current thread, serve or panic
hyper::rt::run(Server::bind(&socket_addr).serve(new_service).map_err(|e| {
panic!("Server error: {}", e);
}))
}
/// Stop listening on the port
#[allow(dead_code)]
pub fn stop(&self) {
// TODO: Need to stop the server started and clear the cache
unimplemented!()
}
}
/// Function to construct response by parsing request from IasClient. Accepts the request
/// parameter and reference to IasClient object. First checks if cached content has the response
/// corresponding to the request, if not present go and request IAS, get response, store in
/// cache, construct response back.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn respond_to_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get response parsing request parameters
match *req.method() {
Method::GET => handle_get_request(&req, ias_client_obj),
Method::POST => handle_post_request(req, ias_client_obj),
// Proxy server doesn't support any other request types other than GET and POST.
_ => send_response(StatusCode::NOT_FOUND, None, None),
}
}
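// In practice only two endpoints are served, per the SIG_RL_LINK and AVR_LINK
// constants above; anything else gets a 404 (the GID segment shown is illustrative):
//
//   GET  /attestation/sgx/v2/sigrl/<gid>   -> handle_get_request
//   POST /attestation/sgx/v2/report        -> handle_post_request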
/// Handle get request from the proxy, this should only be valid for getting signature revocation
/// list. Proxy server doesn't support other GET requests. See ```response_to_request()``` for
/// detailed description.
fn handle_get_request(req: &Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(SIG_RL_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// Search cache for the signature revocation list
let mut sig_rl_cache_lock = sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock");
let cached = sig_rl_cache_lock.get(&path);
// If there's cache, send it as response, otherwise request from IAS
let response_to_send = match cached {
Some(cached_revocation_list) => Ok(cached_revocation_list.clone()),
None => {
// The request URI path already contains the GID, so it is not passed explicitly here.
let result = ias_client_obj.get_signature_revocation_list(None, Some(path.as_str()));
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock")
.set(path, ias_response);
}
ias_response_result
}
};
match response_to_send {
Ok(ias_response) => {
// Send the response to requester
let mut headers = ias_response.header_map;
let body = Body::from(ias_response.body_string);
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error; ideally this case should not occur. Either the cache is corrupted or
// IAS returned an error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Handle post request from the proxy, this should only be valid for getting attestation
/// verification report. Proxy server doesn't support other POST requests. See
/// ```response_to_request()``` for detailed description.
fn handle_post_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(AVR_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// read json input data
let read_body_result = client_utils::read_body_as_string(req.into_body());
if read_body_result.is_err() {
return send_response(StatusCode::INTERNAL_SERVER_ERROR, None, None);
}
let read_body = read_body_result.unwrap();
let json_body: IasAVRRequestBody = serde_json::from_str(read_body.as_str())
.expect("Error deserializing IAS verification report");
let quote = json_body.isv_enclave_quote;
if quote.is_empty() {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// The quote (isvEnclaveQuote) is present at this point; serve the AVR from the
// attestation cache when possible, otherwise send the request to the actual IAS server
let mut attestation_cache_lock = attestation_cache
.lock()
.expect("Error acquiring AVR cache lock");
let cached_avr = attestation_cache_lock.get(&quote);
let avr = match cached_avr {
// Cache is present, it can be sent
Some(cache_present) => Ok(cache_present.clone()),
// Cache is not present, request from IAS and add to cache
None => {
let result = ias_client_obj.post_verify_attestation(
quote.as_bytes(),
Option::from(json_body.pse_manifest.as_str()),
Option::from(json_body.nonce.as_str()),
);
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
// Store the response to the cache
attestation_cache
.lock()
.expect("Error acquiring AVR cache lock")
.set(quote, ias_response);
}
ias_response_result
}
};
match avr {
Ok(avr_content) => {
// AVR is read, send it to the requester
let body = Body::from(avr_content.body_string);
let mut headers = avr_content.header_map;
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error; ideally this case should not occur. The cache must be corrupted or
// IAS returned an error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Function to construct ```hyper::Response``` for the supplied input parameters.
/// Accepts http status code and Optional headers, body to be packed in response object.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn send_response(
status_code: StatusCode,
headers: Option<HeaderMap<HeaderValue>>,
body: Option<Body>,
) -> ResponseBox {
// Construct response with empty body, then fill input parameters
let mut response = Response::new(Body::empty());
*response.status_mut() = status_code;
if body.is_some() {
*response.body_mut() = body.unwrap();
};
if headers.is_some() {
*response.headers_mut() = headers.unwrap();
}
Box::new(future::ok(response))
}
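// Illustrative sketch (hypothetical test, not from the original crate): exercises `send_response`
// by resolving the returned future, assuming the futures 0.1 `Future::wait` API that matches the
// imports at the top of this file; the status and body values are arbitrary.
#[cfg(test)]
mod send_response_example {
    use super::*;

    #[test]
    fn packs_status_and_body_into_the_response() {
        let response = send_response(StatusCode::OK, None, Some(Body::from("pong")))
            .wait()
            .expect("response future should resolve");
        assert_eq!(response.status(), StatusCode::OK);
    }
}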
/// Function to convert Result<ClientResponse, ClientError> to Result<IasResponse, ClientError>.
/// This is done so that the response can be stored in the LRU cache; ClientResponse cannot be
/// stored in the cache directly because it holds a ```hyper::Body```, which is a stream and cannot be cloned.
///
/// return: Result<IasResponse, ClientError>
fn ias_response_from_client_response(
client_response: Result<ClientResponse, ClientError>,
) -> Result<IasResponse, ClientError> {
// Start conversion, need to parse client_response
match client_response {
Ok(successful_response) => {
// If there's successful response, then read body to string
let body_string_result = client_utils::read_body_as_string(successful_response.body);
// If reading body as string is successful then construct IasResponse
match body_string_result {
Ok(body_read_successfully) => Ok(IasResponse {
body_string: body_read_successfully,
header_map: successful_response.header_map,
}),
// Conversion of body to string failed
Err(body_read_failed) => Err(body_read_failed),
}
}
// ClientError occurred, there's no valid response to convert
Err(error_response) => Err(error_response),
}
}
/// Function to construct ```IasProxyServer``` object with the input proxy configuration file.
/// 'new()' for ```IasProxyServer``` is private, so use this public method to get instance of it.
///
/// return: A ```IasProxyServer``` object
pub fn get_proxy_server(proxy_config: &IasProxyConfig) -> IasProxyServer {
// Read the toml config file as input.
// Conversion to the struct would have failed if fields in the file didn't match expectations,
// so the config map here has all required values set in it.
IasProxyServer::new(proxy_config)
}
#[cfg(test)]
mod tests {
use self::hyper::header::HeaderName;
use super::*;
#[test]
fn test_get_proxy_server() {
let ias_proxy_config = IasProxyConfig::new(
"127.0.0.1".to_string(),
"8000".to_string(),
"https://dummy-ias-url".to_string(),
"src/tests/dummy_cert.pfx".to_string(),
"".to_string(),
);
// This also exercises the new() function of IasProxyServer
let ias_server = get_proxy_server(&ias_proxy_config);
assert_eq!(ias_server.ias_proxy_ip, "127.0.0.1");
assert_eq!(ias_server.ias_proxy_port, "8000");
}
#[test]
fn test_ias_response_from_client_response() {
let mut header_map = HeaderMap::new();
header_map.insert(
HeaderName::from_static("content-type"),
HeaderValue::from_static("plain/text"),
);
let client_response = Ok(ClientResponse {
body: Body::from("dummy text"),
header_map,
});
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(expected) => assert_eq!(expected.body_string, "dummy text"),
Err(_unexpected) => assert!(false),
};
}
#[test]
fn | () {
let client_response = Err(ClientError);
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(_unexpected) => assert!(false),
Err(_expected) => assert!(true),
};
}
}
| test_erraneous_ias_response_from_client_response | identifier_name |
ias_proxy_server.rs | /*
Copyright 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
*/
extern crate common;
extern crate futures;
extern crate hyper;
extern crate ias_client;
extern crate serde;
extern crate serde_json;
use self::futures::{future, Future};
use self::hyper::{
header::{HeaderMap, HeaderValue},
service::service_fn,
Body, Error, Method, Request, Response, Server, StatusCode,
};
use self::ias_client::{
client_utils,
client_utils::{ClientError, ClientResponse},
ias_client::IasClient,
};
use common::lru_cache::LruCache;
use common::utils::read_binary_file;
use ias_proxy_config::IasProxyConfig;
use std::{borrow::Borrow, net::SocketAddr, str::FromStr, sync::Mutex};
/// type definition for response sent from web server
type ResponseBox = Box<Future<Item = Response<Body>, Error = Error> + Send>;
/// structure defining IAS proxy server
pub struct IasProxyServer {
ias_proxy_ip: String,
ias_proxy_port: String,
ias_client: Box<IasClient>,
}
/// Request body from the client; the proxy server may deserialize the request in order to get the
/// quote if the request is for an attestation verification report.
#[derive(Deserialize)]
struct IasAVRRequestBody {
#[serde(rename = "isvEnclaveQuote")]
isv_enclave_quote: String,
#[serde(rename = "pseManifest")]
pse_manifest: String,
nonce: String,
}
/// ClientResponse decoded information stored in cache
#[derive(Debug, Clone)]
struct IasResponse {
body_string: String,
header_map: HeaderMap,
}
lazy_static! {
static ref sig_rl_cache: Mutex<LruCache<String, IasResponse>> = Mutex::new(LruCache::new(None));
static ref attestation_cache: Mutex<LruCache<String, IasResponse>> =
Mutex::new(LruCache::new(None));
}
const SIG_RL_LINK: &str = "/attestation/sgx/v2/sigrl";
const AVR_LINK: &str = "/attestation/sgx/v2/report";
const IP_PORT_DELIMITER: &str = ":";
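// 520 is a non-standard HTTP status code, conventionally used to signal an unknown upstream error.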
const UNKNOWN_ERROR_STATUS_CODE: u16 = 520;
impl IasProxyServer {
/// Create new instance of IasProxyServer
fn new(config: &IasProxyConfig) -> Self {
IasProxyServer {
ias_proxy_ip: config.get_proxy_ip(),
ias_proxy_port: config.get_proxy_port(),
// Construct new IasClient with input config parameters
ias_client: Box::new(IasClient::new(
config.get_ias_url(),
read_binary_file(config.get_spid_cert_file().as_str()),
config.get_password(),
None,
)),
}
}
/// run method to start listening on the identified IP and port
pub fn run(&self) {
// Start the web server on the configured URL
let mut path = String::new();
path.push_str(self.ias_proxy_ip.as_str());
path.push_str(IP_PORT_DELIMITER);
path.push_str(self.ias_proxy_port.as_str());
info!("Proxy server will be started as {}", path);
// Construct the socket address; panics if the address cannot be parsed
let socket_addr: SocketAddr = match SocketAddr::from_str(&path) {
Ok(address_bind_successful) => address_bind_successful,
Err(err) => panic!("Error binding the address: {}", err),
};
info!("Socket binding successful");
// ias_client must have a 'static lifetime here, so clone it instead of borrowing from self
let ias_client = self.ias_client.clone();
// TODO: Store this server instance and call shutdown
let new_service = move || {
let ias_client = ias_client.clone();
// service_fn() creates a hyper's Service. It accepts a closure for handling the
// request, future response is constructed when request is served.
service_fn(move |req| respond_to_request(req, ias_client.borrow()))
};
// Run proxy server in current thread, serve or panic
hyper::rt::run(Server::bind(&socket_addr).serve(new_service).map_err(|e| {
panic!("Server error: {}", e);
}))
}
/// Stop listening on the port
#[allow(dead_code)]
pub fn stop(&self) {
// TODO: Need to stop the server started and clear the cache
unimplemented!()
}
}
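// Illustrative sketch (hypothetical, not part of the original source): how a binary might wire the
// proxy together end to end; every configuration value below is a placeholder.
#[allow(dead_code)]
fn example_start_proxy() {
    let proxy_config = IasProxyConfig::new(
        "127.0.0.1".to_string(),
        "8000".to_string(),
        "https://dummy-ias-url".to_string(),
        "src/tests/dummy_cert.pfx".to_string(),
        "".to_string(),
    );
    // `run()` binds the configured socket and blocks the current thread serving requests.
    get_proxy_server(&proxy_config).run();
}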
/// Function to construct a response by parsing the request via IasClient. Accepts the request
/// parameter and a reference to the IasClient object. First checks whether the cache already holds
/// the response corresponding to the request; if not, it requests IAS, stores the response in the
/// cache, and constructs the response to send back.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn respond_to_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get the response by parsing request parameters
match *req.method() {
Method::GET => handle_get_request(&req, ias_client_obj),
Method::POST => handle_post_request(req, ias_client_obj),
// Proxy server doesn't support any request types other than GET and POST.
_ => send_response(StatusCode::NOT_FOUND, None, None),
}
}
/// Handle a GET request to the proxy; this is only valid for fetching the signature revocation
/// list. The proxy server doesn't support other GET requests. See ```respond_to_request()``` for a
/// detailed description.
fn handle_get_request(req: &Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(SIG_RL_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// Search cache for the signature revocation list
let mut sig_rl_cache_lock = sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock");
let cached = sig_rl_cache_lock.get(&path);
// If there's a cached entry, send it as the response, otherwise request it from IAS
let response_to_send = match cached {
Some(cached_revocation_list) => Ok(cached_revocation_list.clone()),
None => {
// The request has gid in the URI path, so we do not need to send gid explicitly
let result = ias_client_obj.get_signature_revocation_list(None, Some(path.as_str()));
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock")
.set(path, ias_response);
}
ias_response_result
}
};
match response_to_send {
Ok(ias_response) => {
// Send the response to requester
let mut headers = ias_response.header_map;
let body = Body::from(ias_response.body_string);
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error; ideally this case should not occur. The cache must be corrupted or
// IAS returned an error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Handle a POST request to the proxy; this is only valid for fetching an attestation
/// verification report. The proxy server doesn't support other POST requests. See
/// ```respond_to_request()``` for a detailed description.
fn handle_post_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(AVR_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// read json input data
let read_body_result = client_utils::read_body_as_string(req.into_body());
if read_body_result.is_err() {
return send_response(StatusCode::INTERNAL_SERVER_ERROR, None, None);
}
let read_body = read_body_result.unwrap();
let json_body: IasAVRRequestBody = serde_json::from_str(read_body.as_str())
.expect("Error deserializing IAS verification report");
let quote = json_body.isv_enclave_quote;
if quote.is_empty() {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// If there is no input quote (isvEnclaveQuote) then return 404,
// otherwise check the cache or send the request to the actual IAS server
let mut attestation_cache_lock = attestation_cache
.lock()
.expect("Error acquiring AVR cache lock");
let cached_avr = attestation_cache_lock.get(&quote);
let avr = match cached_avr {
// Cache is present, it can be sent
Some(cache_present) => Ok(cache_present.clone()),
// Cache is not present, request from IAS and add to cache
None => {
let result = ias_client_obj.post_verify_attestation(
quote.as_bytes(),
Option::from(json_body.pse_manifest.as_str()),
Option::from(json_body.nonce.as_str()),
);
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
// Store the response to the cache
attestation_cache
.lock()
.expect("Error acquiring AVR cache lock")
.set(quote, ias_response);
}
ias_response_result
}
};
match avr {
Ok(avr_content) => {
// AVR is read, send it to the requester
let body = Body::from(avr_content.body_string);
let mut headers = avr_content.header_map;
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error; ideally this case should not occur. The cache must be corrupted or
// IAS returned an error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Function to construct ```hyper::Response``` for the supplied input parameters.
/// Accepts http status code and Optional headers, body to be packed in response object.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn send_response(
status_code: StatusCode,
headers: Option<HeaderMap<HeaderValue>>,
body: Option<Body>,
) -> ResponseBox {
// Construct response with empty body, then fill input parameters
let mut response = Response::new(Body::empty());
*response.status_mut() = status_code;
if body.is_some() {
*response.body_mut() = body.unwrap();
};
if headers.is_some() {
*response.headers_mut() = headers.unwrap();
}
Box::new(future::ok(response))
}
/// Function to convert Result<ClientResponse, ClientError> to Result<IasResponse, ClientError>.
/// This is done so that the response can be stored in the LRU cache; ClientResponse cannot be
/// stored in the cache directly because it holds a ```hyper::Body```, which is a stream and cannot be cloned.
///
/// return: Result<IasResponse, ClientError>
fn ias_response_from_client_response(
client_response: Result<ClientResponse, ClientError>,
) -> Result<IasResponse, ClientError> |
/// Function to construct ```IasProxyServer``` object with the input proxy configuration file.
/// 'new()' for ```IasProxyServer``` is private, so use this public method to get instance of it.
///
/// return: A ```IasProxyServer``` object
pub fn get_proxy_server(proxy_config: &IasProxyConfig) -> IasProxyServer {
// Read the toml config file as input.
// Conversion to the struct would have failed if fields in the file didn't match expectations,
// so the config map here has all required values set in it.
IasProxyServer::new(proxy_config)
}
#[cfg(test)]
mod tests {
use self::hyper::header::HeaderName;
use super::*;
#[test]
fn test_get_proxy_server() {
let ias_proxy_config = IasProxyConfig::new(
"127.0.0.1".to_string(),
"8000".to_string(),
"https://dummy-ias-url".to_string(),
"src/tests/dummy_cert.pfx".to_string(),
"".to_string(),
);
// This also exercises the new() function of IasProxyServer
let ias_server = get_proxy_server(&ias_proxy_config);
assert_eq!(ias_server.ias_proxy_ip, "127.0.0.1");
assert_eq!(ias_server.ias_proxy_port, "8000");
}
#[test]
fn test_ias_response_from_client_response() {
let mut header_map = HeaderMap::new();
header_map.insert(
HeaderName::from_static("content-type"),
HeaderValue::from_static("plain/text"),
);
let client_response = Ok(ClientResponse {
body: Body::from("dummy text"),
header_map,
});
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(expected) => assert_eq!(expected.body_string, "dummy text"),
Err(_unexpected) => assert!(false),
};
}
#[test]
fn test_erraneous_ias_response_from_client_response() {
let client_response = Err(ClientError);
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(_unexpected) => assert!(false),
Err(_expected) => assert!(true),
};
}
}
| {
// Start conversion, need to parse client_response
match client_response {
Ok(successful_response) => {
// If there's successful response, then read body to string
let body_string_result = client_utils::read_body_as_string(successful_response.body);
// If reading body as string is successful then construct IasResponse
match body_string_result {
Ok(body_read_successfully) => Ok(IasResponse {
body_string: body_read_successfully,
header_map: successful_response.header_map,
}),
// Conversion of body to string failed
Err(body_read_failed) => Err(body_read_failed),
}
}
// ClientError occurred, there's no valid response to convert
Err(error_response) => Err(error_response),
}
} | identifier_body |
mod.rs | use crate::graph::Graph;
use log::debug;
use std::collections::HashMap;
use std::fmt;
use ndarray::Array2;
use thiserror::Error;
mod builder;
mod word;
mod cell;
mod add_word;
mod random;
mod spacing;
mod properties;
mod pdf_conversion;
mod matrix;
mod merge;
mod validity;
use word::Word;
use cell::Cell;
pub use builder::CrosswordGridBuilder;
pub use pdf_conversion::CrosswordPrinter;
static VALID_ANSWERCHARS: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
static VALID_CLUECHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_— -;:,.?!@'“”`‘’\"&*()$£%";
#[derive(Error,Debug,PartialEq)]
pub enum CellError {
#[error("Attempted to add word {0} to cell in direction {2:?} but cell already has id {1}")]
WordIdMismatch(usize, usize, Direction),
#[error("Attempted to add letter {0} to cell, but cell already has letter {1}")]
LetterMismatch(char, char), | FillBlack,
}
#[derive(Error,Debug,PartialEq)]
pub enum CrosswordError {
#[error("Adjacent cells {0:?} {1:?} incompatible - no word found that links them.")]
AdjacentCellsNoLinkWord(Location, Location),
#[error("Adjacent cells {0:?} {1:?} incompatible - should have a shared word which links them, but the words don't match: {2} {3}")]
AdjacentCellsMismatchedLinkWord(Location, Location, usize, usize),
#[error("Error updating cell at location {0:?}")]
CellError(Location, CellError),
#[error("Cell {0:?} at start/end of word not empty. Last/first cell in word is {1:?}")]
NonEmptyWordBoundary(Location, Location),
#[error("Cell not found in grid {0:?}")]
CellNotFound(Location),
#[error("Word {1} with id {0} already placed at {2:?}")]
WordAlreadyPlaced(usize, String, Location),
#[error("Attempted to place word {1} with id {0} with invalid direction {2:?}")]
InvalidWordDirection(usize, String, Direction),
#[error("Word not found in grid object {0}")]
WordNotFound(usize),
}
#[derive(Clone,Copy,Debug,PartialEq,Eq,Ord,PartialOrd,Hash)]
pub enum Direction {
Across,
Down,
}
impl Direction {
fn rotate(&self) -> Self {
match self {
Direction::Across => Direction::Down,
Direction::Down => Direction::Across,
}
}
}
#[derive(Clone,Copy,Eq,Hash)]
pub struct Location(pub isize, pub isize);
impl fmt::Debug for Location {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Location({}, {})", self.0, self.1)
}
}
impl PartialEq for Location {
fn eq(&self, other: &Location) -> bool {
self.0 == other.0 && self.1 == other.1
}
}
impl Location {
fn relative_location(&self, move_across: isize, move_down: isize) -> Location {
Location(self.0 + move_across, self.1 + move_down)
}
fn relative_location_directed(&self, move_size: isize, direction: Direction) -> Location {
match direction {
Direction::Across => Location(self.0, self.1 + move_size),
Direction::Down => Location(self.0 + move_size, self.1),
}
}
}
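// Illustrative sketch (hypothetical test, not from the original crate): the relative-location
// helpers add offsets componentwise; the coordinates below are arbitrary.
#[cfg(test)]
mod location_example {
    use super::*;

    #[test]
    fn relative_moves_add_componentwise() {
        let loc = Location(2, 3);
        assert_eq!(loc.relative_location(1, -1), Location(3, 2));
        assert_eq!(loc.relative_location_directed(2, Direction::Across), Location(2, 5));
        assert_eq!(loc.relative_location_directed(2, Direction::Down), Location(4, 3));
    }
}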
#[derive(Clone)]
pub struct CrosswordGrid {
cell_map: HashMap<Location, Cell>,
word_map: HashMap<usize, Word>,
top_left_cell_index: Location,
bottom_right_cell_index: Location,
}
impl fmt::Debug for CrosswordGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut words: Vec<(&usize, &Word)> = self.word_map.iter().collect();
words.sort_by_key(|a| *a.0);
let word_strs: Vec<String> = words.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
let mut cells: Vec<(&Location, &Cell)> = self.cell_map.iter().collect();
cells.sort_by_key(|a| (a.0.0, a.0.1));
let cell_strs: Vec<String> = cells.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
write!(f, "CrosswordGrid(\nIndices: Top left {:?} Bottom right {:?}\nWords:{:#?}\nCells:{:#?}\n))",
self.top_left_cell_index, self.bottom_right_cell_index, word_strs, cell_strs)
}
}
impl CrosswordGrid {
fn get_word(&self, word_id: usize) -> Result<&Word, CrosswordError> {
match self.word_map.get(&word_id) {
Some(word) => Ok(word),
None => Err(CrosswordError::WordNotFound(word_id)),
}
}
fn get_cell(&self, location: &Location) -> Result<&Cell, CrosswordError> {
match self.cell_map.get(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
fn get_cell_mut(&mut self, location: &Location) -> Result<&mut Cell, CrosswordError> {
match self.cell_map.get_mut(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
pub fn new_single_word(word: &str) -> Self {
let mut builder = builder::CrosswordGridBuilder::new();
builder.from_string(word)
}
fn new_from_wordmap_single_placed(word_id: usize,
direction: Direction,
mut word_map: HashMap<usize, Word>) -> Self {
let mut location = Location(0, 0);
let across_id: Option<usize>;
let down_id: Option<usize>;
let mut cell_map: HashMap<Location, Cell> = HashMap::new();
match direction {
Direction::Across => {
across_id = Some(word_id);
down_id = None;
},
Direction::Down => {
across_id = None;
down_id = Some(word_id);
},
};
let word = word_map.get_mut(&word_id).unwrap();
word.update_location(location, direction);
for c in word.word_text.chars() {
cell_map.insert(location, Cell::new(c, across_id, down_id));
location = location.relative_location_directed(1, direction);
}
let mut grid = CrosswordGrid {
cell_map,
word_map,
top_left_cell_index: Location(0, 0),
bottom_right_cell_index: location.relative_location_directed(-1, direction),
};
grid.fit_to_size();
grid
}
fn get_all_intersections(&self) -> Vec<(usize, usize)> {
let mut edges: Vec<(usize, usize)> = vec![];
for cell in self.cell_map.values().filter(|c| c.is_intersection()) {
edges.push((cell.get_across_word_id().unwrap(),
cell.get_down_word_id().unwrap()));
}
edges.sort();
debug!("All intersections found {:#?}", edges);
edges
}
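// Each intersection cell contributes 1 to both the (across, down) entry and its transpose,
// so the resulting matrix is symmetric and an entry counts the cells two words share.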
pub fn to_graph_adjacency_matrix(&self) -> Array2<u8> {
let edges = self.get_all_intersections();
let mut word_ids: Vec<usize> = self.word_map.keys().cloned().collect();
word_ids.sort();
let max_word_id = word_ids[word_ids.len() - 1] + 1;
let mut adjacency: Array2<u8> = Array2::zeros((max_word_id, max_word_id));
for (word1, word2) in edges.iter() {
adjacency[[*word1, *word2]] += 1;
adjacency[[*word2, *word1]] += 1;
}
adjacency
}
pub fn to_graph(&self) -> Graph {
let edges = self.get_all_intersections();
let mut graph = Graph::new_from_edges(edges);
for (word_id, _word) in self.word_map.iter().filter(|(_id, w)| w.is_placed()) {
graph.add_node(*word_id);
}
graph
}
pub fn to_string_with_coords(&self) -> String {
let mut string: String = String::from("");
let coord_string: String = format!("Top-left: ({}, {}), bottom-right: ({}, {})\n",
self.top_left_cell_index.0 + 1,
self.top_left_cell_index.1 + 1,
self.bottom_right_cell_index.0 - 1,
self.bottom_right_cell_index.1 - 1);
string.push_str(&coord_string);
string.push_str(&self.to_string());
string
}
pub fn to_string(&self) -> String {
let mut string: String = String::from("");
let mut row = self.top_left_cell_index.0 + 1;
let mut col = self.top_left_cell_index.1 + 1;
while row < self.bottom_right_cell_index.0 {
while col < self.bottom_right_cell_index.1 {
let c = self.cell_map.get(&Location(row, col)).unwrap().to_char();
string.push(c);
col += 1;
}
col = self.top_left_cell_index.1 + 1;
row += 1;
string.push('\n');
}
string
}
pub fn check_valid(&self) {
assert!(self.top_left_cell_index.0 <= self.bottom_right_cell_index.0);
assert!(self.top_left_cell_index.1 <= self.bottom_right_cell_index.1);
let mut row = self.top_left_cell_index.0;
let mut col = self.top_left_cell_index.1;
while row <= self.bottom_right_cell_index.0 {
while col <= self.bottom_right_cell_index.1 {
let present = self.cell_map.contains_key(&Location(row, col));
if !present {
panic!("Cell not present in grid {}, {}", row, col);
}
col += 1;
}
col = self.top_left_cell_index.1;
row += 1;
}
for cell in self.cell_map.values() {
if let Some(word_id) = cell.get_across_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
if let Some(word_id) = cell.get_down_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
}
let graph = self.to_graph();
debug!("{:#?}", graph);
debug!("{:#?}", self.word_map);
debug!("Checking grid connected {}", self.to_string());
assert!(graph.is_connected());
}
fn find_lowest_unused_word_id(&self) -> usize {
let mut word_id: usize = 0;
while self.word_map.contains_key(&word_id) {
word_id += 1;
}
word_id
}
pub fn add_unplaced_word_at_id(&mut self, word_text: &str, clue: &str, word_id: usize, required_direction: Option<Direction>) {
let word = Word::new_unplaced(word_text, clue, required_direction);
self.word_map.insert(word_id, word);
}
pub fn add_unplaced_word(&mut self, word_text: &str, clue: &str, required_direction: Option<Direction>) -> usize {
let word_id = self.find_lowest_unused_word_id();
self.add_unplaced_word_at_id(word_text, clue, word_id, required_direction);
word_id
}
pub fn update_word_id(&mut self, old_word_id: usize, new_word_id: usize) {
// Move in hashmap
let word: Word = self.word_map.remove(&old_word_id).unwrap();
self.word_map.insert(new_word_id, word);
for (_location, cell) in self.cell_map.iter_mut() {
cell.update_word_id(old_word_id, new_word_id);
}
}
pub fn delete_word(&mut self, word_id: usize) {
self.unplace_word(word_id);
self.word_map.remove(&word_id);
}
pub fn unplace_word(&mut self, word_id: usize) {
for (_location, cell) in self.cell_map.iter_mut() {
cell.remove_word(word_id);
}
if let Some(word) = self.word_map.get_mut(&word_id) {
word.remove_placement();
}
self.fit_to_size();
debug!("Now have {} words in grid", self.word_map.len());
}
}
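// Illustrative sketch (hypothetical test, not from the original crate): word ids handed out by
// `add_unplaced_word` are unique, and `delete_word` frees an id for reuse; the words are arbitrary.
#[cfg(test)]
mod word_id_example {
    use super::*;

    #[test]
    fn unplaced_word_ids_are_unique_and_reusable() {
        let mut grid = CrosswordGrid::new_single_word("ALPHA");
        let first = grid.add_unplaced_word("BETA", "", None);
        let second = grid.add_unplaced_word("GAMMA", "", None);
        assert_ne!(first, second);
        grid.delete_word(second);
        // The freed id is now the lowest unused one, so it is handed out again.
        assert_eq!(grid.add_unplaced_word("DELTA", "", None), second);
    }
}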
#[cfg(test)]
mod tests {
use super::*;
use ndarray::array;
#[test]
fn test_adjacency() -> Result<(), CrosswordError> {
crate::logging::init_logger(true);
let mut grid = CrosswordGrid::new_single_word("ALPHA");
let arrival_word_id = grid.add_unplaced_word("ARRIVAL", "", None);
let bear_word_id = grid.add_unplaced_word("BEARER", "", None);
let innards_word_id = grid.add_unplaced_word("INNARDS", "", None);
let cup_word_id = grid.add_unplaced_word("CUP", "", None);
grid.add_unplaced_word("CAP", "", None);
grid.check_valid();
debug!("{:#?}", grid);
grid.place_word_in_cell(Location(0, 0), arrival_word_id, 0, Direction::Down)?;
grid.place_word_in_cell(Location(0, 4), bear_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(0, 2), cup_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(3, 0), innards_word_id, 0, Direction::Across)?;
debug!("{:#?}", grid);
grid.check_valid();
let adjacency = grid.to_graph_adjacency_matrix();
assert_eq!(adjacency, array![[0, 1, 1, 0, 1, 0],
[1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]);
Ok(())
}
} |
#[error("Attempted to fill a cell already marked as black")] | random_line_split |
mod.rs | use crate::graph::Graph;
use log::debug;
use std::collections::HashMap;
use std::fmt;
use ndarray::Array2;
use thiserror::Error;
mod builder;
mod word;
mod cell;
mod add_word;
mod random;
mod spacing;
mod properties;
mod pdf_conversion;
mod matrix;
mod merge;
mod validity;
use word::Word;
use cell::Cell;
pub use builder::CrosswordGridBuilder;
pub use pdf_conversion::CrosswordPrinter;
static VALID_ANSWERCHARS: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
static VALID_CLUECHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_— -;:,.?!@'“”`‘’\"&*()$£%";
#[derive(Error,Debug,PartialEq)]
pub enum CellError {
#[error("Attempted to add word {0} to cell in direction {2:?} but cell already has id {1}")]
WordIdMismatch(usize, usize, Direction),
#[error("Attempted to add letter {0} to cell, but cell already has letter {1}")]
LetterMismatch(char, char),
#[error("Attempted to fill a cell already marked as black")]
FillBlack,
}
#[derive(Error,Debug,PartialEq)]
pub enum CrosswordError {
#[error("Adjacent cells {0:?} {1:?} incompatible - no word found that links them.")]
AdjacentCellsNoLinkWord(Location, Location),
#[error("Adjacent cells {0:?} {1:?} incompatible - should have a shared word which links them, but the words don't match: {2} {3}")]
AdjacentCellsMismatchedLinkWord(Location, Location, usize, usize),
#[error("Error updating cell at location {0:?}")]
CellError(Location, CellError),
#[error("Cell {0:?} at start/end of word not empty. Last/first cell in word is {1:?}")]
NonEmptyWordBoundary(Location, Location),
#[error("Cell not found in grid {0:?}")]
CellNotFound(Location),
#[error("Word {1} with id {0} already placed at {2:?}")]
WordAlreadyPlaced(usize, String, Location),
#[error("Attempted to place word {1} with id {0} with invalid direction {2:?}")]
InvalidWordDirection(usize, String, Direction),
#[error("Word not found in grid object {0}")]
WordNotFound(usize),
}
#[derive(Clone,Copy,Debug,PartialEq,Eq,Ord,PartialOrd,Hash)]
pub enum Direction {
Across,
Down,
}
impl Direction {
fn rotate(&self) -> Self {
match self {
Direction::Across => Direction::Down,
Direction::Down => Direction::Across,
}
}
}
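// Illustrative sketch (hypothetical test, not from the original crate): `rotate` flips the axis,
// so applying it twice returns the original direction.
#[cfg(test)]
mod direction_example {
    use super::*;

    #[test]
    fn rotate_is_an_involution() {
        assert_eq!(Direction::Across.rotate(), Direction::Down);
        assert_eq!(Direction::Across.rotate().rotate(), Direction::Across);
    }
}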
#[derive(Clone,Copy,Eq,Hash)]
pub struct Location(pub isize, pub isize);
impl fmt::Debug for Location {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Location({}, {})", self.0, self.1)
}
}
impl PartialEq for Location {
fn eq(&self, other: &Location) -> bool {
self.0 == other.0 && self.1 == other.1
}
}
impl Location {
fn relative_location(&self, move_across: isize, move_down: isize) -> Location {
Location(self.0 + move_across, self.1 + move_down)
}
fn relative_location_directed(&self, move_size: isize, direction: Direction) -> Location {
match direction {
Direction::Across => Location(self.0, self.1 + move_size),
Direction::Down => Location(self.0 + move_size, self.1),
}
}
}
#[derive(Clone)]
pub struct CrosswordGrid {
cell_map: HashMap<Location, Cell>,
word_map: HashMap<usize, Word>,
top_left_cell_index: Location,
bottom_right_cell_index: Location,
}
impl fmt::Debug for CrosswordGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut words: Vec<(&usize, &Word)> = self.word_map.iter().collect();
words.sort_by_key(|a| *a.0);
let word_strs: Vec<String> = words.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
let mut cells: Vec<(&Location, &Cell)> = self.cell_map.iter().collect();
cells.sort_by_key(|a| (a.0.0, a.0.1));
let cell_strs: Vec<String> = cells.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
write!(f, "CrosswordGrid(\nIndices: Top left {:?} Bottom right {:?}\nWords:{:#?}\nCells:{:#?}\n))",
self.top_left_cell_index, self.bottom_right_cell_index, word_strs, cell_strs)
}
}
impl CrosswordGrid {
fn get_word(&self, word_id: usize) -> Result<&Word, CrosswordError> {
match self.word_map.get(&word_id) {
Some(word) => Ok(word),
None => Err(CrosswordError::WordNotFound(word_id)),
}
}
fn get_cell(&self, location: &Location) -> Result<&Cell, CrosswordError> {
match self.cell_map.get(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
fn get_cell_mut(&mut self, location: &Location) -> Result<&mut Cell, CrosswordError> {
match self.cell_map.get_mut(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
pub fn new_single_word(word: &str) -> Self {
let mut builder = builder::CrosswordGridBuilder::new();
builder.from_string(word)
}
fn new_from_wordmap_single_placed(word_id: usize,
direction: Direction,
mut word_map: HashMap<usize, Word>) -> Self {
let mut location = Location(0, 0);
let across_id: Option<usize>;
let down_id: Option<usize>;
let mut cell_map: HashMap<Location, Cell> = HashMap::new();
match direction {
Direction::Across => {
across_id = Some(word_id);
down_id = None;
},
Direction::Down => {
across_id = None;
down_id = Some(word_id);
},
};
let word = word_map.get_mut(&word_id).unwrap();
word.update_location(location, direction);
for c in word.word_text.chars() {
cell_map.insert(location, Cell::new(c, across_id, down_id));
location = location.relative_location_directed(1, direction);
}
let mut grid = CrosswordGrid {
cell_map,
word_map,
top_left_cell_index: Location(0, 0),
bottom_right_cell_index: location.relative_location_directed(-1, direction),
};
grid.fit_to_size();
grid
}
fn get_all_intersections(&self) -> Vec<(usize, usize)> {
let mut edges: Vec<(usize, usize)> = vec![];
for cell in self.cell_map.values().filter(|c| c.is_intersection()) {
edges.push((cell.get_across_word_id().unwrap(),
cell.get_down_word_id().unwrap()));
}
edges.sort();
debug!("All intersections found {:#?}", edges);
edges
}
pub fn to_graph_adjacency_matrix(&self) -> Array2<u8> {
let edges = self.get_all_intersections();
let mut word_ids: Vec<usize> = self.word_map.keys().cloned().collect();
word_ids.sort();
let max_word_id = word_ids[word_ids.len() - 1] + 1;
let mut adjacency: Array2<u8> = Array2::zeros((max_word_id, max_word_id));
for (word1, word2) in edges.iter() {
adjacency[[*word1, *word2]] += 1;
adjacency[[*word2, *word1]] += 1;
}
adjacency
}
pub fn to_graph(&s | Graph {
let edges = self.get_all_intersections();
let mut graph = Graph::new_from_edges(edges);
for (word_id, _word) in self.word_map.iter().filter(|(_id, w)| w.is_placed()) {
graph.add_node(*word_id);
}
graph
}
pub fn to_string_with_coords(&self) -> String {
let mut string: String = String::from("");
let coord_string: String = format!("Top-left: ({}, {}), bottom-right: ({}, {})\n",
self.top_left_cell_index.0 + 1,
self.top_left_cell_index.1 + 1,
self.bottom_right_cell_index.0 - 1,
self.bottom_right_cell_index.1 - 1);
string.push_str(&coord_string);
string.push_str(&self.to_string());
string
}
pub fn to_string(&self) -> String {
let mut string: String = String::from("");
let mut row = self.top_left_cell_index.0 + 1;
let mut col = self.top_left_cell_index.1 + 1;
while row < self.bottom_right_cell_index.0 {
while col < self.bottom_right_cell_index.1 {
let c = self.cell_map.get(&Location(row, col)).unwrap().to_char();
string.push(c);
col += 1;
}
col = self.top_left_cell_index.1 + 1;
row += 1;
string.push('\n');
}
string
}
pub fn check_valid(&self) {
assert!(self.top_left_cell_index.0 <= self.bottom_right_cell_index.0);
assert!(self.top_left_cell_index.1 <= self.bottom_right_cell_index.1);
let mut row = self.top_left_cell_index.0;
let mut col = self.top_left_cell_index.1;
while row <= self.bottom_right_cell_index.0 {
while col <= self.bottom_right_cell_index.1 {
let present = self.cell_map.contains_key(&Location(row, col));
if !present {
panic!("Cell not present in grid {}, {}", row, col);
}
col += 1;
}
col = self.top_left_cell_index.1;
row += 1;
}
for cell in self.cell_map.values() {
if let Some(word_id) = cell.get_across_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
if let Some(word_id) = cell.get_down_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
}
let graph = self.to_graph();
debug!("{:#?}", graph);
debug!("{:#?}", self.word_map);
debug!("Checking grid connected {}", self.to_string());
assert!(graph.is_connected());
}
fn find_lowest_unused_word_id(&self) -> usize {
let mut word_id: usize = 0;
while self.word_map.contains_key(&word_id) {
word_id += 1;
}
word_id
}
pub fn add_unplaced_word_at_id(&mut self, word_text: &str, clue: &str, word_id: usize, required_direction: Option<Direction>) {
let word = Word::new_unplaced(word_text, clue, required_direction);
self.word_map.insert(word_id, word);
}
pub fn add_unplaced_word(&mut self, word_text: &str, clue: &str, required_direction: Option<Direction>) -> usize {
let word_id = self.find_lowest_unused_word_id();
self.add_unplaced_word_at_id(word_text, clue, word_id, required_direction);
word_id
}
pub fn update_word_id(&mut self, old_word_id: usize, new_word_id: usize) {
// Move in hashmap
let word: Word = self.word_map.remove(&old_word_id).unwrap();
self.word_map.insert(new_word_id, word);
for (_location, cell) in self.cell_map.iter_mut() {
cell.update_word_id(old_word_id, new_word_id);
}
}
pub fn delete_word(&mut self, word_id: usize) {
self.unplace_word(word_id);
self.word_map.remove(&word_id);
}
pub fn unplace_word(&mut self, word_id: usize) {
for (_location, cell) in self.cell_map.iter_mut() {
cell.remove_word(word_id);
}
if let Some(word) = self.word_map.get_mut(&word_id) {
word.remove_placement();
}
self.fit_to_size();
debug!("Now have {} words in grid", self.word_map.len());
}
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::array;
#[test]
fn test_adjacency() -> Result<(), CrosswordError> {
crate::logging::init_logger(true);
let mut grid = CrosswordGrid::new_single_word("ALPHA");
let arrival_word_id = grid.add_unplaced_word("ARRIVAL", "", None);
let bear_word_id = grid.add_unplaced_word("BEARER", "", None);
let innards_word_id = grid.add_unplaced_word("INNARDS", "", None);
let cup_word_id = grid.add_unplaced_word("CUP", "", None);
grid.add_unplaced_word("CAP", "", None);
grid.check_valid();
debug!("{:#?}", grid);
grid.place_word_in_cell(Location(0, 0), arrival_word_id, 0, Direction::Down)?;
grid.place_word_in_cell(Location(0, 4), bear_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(0, 2), cup_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(3, 0), innards_word_id, 0, Direction::Across)?;
debug!("{:#?}", grid);
grid.check_valid();
let adjacency = grid.to_graph_adjacency_matrix();
assert_eq!(adjacency, array![[0, 1, 1, 0, 1, 0],
[1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]);
Ok(())
}
}
| elf) -> | identifier_name |
mod.rs | use crate::graph::Graph;
use log::debug;
use std::collections::HashMap;
use std::fmt;
use ndarray::Array2;
use thiserror::Error;
mod builder;
mod word;
mod cell;
mod add_word;
mod random;
mod spacing;
mod properties;
mod pdf_conversion;
mod matrix;
mod merge;
mod validity;
use word::Word;
use cell::Cell;
pub use builder::CrosswordGridBuilder;
pub use pdf_conversion::CrosswordPrinter;
static VALID_ANSWERCHARS: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
static VALID_CLUECHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_— -;:,.?!@'“”`‘’\"&*()$£%";
#[derive(Error,Debug,PartialEq)]
pub enum CellError {
#[error("Attempted to add word {0} to cell in direction {2:?} but cell already has id {1}")]
WordIdMismatch(usize, usize, Direction),
#[error("Attempted to add letter {0} to cell, but cell already has letter {1}")]
LetterMismatch(char, char),
#[error("Attempted to fill a cell already marked as black")]
FillBlack,
}
#[derive(Error,Debug,PartialEq)]
pub enum CrosswordError {
#[error("Adjacent cells {0:?} {1:?} incompatible - no word found that links them.")]
AdjacentCellsNoLinkWord(Location, Location),
#[error("Adjacent cells {0:?} {1:?} incompatible - should have a shared word which links them, but the words don't match: {2} {3}")]
AdjacentCellsMismatchedLinkWord(Location, Location, usize, usize),
#[error("Error updating cell at location {0:?}")]
CellError(Location, CellError),
#[error("Cell {0:?} at start/end of word not empty. Last/first cell in word is {1:?}")]
NonEmptyWordBoundary(Location, Location),
#[error("Cell not found in grid {0:?}")]
CellNotFound(Location),
#[error("Word {1} with id {0} already placed at {2:?}")]
WordAlreadyPlaced(usize, String, Location),
#[error("Attempted to place word {1} with id {0} with invalid direction {2:?}")]
InvalidWordDirection(usize, String, Direction),
#[error("Word not found in grid object {0}")]
WordNotFound(usize),
}
#[derive(Clone,Copy,Debug,PartialEq,Eq,Ord,PartialOrd,Hash)]
pub enum Direction {
Across,
Down,
}
impl Direction {
fn rotate(&self) -> Self {
match self {
Direction::Across => Direction::Down,
Direction::Down => Direction::Across,
}
}
}
#[derive(Clone,Copy,Eq,Hash)]
pub struct Location(pub isize, pub isize);
impl fmt::Debug for Location {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Location({}, {})", self.0, self.1)
}
}
impl PartialEq for Location {
fn eq(&self, other: &Location) -> bool {
self.0 == other.0 && self.1 == other.1
}
}
impl Location {
fn relative_location(&self, move_across: isize, move_down: isize) -> Location {
Location(self.0 + move_across, self.1 + move_down)
}
fn relative_location_directed(&self, move_size: isize, direction: Direction) -> Location {
match direction {
Direction::Across => Location(self.0, self.1 + move_size),
Direction::Down => Location(self.0 + move_size, self.1),
}
}
}
#[derive(Clone)]
pub struct CrosswordGrid {
cell_map: HashMap<Location, Cell>,
word_map: HashMap<usize, Word>,
top_left_cell_index: Location,
bottom_right_cell_index: Location,
}
impl fmt::Debug for CrosswordGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut words: Vec<(&usize, &Word)> = self.word_map.iter().collect();
words.sort_by_key(|a| *a.0);
let word_strs: Vec<String> = words.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
let mut cells: Vec<(&Location, &Cell)> = self.cell_map.iter().collect();
cells.sort_by_key(|a| (a.0.0, a.0.1));
let cell_strs: Vec<String> = cells.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
write!(f, "CrosswordGrid(\nIndices: Top left {:?} Bottom right {:?}\nWords:{:#?}\nCells:{:#?}\n))",
self.top_left_cell_index, self.bottom_right_cell_index, word_strs, cell_strs)
}
}
impl CrosswordGrid {
fn get_word(&self, word_id: usize) -> Result<&Word, CrosswordError> {
match self.word_map.get(&word_id) {
Some(word) => Ok(word),
None => Err(CrosswordError::WordNotFound(word_id)),
}
}
fn get_cell(&self, location: &Location) -> Result<&Cell, CrosswordError> {
match self.cell_map.get(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
fn get_cell_mut(&mut self, location: &Location) -> Result<&mut Cell, CrosswordError> {
match self.cell_map.get_mut(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
pub fn new_single_word(word: &str) -> Self {
let mut builder = builder::CrosswordGridBuilder::new();
builder.from_string(word)
}
fn new_from_wordmap_single_placed(word_id: usize,
direction: Direction,
mut word_map: HashMap<usize, Word>) -> Self {
let mut location = Location(0, 0);
let across_id: Option<usize>;
let down_id: Option<usize>;
let mut cell_map: HashMap<Location, Cell> = HashMap::new();
match direction {
Direction::Across => {
across_id = Some(word_id);
down_id = None;
},
Direction::Down => {
across_id = None;
down_id = Some(word_id);
},
};
let word = word_map.get_mut(&word_id).unwrap();
word.update_location(location, direction);
for c in word.word_text.chars() {
cell_map.insert(location, Cell::new(c, across_id, down_id));
location = location.relative_location_directed(1, direction);
}
let mut grid = CrosswordGrid {
cell_map,
word_map,
top_left_cell_index: Location(0, 0),
bottom_right_cell_index: location.relative_location_directed(-1, direction),
};
grid.fit_to_size();
grid
}
fn get_all_intersections(&self) -> Vec<(usize, usize)> {
let mut edges: Vec<(usize, usize)> = vec![];
for cell in self.cell_map.values().filter(|c| c.is_intersection()) {
edges.push((cell.get_across_word_id().unwrap(),
cell.get_down_word_id().unwrap()));
}
edges.sort();
debug!("All intersections found {:#?}", edges);
edges
}
pub fn to_graph_adjacency_matrix(&self) -> Array2<u8> {
let edges = self.get_all_intersections();
let mut word_ids: Vec<usize> = self.word_map.keys().cloned().collect();
word_ids.sort();
let max_word_id = word_ids[word_ids.len() - 1] + 1;
let mut adjacency: Array2<u8> = Array2::zeros((max_word_id, max_word_id));
for (word1, word2) in edges.iter() {
adjacency[[*word1, *word2]] += 1;
adjacency[[*word2, *word1]] += 1;
}
adjacency
}
pub fn to_graph(&self) -> Graph {
let edges = self.get_all_intersections();
let mut graph = Graph::new_from_edges(edges);
for (word_id, _word) in self.word_map.iter().filter(|(_id, w)| w.is_placed()) {
graph.add_node(*word_id);
}
graph
}
pub fn to_string_with_coords(&self) -> String {
let mut string: String = String::from("");
let coord_string: String = format!("Top-left: ({}, {}), bottom-right: ({}, {})\n",
self.top_left_cell_index.0 + 1,
self.top_left_cell_index.1 + 1,
self.bottom_right_cell_index.0 - 1,
self.bottom_right_cell_index.1 - 1);
string.push_str(&coord_string);
string.push_str(&self.to_string());
string
}
pub fn to_string(&self) -> String {
let mut string: String = String::from("");
let mut row = self.top_left_cell_index.0 + 1;
let mut col = self.top_left_cell_index.1 + 1;
while row < self.bottom_right_cell_index.0 {
while col < self.bottom_right_cell_index.1 {
let c = self.cell_map.get(&Location(row, col)).unwrap().to_char();
string.push(c);
col += 1;
}
col = self.top_left_cell_index.1 + 1;
row += 1;
string.push('\n');
}
string
}
pub fn check_valid(&self) {
assert!(self.top_left_cell_index.0 <= self.bottom_right_cell_index.0);
assert!(self.top_left_cell_index.1 <= self.bottom_right_cell_index.1);
let mut row = self.top_left_cell_index.0;
let mut col = self.top_left_cell_index.1;
while row <= self.bottom_right_cell_index.0 {
while col <= self.bottom_right_cell_index.1 {
let present = self.cell_map.contains_key(&Location(row, col));
if !present {
panic!("Cell not present in grid {}, {}", row, col);
}
col += 1;
}
col = self.top_left_cell_index.1;
row += 1;
}
for cell in self.cell_map.values() {
if let Some(word_id) = cell.get_across_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
if let Some(word_id) = cell.get_down_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
}
let graph = self.to_graph();
debug!("{:#?}", graph);
debug!("{:#?}", self.word_map);
debug!("Checking grid connected {}", self.to_string());
assert!(graph.is_connected());
}
fn find_lowest_unused_word_id(&self) -> usize {
let mut word_id: usize = 0;
while self.word_map.contains_key(&word_id) {
word_id += 1;
}
word_id
}
pub fn add_unplaced_word_at_id(&mut self, word_text: &str, clue: &str, word_id: usize, required_direction: Option<Direction>) {
let word = Word::new_unplaced(word_text, clue, required_direction);
self.word_map.insert(word_id, word);
}
pub fn add_unplaced_word(&mut self, word_text: &str, clue: &str, required_direction: Option<Direction>) -> usize {
let word_id = self.find_lowest_unused_word_id();
self.add_unplaced_word_at_id(word_text, clue, word_id, required_direction);
word_id
}
pub fn update_word_id(&mut self, old_word_id: usize, new_word_id: usize) {
// Move in hashmap
let word: Word = self.word_map.remove(&old_word_id).unwrap();
self.word_map.insert(new_word_id, word);
for (_location, cell) in self.cell_map.iter_mut() {
cell.update_word_id(old_word_id, new_word_id);
}
}
pub fn delete_word(&mut self, word_id: usize) {
s | n unplace_word(&mut self, word_id: usize) {
for (_location, cell) in self.cell_map.iter_mut() {
cell.remove_word(word_id);
}
if let Some(word) = self.word_map.get_mut(&word_id) {
word.remove_placement();
}
self.fit_to_size();
debug!("Now have {} words in grid", self.word_map.len());
}
}
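// Illustrative sketch (hypothetical test, not from the original crate): a freshly built
// single-word grid already satisfies `check_valid`, since one placed word forms a connected graph.
#[cfg(test)]
mod single_word_example {
    use super::*;

    #[test]
    fn single_word_grid_passes_validity_checks() {
        CrosswordGrid::new_single_word("ALPHA").check_valid();
    }
}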
#[cfg(test)]
mod tests {
use super::*;
use ndarray::array;
#[test]
fn test_adjacency() -> Result<(), CrosswordError> {
crate::logging::init_logger(true);
let mut grid = CrosswordGrid::new_single_word("ALPHA");
let arrival_word_id = grid.add_unplaced_word("ARRIVAL", "", None);
let bear_word_id = grid.add_unplaced_word("BEARER", "", None);
let innards_word_id = grid.add_unplaced_word("INNARDS", "", None);
let cup_word_id = grid.add_unplaced_word("CUP", "", None);
grid.add_unplaced_word("CAP", "", None);
grid.check_valid();
debug!("{:#?}", grid);
grid.place_word_in_cell(Location(0, 0), arrival_word_id, 0, Direction::Down)?;
grid.place_word_in_cell(Location(0, 4), bear_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(0, 2), cup_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(3, 0), innards_word_id, 0, Direction::Across)?;
debug!("{:#?}", grid);
grid.check_valid();
let adjacency = grid.to_graph_adjacency_matrix();
assert_eq!(adjacency, array![[0, 1, 1, 0, 1, 0],
[1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]);
Ok(())
}
}
| elf.unplace_word(word_id);
self.word_map.remove(&word_id);
}
pub f | identifier_body |
parser.js | import { __assign } from "tslib";
import path from 'path';
import remarkMdx from 'remark-mdx';
import remarkParse from 'remark-parse';
import unified from 'unified';
import { hasProperties, isJsxNode, last, normalizeParser, normalizePosition, restoreNodeLocation, } from './helper';
import { COMMENT_CONTENT_REGEX, isComment } from './regexp';
import { traverse } from './traverse';
export var mdxProcessor = unified()
.use(remarkParse)
.use(remarkMdx)
.freeze();
export var AST_PROPS = ['body', 'comments', 'tokens'];
export var ES_NODE_TYPES = ['export', 'import', 'jsx'];
export var LOC_ERROR_PROPERTIES = ['column', 'index', 'lineNumber'];
export var DEFAULT_EXTENSIONS = ['.mdx'];
export var MARKDOWN_EXTENSIONS = ['.md'];
export var DEFAULT_PARSER_OPTIONS = {
comment: true,
ecmaFeatures: {
jsx: true,
},
ecmaVersion: new Date().getUTCFullYear(),
sourceType: 'module',
tokens: true,
filePath: '__placeholder__.mdx',
};
var JSX_WRAPPER_START = '<$>';
var JSX_WRAPPER_END = '</$>';
var OFFSET = JSX_WRAPPER_START.length;
var Parser = /** @class */ (function () {
function Parser() |
Parser.prototype.normalizeJsxNode = function (node, parent, options) {
if (options === void 0) { options = this._options; }
var value = node.value;
if (node.type !== 'jsx' || isComment(value)) {
return node;
}
var commentContent = COMMENT_CONTENT_REGEX.exec(value);
if (commentContent) {
var comments_1 = [];
var _a = node.position.start, line_1 = _a.line, column_1 = _a.column, startOffset_1 = _a.offset;
Object.assign(node, {
data: __assign(__assign({}, node.data), { jsxType: 'JSXElementWithHTMLComments', comments: comments_1,
// jsx in a paragraph is considered plain html in mdx, which means html-style comments are valid
// TODO: in this case, jsx style comments could be a mistake
inline: !!parent && parent.type !== 'root' }),
value: value.replace(COMMENT_CONTENT_REGEX, function (matched, $0, $1, $2, offset) {
var endOffset = offset + matched.length;
var startLines = value.slice(0, offset).split('\n');
var endLines = value.slice(0, endOffset).split('\n');
var fixed = "{/" + '*'.repeat($0.length - 2) + $1 + '*'.repeat($2.length - 2) + "/}";
var startLineOffset = startLines.length - 1;
var endLineOffset = endLines.length - 1;
comments_1.push({
fixed: fixed,
// ! eslint ast column is 0-indexed, but unified is 1-indexed
loc: {
start: {
line: line_1 + startLineOffset,
column: last(startLines).length +
(startLineOffset ? 0 : column_1 - 1),
offset: startOffset_1 + offset,
},
end: {
line: line_1 + endLineOffset,
column: last(endLines).length + (endLineOffset ? 0 : column_1 - 1),
offset: startOffset_1 + endOffset,
},
},
origin: matched,
});
return fixed;
}),
});
}
return this._normalizeJsxNodes(node, options);
};
Parser.prototype.parse = function (code, options) {
return this.parseForESLint(code, options).ast;
};
Parser.prototype.parseForESLint = function (code, options) {
var _this = this;
var extname = path.extname(options.filePath);
var isMdx = DEFAULT_EXTENSIONS.concat(options.extensions || []).includes(extname);
var isMarkdown = MARKDOWN_EXTENSIONS.concat(options.markdownExtensions || []).includes(extname);
if (!isMdx && !isMarkdown) {
return this._eslintParse(code, options);
}
var root = mdxProcessor.parse(code);
this._ast = __assign(__assign({}, normalizePosition(root.position)), { type: 'Program', sourceType: options.sourceType || 'module', body: [], comments: [], tokens: [] });
this._services = {
JSXElementsWithHTMLComments: [],
};
if (isMdx) {
traverse(root, {
enter: function (node, parent) {
if (!ES_NODE_TYPES.includes(node.type)) {
return;
}
var normalized = _this.normalizeJsxNode(node, parent, options);
normalized = Array.isArray(normalized) ? normalized : [normalized];
normalized.forEach(function (_node) { return _this._nodeToAst(_node, options); });
},
});
}
return {
ast: this._ast,
services: this._services,
};
};
// @internal
Parser.prototype._eslintParse = function (code, options) {
if (!this._parsers || options.parser !== this._options.parser) {
this._parsers = normalizeParser(options.parser);
}
/* istanbul ignore else */
if (options.filePath && this._options !== options) {
Object.assign(this._options, options);
}
var program;
var parseError;
for (var _i = 0, _a = this._parsers; _i < _a.length; _i++) {
var parser_1 = _a[_i];
try {
program = parser_1(code, this._options);
break;
}
catch (e) {
if (!parseError) {
parseError = e;
}
}
}
if (!program && parseError) {
throw parseError;
}
/* istanbul ignore next */
return ('ast' in program && program.ast
? program
: { ast: program });
};
// fix adjacent JSX nodes
// @internal
// eslint-disable-next-line sonarjs/cognitive-complexity
Parser.prototype._normalizeJsxNodes = function (node, options) {
var value = node.value;
var program;
try {
// wrap into single element which is valid jsx but not valid jsx in mdx, so that it won't break on adjacent JSX nodes
program = this._eslintParse("" + JSX_WRAPPER_START + value + JSX_WRAPPER_END, options).ast;
}
catch (e) {
if (hasProperties(e, LOC_ERROR_PROPERTIES)) {
var start = node.position.start;
e.index += start.offset - OFFSET;
e.column =
/* istanbul ignore next */
e.lineNumber > 1 ? e.column : e.column + start.column - OFFSET;
e.lineNumber += start.line - 1;
throw e;
}
return node;
}
var expression = program
.body[0].expression;
if (!isJsxNode(expression) || expression.children.length <= 1) {
return node;
}
var _a = node.position.start, line = _a.line, offset = _a.offset;
return expression.children.reduce(function (nodes, jsNode) {
if (!isJsxNode(jsNode)) {
return nodes;
}
var nodeStart = jsNode.start, nodeEnd = jsNode.end,
/* istanbul ignore next */
_a = jsNode.loc,
/* istanbul ignore next */
_b = _a === void 0 ? {
start: { column: nodeStart, line: 1 },
end: { column: nodeEnd, line: 1 },
} : _a, start = _b.start, end = _b.end,
/* istanbul ignore next */
_c = jsNode.range,
/* istanbul ignore next */
range = _c === void 0 ? [nodeStart, nodeEnd] : _c;
var startLine = line + start.line - 1;
var endLine = line + end.line - 1;
var startOffset = range[0] - OFFSET;
var endOffset = range[1] - OFFSET;
nodes.push({
type: 'jsx',
data: nodes.length > 0 ? null : node.data,
value: value.slice(startOffset, endOffset),
position: {
start: {
line: startLine,
column: line === startLine ? start.column - OFFSET : start.column,
offset: offset + startOffset,
},
end: {
line: endLine,
column: line === startLine ? end.column - OFFSET : end.column,
offset: offset + endOffset,
},
},
});
return nodes;
}, []);
};
// @internal
Parser.prototype._nodeToAst = function (node, options) {
var _this = this;
if (node.data && node.data.jsxType === 'JSXElementWithHTMLComments') {
this._services.JSXElementsWithHTMLComments.push(node);
}
var value = node.value;
// fix #4
if (isComment(value)) {
return;
}
var _a = normalizePosition(node.position), loc = _a.loc, start = _a.start;
var startLine = loc.start.line - 1; // ! line is 1-indexed, change to 0-indexed to simplify usage
var program;
try {
program = this._eslintParse(value, options).ast;
}
catch (e) {
/* istanbul ignore if */
if (hasProperties(e, LOC_ERROR_PROPERTIES)) {
// should be handled by `_normalizeJsxNodes`, just for robustness
e.index += start;
e.column = e.lineNumber > 1 ? e.column : e.column + loc.start.column;
e.lineNumber += startLine;
}
throw e;
}
var offset = start - program.range[0];
AST_PROPS.forEach(function (prop) {
var _a;
return (_a = _this._ast[prop]).push.apply(_a, program[prop].map(function (item) {
return restoreNodeLocation(item, startLine, offset);
}));
});
};
return Parser;
}());
export { Parser };
export var parser = new Parser();
export var parse = parser.parse, parseForESLint = parser.parseForESLint;
//# sourceMappingURL=parser.js.map | {
// @internal
this._options = DEFAULT_PARSER_OPTIONS;
this.parse = this.parse.bind(this);
this.parseForESLint = this.parseForESLint.bind(this);
} | identifier_body |
parser.js | import { __assign } from "tslib";
import path from 'path';
import remarkMdx from 'remark-mdx';
import remarkParse from 'remark-parse';
import unified from 'unified';
import { hasProperties, isJsxNode, last, normalizeParser, normalizePosition, restoreNodeLocation, } from './helper';
import { COMMENT_CONTENT_REGEX, isComment } from './regexp';
import { traverse } from './traverse';
export var mdxProcessor = unified()
.use(remarkParse)
.use(remarkMdx)
.freeze();
export var AST_PROPS = ['body', 'comments', 'tokens'];
export var ES_NODE_TYPES = ['export', 'import', 'jsx'];
export var LOC_ERROR_PROPERTIES = ['column', 'index', 'lineNumber'];
export var DEFAULT_EXTENSIONS = ['.mdx'];
export var MARKDOWN_EXTENSIONS = ['.md'];
export var DEFAULT_PARSER_OPTIONS = {
comment: true,
ecmaFeatures: {
jsx: true,
},
ecmaVersion: new Date().getUTCFullYear(),
sourceType: 'module',
tokens: true,
filePath: '__placeholder__.mdx',
};
var JSX_WRAPPER_START = '<$>';
var JSX_WRAPPER_END = '</$>';
var OFFSET = JSX_WRAPPER_START.length;
var Parser = /** @class */ (function () {
function Parser() {
// @internal
this._options = DEFAULT_PARSER_OPTIONS;
this.parse = this.parse.bind(this);
this.parseForESLint = this.parseForESLint.bind(this);
}
Parser.prototype.normalizeJsxNode = function (node, parent, options) {
if (options === void 0) { options = this._options; }
var value = node.value;
if (node.type !== 'jsx' || isComment(value)) {
return node;
}
var commentContent = COMMENT_CONTENT_REGEX.exec(value);
if (commentContent) {
var comments_1 = [];
var _a = node.position.start, line_1 = _a.line, column_1 = _a.column, startOffset_1 = _a.offset;
Object.assign(node, {
data: __assign(__assign({}, node.data), { jsxType: 'JSXElementWithHTMLComments', comments: comments_1,
                // jsx in a paragraph is treated as plain html in mdx, which means html-style comments are valid
// TODO: in this case, jsx style comments could be a mistake
inline: !!parent && parent.type !== 'root' }),
value: value.replace(COMMENT_CONTENT_REGEX, function (matched, $0, $1, $2, offset) {
var endOffset = offset + matched.length;
var startLines = value.slice(0, offset).split('\n');
var endLines = value.slice(0, endOffset).split('\n');
var fixed = "{/" + '*'.repeat($0.length - 2) + $1 + '*'.repeat($2.length - 2) + "/}";
var startLineOffset = startLines.length - 1;
var endLineOffset = endLines.length - 1;
comments_1.push({
fixed: fixed,
// ! eslint ast column is 0-indexed, but unified is 1-indexed
loc: {
start: {
line: line_1 + startLineOffset,
column: last(startLines).length +
(startLineOffset ? 0 : column_1 - 1),
offset: startOffset_1 + offset,
},
end: {
line: line_1 + endLineOffset,
column: last(endLines).length + (endLineOffset ? 0 : column_1 - 1),
offset: startOffset_1 + endOffset,
},
},
origin: matched,
});
return fixed;
}),
});
}
return this._normalizeJsxNodes(node, options);
};
Parser.prototype.parse = function (code, options) {
return this.parseForESLint(code, options).ast;
};
Parser.prototype.parseForESLint = function (code, options) {
var _this = this;
var extname = path.extname(options.filePath);
var isMdx = DEFAULT_EXTENSIONS.concat(options.extensions || []).includes(extname);
var isMarkdown = MARKDOWN_EXTENSIONS.concat(options.markdownExtensions || []).includes(extname);
if (!isMdx && !isMarkdown) {
return this._eslintParse(code, options);
}
var root = mdxProcessor.parse(code);
this._ast = __assign(__assign({}, normalizePosition(root.position)), { type: 'Program', sourceType: options.sourceType || 'module', body: [], comments: [], tokens: [] });
this._services = {
JSXElementsWithHTMLComments: [],
};
if (isMdx) {
traverse(root, {
enter: function (node, parent) {
if (!ES_NODE_TYPES.includes(node.type)) {
return;
}
var normalized = _this.normalizeJsxNode(node, parent, options);
normalized = Array.isArray(normalized) ? normalized : [normalized];
normalized.forEach(function (_node) { return _this._nodeToAst(_node, options); });
},
});
}
return {
ast: this._ast,
services: this._services,
};
};
// @internal
Parser.prototype._eslintParse = function (code, options) {
if (!this._parsers || options.parser !== this._options.parser) {
this._parsers = normalizeParser(options.parser);
}
/* istanbul ignore else */
if (options.filePath && this._options !== options) {
Object.assign(this._options, options);
}
var program;
var parseError;
for (var _i = 0, _a = this._parsers; _i < _a.length; _i++) {
var parser_1 = _a[_i];
try {
program = parser_1(code, this._options);
break;
}
catch (e) {
if (!parseError) {
parseError = e;
}
}
}
if (!program && parseError) {
throw parseError;
}
/* istanbul ignore next */
return ('ast' in program && program.ast
? program
: { ast: program });
};
// fix adjacent JSX nodes
// @internal
// eslint-disable-next-line sonarjs/cognitive-complexity
Parser.prototype._normalizeJsxNodes = function (node, options) {
var value = node.value;
var program;
try {
            // wrap adjacent JSX nodes into a single wrapper element; the wrapper is valid JSX (but not valid MDX), so parsing won't break on adjacent JSX nodes
program = this._eslintParse("" + JSX_WRAPPER_START + value + JSX_WRAPPER_END, options).ast;
}
catch (e) {
if (hasProperties(e, LOC_ERROR_PROPERTIES)) {
var start = node.position.start;
e.index += start.offset - OFFSET;
e.column =
/* istanbul ignore next */
e.lineNumber > 1 ? e.column : e.column + start.column - OFFSET;
e.lineNumber += start.line - 1;
throw e;
}
return node;
}
var expression = program
.body[0].expression;
if (!isJsxNode(expression) || expression.children.length <= 1) {
return node;
}
var _a = node.position.start, line = _a.line, offset = _a.offset;
return expression.children.reduce(function (nodes, jsNode) {
if (!isJsxNode(jsNode)) {
return nodes;
}
var nodeStart = jsNode.start, nodeEnd = jsNode.end,
/* istanbul ignore next */
_a = jsNode.loc,
/* istanbul ignore next */
_b = _a === void 0 ? {
start: { column: nodeStart, line: 1 },
end: { column: nodeEnd, line: 1 },
} : _a, start = _b.start, end = _b.end,
/* istanbul ignore next */
_c = jsNode.range,
/* istanbul ignore next */
range = _c === void 0 ? [nodeStart, nodeEnd] : _c;
var startLine = line + start.line - 1;
var endLine = line + end.line - 1;
var startOffset = range[0] - OFFSET;
var endOffset = range[1] - OFFSET;
nodes.push({
type: 'jsx',
data: nodes.length > 0 ? null : node.data,
value: value.slice(startOffset, endOffset),
position: {
start: {
line: startLine,
column: line === startLine ? start.column - OFFSET : start.column,
offset: offset + startOffset,
},
end: {
line: endLine,
column: line === startLine ? end.column - OFFSET : end.column,
offset: offset + endOffset,
},
},
});
return nodes;
}, []);
};
// @internal
Parser.prototype._nodeToAst = function (node, options) { | if (node.data && node.data.jsxType === 'JSXElementWithHTMLComments') {
this._services.JSXElementsWithHTMLComments.push(node);
}
var value = node.value;
// fix #4
if (isComment(value)) {
return;
}
var _a = normalizePosition(node.position), loc = _a.loc, start = _a.start;
var startLine = loc.start.line - 1; // ! line is 1-indexed, change to 0-indexed to simplify usage
var program;
try {
program = this._eslintParse(value, options).ast;
}
catch (e) {
/* istanbul ignore if */
if (hasProperties(e, LOC_ERROR_PROPERTIES)) {
// should be handled by `_normalizeJsxNodes`, just for robustness
e.index += start;
e.column = e.lineNumber > 1 ? e.column : e.column + loc.start.column;
e.lineNumber += startLine;
}
throw e;
}
var offset = start - program.range[0];
AST_PROPS.forEach(function (prop) {
var _a;
return (_a = _this._ast[prop]).push.apply(_a, program[prop].map(function (item) {
return restoreNodeLocation(item, startLine, offset);
}));
});
};
return Parser;
}());
export { Parser };
export var parser = new Parser();
export var parse = parser.parse, parseForESLint = parser.parseForESLint;
//# sourceMappingURL=parser.js.map | var _this = this; | random_line_split |
parser.js | import { __assign } from "tslib";
import path from 'path';
import remarkMdx from 'remark-mdx';
import remarkParse from 'remark-parse';
import unified from 'unified';
import { hasProperties, isJsxNode, last, normalizeParser, normalizePosition, restoreNodeLocation, } from './helper';
import { COMMENT_CONTENT_REGEX, isComment } from './regexp';
import { traverse } from './traverse';
export var mdxProcessor = unified()
.use(remarkParse)
.use(remarkMdx)
.freeze();
export var AST_PROPS = ['body', 'comments', 'tokens'];
export var ES_NODE_TYPES = ['export', 'import', 'jsx'];
export var LOC_ERROR_PROPERTIES = ['column', 'index', 'lineNumber'];
export var DEFAULT_EXTENSIONS = ['.mdx'];
export var MARKDOWN_EXTENSIONS = ['.md'];
export var DEFAULT_PARSER_OPTIONS = {
comment: true,
ecmaFeatures: {
jsx: true,
},
ecmaVersion: new Date().getUTCFullYear(),
sourceType: 'module',
tokens: true,
filePath: '__placeholder__.mdx',
};
var JSX_WRAPPER_START = '<$>';
var JSX_WRAPPER_END = '</$>';
var OFFSET = JSX_WRAPPER_START.length;
var Parser = /** @class */ (function () {
function | () {
// @internal
this._options = DEFAULT_PARSER_OPTIONS;
this.parse = this.parse.bind(this);
this.parseForESLint = this.parseForESLint.bind(this);
}
Parser.prototype.normalizeJsxNode = function (node, parent, options) {
if (options === void 0) { options = this._options; }
var value = node.value;
if (node.type !== 'jsx' || isComment(value)) {
return node;
}
var commentContent = COMMENT_CONTENT_REGEX.exec(value);
if (commentContent) {
var comments_1 = [];
var _a = node.position.start, line_1 = _a.line, column_1 = _a.column, startOffset_1 = _a.offset;
Object.assign(node, {
data: __assign(__assign({}, node.data), { jsxType: 'JSXElementWithHTMLComments', comments: comments_1,
                // jsx in a paragraph is treated as plain html in mdx, which means html-style comments are valid
// TODO: in this case, jsx style comments could be a mistake
inline: !!parent && parent.type !== 'root' }),
value: value.replace(COMMENT_CONTENT_REGEX, function (matched, $0, $1, $2, offset) {
var endOffset = offset + matched.length;
var startLines = value.slice(0, offset).split('\n');
var endLines = value.slice(0, endOffset).split('\n');
var fixed = "{/" + '*'.repeat($0.length - 2) + $1 + '*'.repeat($2.length - 2) + "/}";
var startLineOffset = startLines.length - 1;
var endLineOffset = endLines.length - 1;
comments_1.push({
fixed: fixed,
// ! eslint ast column is 0-indexed, but unified is 1-indexed
loc: {
start: {
line: line_1 + startLineOffset,
column: last(startLines).length +
(startLineOffset ? 0 : column_1 - 1),
offset: startOffset_1 + offset,
},
end: {
line: line_1 + endLineOffset,
column: last(endLines).length + (endLineOffset ? 0 : column_1 - 1),
offset: startOffset_1 + endOffset,
},
},
origin: matched,
});
return fixed;
}),
});
}
return this._normalizeJsxNodes(node, options);
};
Parser.prototype.parse = function (code, options) {
return this.parseForESLint(code, options).ast;
};
Parser.prototype.parseForESLint = function (code, options) {
var _this = this;
var extname = path.extname(options.filePath);
var isMdx = DEFAULT_EXTENSIONS.concat(options.extensions || []).includes(extname);
var isMarkdown = MARKDOWN_EXTENSIONS.concat(options.markdownExtensions || []).includes(extname);
if (!isMdx && !isMarkdown) {
return this._eslintParse(code, options);
}
var root = mdxProcessor.parse(code);
this._ast = __assign(__assign({}, normalizePosition(root.position)), { type: 'Program', sourceType: options.sourceType || 'module', body: [], comments: [], tokens: [] });
this._services = {
JSXElementsWithHTMLComments: [],
};
if (isMdx) {
traverse(root, {
enter: function (node, parent) {
if (!ES_NODE_TYPES.includes(node.type)) {
return;
}
var normalized = _this.normalizeJsxNode(node, parent, options);
normalized = Array.isArray(normalized) ? normalized : [normalized];
normalized.forEach(function (_node) { return _this._nodeToAst(_node, options); });
},
});
}
return {
ast: this._ast,
services: this._services,
};
};
// @internal
Parser.prototype._eslintParse = function (code, options) {
if (!this._parsers || options.parser !== this._options.parser) {
this._parsers = normalizeParser(options.parser);
}
/* istanbul ignore else */
if (options.filePath && this._options !== options) {
Object.assign(this._options, options);
}
var program;
var parseError;
for (var _i = 0, _a = this._parsers; _i < _a.length; _i++) {
var parser_1 = _a[_i];
try {
program = parser_1(code, this._options);
break;
}
catch (e) {
if (!parseError) {
parseError = e;
}
}
}
if (!program && parseError) {
throw parseError;
}
/* istanbul ignore next */
return ('ast' in program && program.ast
? program
: { ast: program });
};
// fix adjacent JSX nodes
// @internal
// eslint-disable-next-line sonarjs/cognitive-complexity
Parser.prototype._normalizeJsxNodes = function (node, options) {
var value = node.value;
var program;
try {
            // wrap adjacent JSX nodes into a single wrapper element; the wrapper is valid JSX (but not valid MDX), so parsing won't break on adjacent JSX nodes
program = this._eslintParse("" + JSX_WRAPPER_START + value + JSX_WRAPPER_END, options).ast;
}
catch (e) {
if (hasProperties(e, LOC_ERROR_PROPERTIES)) {
var start = node.position.start;
e.index += start.offset - OFFSET;
e.column =
/* istanbul ignore next */
e.lineNumber > 1 ? e.column : e.column + start.column - OFFSET;
e.lineNumber += start.line - 1;
throw e;
}
return node;
}
var expression = program
.body[0].expression;
if (!isJsxNode(expression) || expression.children.length <= 1) {
return node;
}
var _a = node.position.start, line = _a.line, offset = _a.offset;
return expression.children.reduce(function (nodes, jsNode) {
if (!isJsxNode(jsNode)) {
return nodes;
}
var nodeStart = jsNode.start, nodeEnd = jsNode.end,
/* istanbul ignore next */
_a = jsNode.loc,
/* istanbul ignore next */
_b = _a === void 0 ? {
start: { column: nodeStart, line: 1 },
end: { column: nodeEnd, line: 1 },
} : _a, start = _b.start, end = _b.end,
/* istanbul ignore next */
_c = jsNode.range,
/* istanbul ignore next */
range = _c === void 0 ? [nodeStart, nodeEnd] : _c;
var startLine = line + start.line - 1;
var endLine = line + end.line - 1;
var startOffset = range[0] - OFFSET;
var endOffset = range[1] - OFFSET;
nodes.push({
type: 'jsx',
data: nodes.length > 0 ? null : node.data,
value: value.slice(startOffset, endOffset),
position: {
start: {
line: startLine,
column: line === startLine ? start.column - OFFSET : start.column,
offset: offset + startOffset,
},
end: {
line: endLine,
column: line === startLine ? end.column - OFFSET : end.column,
offset: offset + endOffset,
},
},
});
return nodes;
}, []);
};
// @internal
Parser.prototype._nodeToAst = function (node, options) {
var _this = this;
if (node.data && node.data.jsxType === 'JSXElementWithHTMLComments') {
this._services.JSXElementsWithHTMLComments.push(node);
}
var value = node.value;
// fix #4
if (isComment(value)) {
return;
}
var _a = normalizePosition(node.position), loc = _a.loc, start = _a.start;
var startLine = loc.start.line - 1; // ! line is 1-indexed, change to 0-indexed to simplify usage
var program;
try {
program = this._eslintParse(value, options).ast;
}
catch (e) {
/* istanbul ignore if */
if (hasProperties(e, LOC_ERROR_PROPERTIES)) {
// should be handled by `_normalizeJsxNodes`, just for robustness
e.index += start;
e.column = e.lineNumber > 1 ? e.column : e.column + loc.start.column;
e.lineNumber += startLine;
}
throw e;
}
var offset = start - program.range[0];
AST_PROPS.forEach(function (prop) {
var _a;
return (_a = _this._ast[prop]).push.apply(_a, program[prop].map(function (item) {
return restoreNodeLocation(item, startLine, offset);
}));
});
};
return Parser;
}());
export { Parser };
export var parser = new Parser();
export var parse = parser.parse, parseForESLint = parser.parseForESLint;
//# sourceMappingURL=parser.js.map | Parser | identifier_name |
parser.js | import { __assign } from "tslib";
import path from 'path';
import remarkMdx from 'remark-mdx';
import remarkParse from 'remark-parse';
import unified from 'unified';
import { hasProperties, isJsxNode, last, normalizeParser, normalizePosition, restoreNodeLocation, } from './helper';
import { COMMENT_CONTENT_REGEX, isComment } from './regexp';
import { traverse } from './traverse';
export var mdxProcessor = unified()
.use(remarkParse)
.use(remarkMdx)
.freeze();
export var AST_PROPS = ['body', 'comments', 'tokens'];
export var ES_NODE_TYPES = ['export', 'import', 'jsx'];
export var LOC_ERROR_PROPERTIES = ['column', 'index', 'lineNumber'];
export var DEFAULT_EXTENSIONS = ['.mdx'];
export var MARKDOWN_EXTENSIONS = ['.md'];
export var DEFAULT_PARSER_OPTIONS = {
comment: true,
ecmaFeatures: {
jsx: true,
},
ecmaVersion: new Date().getUTCFullYear(),
sourceType: 'module',
tokens: true,
filePath: '__placeholder__.mdx',
};
var JSX_WRAPPER_START = '<$>';
var JSX_WRAPPER_END = '</$>';
var OFFSET = JSX_WRAPPER_START.length;
var Parser = /** @class */ (function () {
function Parser() {
// @internal
this._options = DEFAULT_PARSER_OPTIONS;
this.parse = this.parse.bind(this);
this.parseForESLint = this.parseForESLint.bind(this);
}
Parser.prototype.normalizeJsxNode = function (node, parent, options) {
if (options === void 0) { options = this._options; }
var value = node.value;
if (node.type !== 'jsx' || isComment(value)) {
return node;
}
var commentContent = COMMENT_CONTENT_REGEX.exec(value);
if (commentContent) {
var comments_1 = [];
var _a = node.position.start, line_1 = _a.line, column_1 = _a.column, startOffset_1 = _a.offset;
Object.assign(node, {
data: __assign(__assign({}, node.data), { jsxType: 'JSXElementWithHTMLComments', comments: comments_1,
                // jsx in a paragraph is treated as plain html in mdx, which means html-style comments are valid
// TODO: in this case, jsx style comments could be a mistake
inline: !!parent && parent.type !== 'root' }),
value: value.replace(COMMENT_CONTENT_REGEX, function (matched, $0, $1, $2, offset) {
var endOffset = offset + matched.length;
var startLines = value.slice(0, offset).split('\n');
var endLines = value.slice(0, endOffset).split('\n');
var fixed = "{/" + '*'.repeat($0.length - 2) + $1 + '*'.repeat($2.length - 2) + "/}";
var startLineOffset = startLines.length - 1;
var endLineOffset = endLines.length - 1;
comments_1.push({
fixed: fixed,
// ! eslint ast column is 0-indexed, but unified is 1-indexed
loc: {
start: {
line: line_1 + startLineOffset,
column: last(startLines).length +
(startLineOffset ? 0 : column_1 - 1),
offset: startOffset_1 + offset,
},
end: {
line: line_1 + endLineOffset,
column: last(endLines).length + (endLineOffset ? 0 : column_1 - 1),
offset: startOffset_1 + endOffset,
},
},
origin: matched,
});
return fixed;
}),
});
}
return this._normalizeJsxNodes(node, options);
};
Parser.prototype.parse = function (code, options) {
return this.parseForESLint(code, options).ast;
};
Parser.prototype.parseForESLint = function (code, options) {
var _this = this;
var extname = path.extname(options.filePath);
var isMdx = DEFAULT_EXTENSIONS.concat(options.extensions || []).includes(extname);
var isMarkdown = MARKDOWN_EXTENSIONS.concat(options.markdownExtensions || []).includes(extname);
if (!isMdx && !isMarkdown) {
return this._eslintParse(code, options);
}
var root = mdxProcessor.parse(code);
this._ast = __assign(__assign({}, normalizePosition(root.position)), { type: 'Program', sourceType: options.sourceType || 'module', body: [], comments: [], tokens: [] });
this._services = {
JSXElementsWithHTMLComments: [],
};
if (isMdx) {
traverse(root, {
enter: function (node, parent) {
if (!ES_NODE_TYPES.includes(node.type)) {
return;
}
var normalized = _this.normalizeJsxNode(node, parent, options);
normalized = Array.isArray(normalized) ? normalized : [normalized];
normalized.forEach(function (_node) { return _this._nodeToAst(_node, options); });
},
});
}
return {
ast: this._ast,
services: this._services,
};
};
// @internal
Parser.prototype._eslintParse = function (code, options) {
if (!this._parsers || options.parser !== this._options.parser) |
/* istanbul ignore else */
if (options.filePath && this._options !== options) {
Object.assign(this._options, options);
}
var program;
var parseError;
for (var _i = 0, _a = this._parsers; _i < _a.length; _i++) {
var parser_1 = _a[_i];
try {
program = parser_1(code, this._options);
break;
}
catch (e) {
if (!parseError) {
parseError = e;
}
}
}
if (!program && parseError) {
throw parseError;
}
/* istanbul ignore next */
return ('ast' in program && program.ast
? program
: { ast: program });
};
// fix adjacent JSX nodes
// @internal
// eslint-disable-next-line sonarjs/cognitive-complexity
Parser.prototype._normalizeJsxNodes = function (node, options) {
var value = node.value;
var program;
try {
// wrap into single element which is valid jsx but not valid jsx in mdx, so that it won't break on adjacent JSX nodes
program = this._eslintParse("" + JSX_WRAPPER_START + value + JSX_WRAPPER_END, options).ast;
}
catch (e) {
if (hasProperties(e, LOC_ERROR_PROPERTIES)) {
var start = node.position.start;
e.index += start.offset - OFFSET;
e.column =
/* istanbul ignore next */
e.lineNumber > 1 ? e.column : e.column + start.column - OFFSET;
e.lineNumber += start.line - 1;
throw e;
}
return node;
}
var expression = program
.body[0].expression;
if (!isJsxNode(expression) || expression.children.length <= 1) {
return node;
}
var _a = node.position.start, line = _a.line, offset = _a.offset;
return expression.children.reduce(function (nodes, jsNode) {
if (!isJsxNode(jsNode)) {
return nodes;
}
var nodeStart = jsNode.start, nodeEnd = jsNode.end,
/* istanbul ignore next */
_a = jsNode.loc,
/* istanbul ignore next */
_b = _a === void 0 ? {
start: { column: nodeStart, line: 1 },
end: { column: nodeEnd, line: 1 },
} : _a, start = _b.start, end = _b.end,
/* istanbul ignore next */
_c = jsNode.range,
/* istanbul ignore next */
range = _c === void 0 ? [nodeStart, nodeEnd] : _c;
var startLine = line + start.line - 1;
var endLine = line + end.line - 1;
var startOffset = range[0] - OFFSET;
var endOffset = range[1] - OFFSET;
nodes.push({
type: 'jsx',
data: nodes.length > 0 ? null : node.data,
value: value.slice(startOffset, endOffset),
position: {
start: {
line: startLine,
column: line === startLine ? start.column - OFFSET : start.column,
offset: offset + startOffset,
},
end: {
line: endLine,
column: line === startLine ? end.column - OFFSET : end.column,
offset: offset + endOffset,
},
},
});
return nodes;
}, []);
};
// @internal
Parser.prototype._nodeToAst = function (node, options) {
var _this = this;
if (node.data && node.data.jsxType === 'JSXElementWithHTMLComments') {
this._services.JSXElementsWithHTMLComments.push(node);
}
var value = node.value;
// fix #4
if (isComment(value)) {
return;
}
var _a = normalizePosition(node.position), loc = _a.loc, start = _a.start;
var startLine = loc.start.line - 1; // ! line is 1-indexed, change to 0-indexed to simplify usage
var program;
try {
program = this._eslintParse(value, options).ast;
}
catch (e) {
/* istanbul ignore if */
if (hasProperties(e, LOC_ERROR_PROPERTIES)) {
// should be handled by `_normalizeJsxNodes`, just for robustness
e.index += start;
e.column = e.lineNumber > 1 ? e.column : e.column + loc.start.column;
e.lineNumber += startLine;
}
throw e;
}
var offset = start - program.range[0];
AST_PROPS.forEach(function (prop) {
var _a;
return (_a = _this._ast[prop]).push.apply(_a, program[prop].map(function (item) {
return restoreNodeLocation(item, startLine, offset);
}));
});
};
return Parser;
}());
export { Parser };
export var parser = new Parser();
export var parse = parser.parse, parseForESLint = parser.parseForESLint;
//# sourceMappingURL=parser.js.map | {
this._parsers = normalizeParser(options.parser);
} | conditional_block |
ccp_project.py | import datetime as dt
import scipy.optimize as sco
import scipy.stats as scs
import statsmodels.regression.linear_model as sm
import pandas as pd
import pandas.tseries.offsets as pdtso
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
################################
# our modules
# import sys
# sys.path.insert(0, path)
# from ccp_functions import *
#%%
#==============================================================================
# PORTFOLIO OPTIMIZATION
#==============================================================================
# optimization of the portfolio between start_date and end_date, at a frequency "freq"
# the signals used are X_macro and Y_assets (all the data available at the same frequency). Ex:
# Y_assets = data_returns(asset_classes, first_date, last_date, freq, 1)
# X_macro = data_lagged(macro_data, first_date, last_date, freq, 1)
# target vol is the volatility used for portfolio optimization
# periods is the number of historical returns used for portfolio optimization (i.e. estimating historical vol and returns)
# returns a dataframe over the period [start_date, end_date], with the weights of the portfolio and its returns
def optimization(print_date, start_date, end_date, freq,
X_macro, Y_assets, target_vol, periods, granularity, method,
thresholds, reduce_indic, rescale_vol, momentum_weighting):
# dates at which we optimize the portfolio
optimization_dates = pd.date_range(start=start_date, end=end_date, freq=freq)
# output of the function = dataframe of the returns of the strategy
# columns are the weights of each asset, plus the return for the corresponding period
    strategy_returns = pd.DataFrame(index=optimization_dates, columns=Y_assets.columns.tolist() + ["Return"], dtype=np.float64)
nb_indics = len(X_macro.columns)
momentum_weights = np.array([0.5] * nb_indics)
optimal_weights_previous = np.zeros((nb_indics, len(Y_assets.columns)))
# OUTSIDE LOOP ON THE OPTIMIZATION DATES
for date in optimization_dates:
# displays the date to show where we are in the optimization
if print_date == True:
            print(date)
# date t-1, on which we do the optimization
date_shifted = pd.DatetimeIndex(start=date, end=date, freq=freq).shift(n=-1, freq=freq)[0]
# optimal weights for each macro indicator will be stored in this np.array
optimal_weights = np.zeros((nb_indics, len(Y_assets.columns)))
# rolling target vol
if target_vol.keys()[0] == 'rolling':
vol_target = data_slice(Y_assets, date_shifted, target_vol.values()[0]).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = vol_target
elif target_vol.keys()[0] == 'target':
vol_target = target_vol.values()[0]
vol_method = vol_target
elif target_vol.keys()[0] == 'sharpe_ratio':
vol_target = data_slice(Y_assets, date_shifted, periods).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = 'sharpe_ratio'
elif target_vol.keys()[0] == 'risk_aversion':
vol_target = data_slice(Y_assets, date_shifted, periods).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = ('risk_aversion', target_vol.values()[0])
if method != 'quantile':
granularity = 2
else:
assert granularity >=1, 'Invalid granularity (%i)' % granularity
# INSIDE LOOP ON THE INDICATORS => we do the optimization for each indicator, store the results, and then aggregate the portfolio.
for i, indicator in enumerate(X_macro.columns.tolist()):
# signal & corresponding boundaries for the ptf optimization
si = signal_intensity(X_macro[indicator], macro_data[indicator], date, method, granularity, thresholds)
sd = signal_directions(asset_classes.columns[:-1], indicator) # exclude RFR when calling this function
bnds = signal_boundaries(si, sd, granularity)
# the optimization is very sensitive to the initial weights
init_weights = list(0.5 * si * sd) + [0.0]
# optimization and storage of the optimal weights
optimal_weights[i] = portfolio_optimize(init_weights, vol_method, bnds, Y_assets, date_shifted, freq, periods)
# reduces if it's a Business Cycle indicator (Business Cycle = 0.5 * Growth + 0.5 * Inflation)
if (reduce_indic != False) & (momentum_weighting == False):
assert type(reduce_indic) == dict, 'indicators to reduce are not in the form of a dict'
if indicator in reduce_indic:
optimal_weights[i] *= reduce_indic[indicator]
# shows the performance of the portfolio optimized with respect to the indicator
# print(portfolio_stats(optimal_weights[i], data_slice(Y_assets, date, periods), freq))
# aggregate the 4 strategies
if momentum_weighting == False:
# total weighting (1 if not in the reduce_indic dictionary)
sum_indic = nb_indics
if reduce_indic != False:
sum_indic += - len(reduce_indic) + sum(reduce_indic.values())
scaled_weights = optimal_weights.sum(axis=0) / sum_indic # normal weighting
# give more weights to indicators that recently performed better
else:
momentum_returns = [0.0] * nb_indics
# compute returns from previous period
for i in range(nb_indics):
momentum_returns[i] = np.dot(np.array(optimal_weights_previous[i]).T, Y_assets.loc[date_shifted].values)
# computes percentiles
momentum_scores = [scs.percentileofscore(momentum_returns, a, 'rank')/100.0 for a in momentum_returns]
# center on 0
#momentum_scores = [a - np.average(momentum_scores) for a in momentum_scores]
momentum_weights = momentum_weighting * np.array(momentum_scores) + (1 - momentum_weighting) * momentum_weights
scaled_weights = np.dot(momentum_weights.T, optimal_weights) / momentum_weights.sum()
if rescale_vol == True:
# in-sample volatility of the strategy
strategy_volatility = portfolio_stats(scaled_weights, data_slice(Y_assets, date_shifted, periods), freq)[1]
# we scale the portfolio such that the in-sample volatility is equal to target
scaled_weights = scaled_weights[:-1] * vol_target / strategy_volatility
scaled_weights = np.array(scaled_weights.tolist() + [1.0 - scaled_weights.sum()])
# weights of the strategy
strategy_returns.loc[date] = scaled_weights.tolist() + [(scaled_weights * Y_assets.loc[date]).sum()]
# for weighting momentum
optimal_weights_previous = optimal_weights
# returns the dataframe of the weights + returns of the strategy
return strategy_returns
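# Illustrative sketch (not part of the strategy code): the momentum re-weighting step above,
# shown on made-up numbers. Only the module-level imports (np, scs) are used; the helper name
# `_momentum_weighting_demo` and all values are hypothetical.
def _momentum_weighting_demo():
    momentum_returns = [0.012, -0.004, 0.007, 0.001]    # last-period return of each indicator (made up)
    momentum_weights = np.array([0.5, 0.5, 0.5, 0.5])   # previous indicator weights (made up)
    momentum_weighting = 0.6                             # smoothing factor (hypothetical)
    # percentile rank of each return within the cross-section: best -> 1.0, worst -> 0.25 here
    scores = [scs.percentileofscore(momentum_returns, a, 'rank') / 100.0 for a in momentum_returns]
    new_weights = momentum_weighting * np.array(scores) + (1 - momentum_weighting) * momentum_weights
    return new_weights / new_weights.sum()               # normalized weights used to aggregate the indicators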
def period_name(period):
|
def period_names_list(periods):
"""Returns a list using function period_name."""
return [period_name(period) for period in periods]
#%%
#==============================================================================
# PARAMETRIZATION OF THE OPTIMIZATION
#==============================================================================
# we try the optimization decade by decade
freq = "M"
optimization_periods = [
("1980 01 01", "1989 12 31"),
("1990 01 01", "1999 12 31"),
("2000 01 01", "2009 12 31"),
("2010 01 01", "2017 12 31")
]
# data treated for portfolio optimization
Y_assets = data_returns(asset_classes, first_date, last_date, freq, 1)
X_macro = data_lagged(macro_data, first_date, last_date, freq, 1)
# here we will store the results for each decade
strategy_results = []
# optimization parameters
target_vol = [0.1, 0.09, 0.08, 0.07] # scale the portfolio to get a volatility of 10% in sample
params = {
'print_date': True,
'start_date': first_date,
'end_date': last_date,
'freq': freq,
'X_macro': X_macro,
'Y_assets': Y_assets,
'target_vol': {'sharpe_ratio': None}, # {'target': target_vol[i]}, {'rolling': 120}, {'sharpe_ratio': None}, {'risk_aversion': 2}
'periods': 120, # 10Y => need for a large sample to compute robust volatility from monthly returns
'granularity': 2,
'method': "quantile", #zscore_robust
'thresholds': [-1.2, 1.2],
'reduce_indic': {"Growth": 0.5, "Inflation": 0.5}, # can be a dict or "False"
'rescale_vol': True, # Boolean / if used with different target_vol method, uses rolling as vol rescaler
'momentum_weighting': False
}
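# Hedged example (illustrative only): the 'target_vol' entry selects the optimization mode,
# as listed in the comment above. Alternative configurations could be built as copies of
# `params`; the variable names below are hypothetical and not used elsewhere.
params_fixed_vol = dict(params, target_vol={'target': 0.10})        # fixed 10% annualized vol target
params_rolling_vol = dict(params, target_vol={'rolling': 120})       # vol target from a 120-period rolling window
params_risk_aversion = dict(params, target_vol={'risk_aversion': 2})  # risk-aversion objective (handled in portfolio_optimize)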
#%%
#==============================================================================
# OPTIMIZATION
#==============================================================================
#params['X_macro'] = X_macro[['Monetary Policy', 'Risk Sentiment', 'Inflation']]
#params['reduce_indic'] = False
for i, period in enumerate(optimization_periods):
params['start_date'], params['end_date'] = period
# to change parameters for different optimization periods:
# params['target_vol'] = target_vol[i]
strategy_results.append(optimization(**params))
#%%
def histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio'):
    # bar chart of the chosen indicator for each period; returns the analysis DataFrame so it can be reused below
    my_df = strategy_analysis(optimization_periods, strategy_results, Y_assets, freq)
    my_df.sort_index(axis=1).loc(axis=1)[:, indicator].plot.bar(figsize=(12,6))
    plt.show()
    return my_df
my_df = histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio')
#%%
# testing each indicator performance separately
mydict = {}
params['print_date'] = False
params['reduce_indic'] = False
params['nb_indic'] = 1
params['granularity'] = 4
for i, indicator in enumerate(X_macro.columns.tolist()):
print(indicator)
strategy_results = []
params['X_macro'] = pd.DataFrame(X_macro[indicator])
for j, period in enumerate(optimization_periods):
params['start_date'], params['end_date'] = period
strategy_results.append(optimization(**params))
mydict[indicator] = strategy_results
histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio')
#%%
# prints the portfolio composition over time
for i in range(4):
period = period_name(optimization_periods[i])
strategy_results[i].drop(["Return"], axis=1).plot.bar(stacked=True, figsize=(12,6))
plt.title("Portfolio composition for the period " + period)
plt.legend()
plt.show()
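# Companion plot (sketch): cumulative performance of the strategy for each period, built from
# the 'Return' column produced by optimization(); assumes simple (not log) monthly returns.
for i in range(4):
    period = period_name(optimization_periods[i])
    (1.0 + strategy_results[i]["Return"]).cumprod().plot(figsize=(12,6))
    plt.title("Cumulative performance for the period " + period)
    plt.show()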
#%%
# compare to a static rebalancing
naive_strategy = [df.copy() for df in strategy_results]  # copy, so the optimized results are not overwritten in place
for i, period in enumerate(optimization_periods):
start_date, end_date = period
item = naive_strategy[i]
weight_eq = 0.4
weight_fi = 1 - weight_eq
item.Equities = weight_eq
item.Bonds = weight_fi
item.RFR = 0.0
item.Return = Y_assets.Equities * weight_eq + Y_assets.Bonds * weight_fi
naive_strategy_df = strategy_analysis(optimization_periods, naive_strategy, Y_assets, freq)
print(naive_strategy_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"])
print(my_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"])
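# Quick follow-up (sketch): per-decade gap in Sharpe ratio between the macro strategy and
# the static 40/60 benchmark, reusing the two Series printed just above.
sharpe_gap = (my_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"]
              - naive_strategy_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"])
print(sharpe_gap)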
#%%
"""
to do
- improve signal decision process (ratios, distance from mean with IQ / std...)
- improve boundaries / optimization
- improve portfolio aggregation
- improve ptf analytics
- add asset classes
- construct inflation forecasts
- interpretation of the ptf
- test the current signals + add new signals (consistency over time)
- add more granular asset classes
- improve the signal process / portfolio optimization + aggregation
- improve the portfolio analytics
"""
| """Returns a string in the form '1980 - 1989'."""
year_start = period[0][:4]
year_end = period[1][:4]
return year_start + " - " + year_end | identifier_body |
ccp_project.py | import datetime as dt
import scipy.optimize as sco
import scipy.stats as scs
import statsmodels.regression.linear_model as sm
import pandas as pd
import pandas.tseries.offsets as pdtso
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
################################
# our modules
# import sys
# sys.path.insert(0, path)
# from ccp_functions import *
#%%
#==============================================================================
# PORTFOLIO OPTIMIZATION
#==============================================================================
# optimization of the portfolio between start_date and end_date, at a frequency "freq"
# the signals used are X_macro and Y_assets (all the data available at the same frequency). Ex:
# Y_assets = data_returns(asset_classes, first_date, last_date, freq, 1)
# X_macro = data_lagged(macro_data, first_date, last_date, freq, 1)
# target vol is the volatility used for portfolio optimization
# periods is the number of historical returns used for portfolio optimization (i.e. estimating historical vol and returns)
# returns a dataframe over the period [start_date, end_date], with the weights of the portfolio and its returns
def optimization(print_date, start_date, end_date, freq,
X_macro, Y_assets, target_vol, periods, granularity, method,
thresholds, reduce_indic, rescale_vol, momentum_weighting):
# dates at which we optimize the portfolio
optimization_dates = pd.date_range(start=start_date, end=end_date, freq=freq)
# output of the function = dataframe of the returns of the strategy
# columns are the weights of each asset, plus the return for the corresponding period
    strategy_returns = pd.DataFrame(index=optimization_dates, columns=Y_assets.columns.tolist() + ["Return"], dtype=np.float64)
nb_indics = len(X_macro.columns)
momentum_weights = np.array([0.5] * nb_indics)
optimal_weights_previous = np.zeros((nb_indics, len(Y_assets.columns)))
# OUTSIDE LOOP ON THE OPTIMIZATION DATES
for date in optimization_dates:
# displays the date to show where we are in the optimization
if print_date == True:
            print(date)
# date t-1, on which we do the optimization
date_shifted = pd.DatetimeIndex(start=date, end=date, freq=freq).shift(n=-1, freq=freq)[0]
# optimal weights for each macro indicator will be stored in this np.array
optimal_weights = np.zeros((nb_indics, len(Y_assets.columns)))
# rolling target vol
if target_vol.keys()[0] == 'rolling':
vol_target = data_slice(Y_assets, date_shifted, target_vol.values()[0]).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = vol_target
elif target_vol.keys()[0] == 'target':
vol_target = target_vol.values()[0]
vol_method = vol_target
elif target_vol.keys()[0] == 'sharpe_ratio':
vol_target = data_slice(Y_assets, date_shifted, periods).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = 'sharpe_ratio'
elif target_vol.keys()[0] == 'risk_aversion':
vol_target = data_slice(Y_assets, date_shifted, periods).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = ('risk_aversion', target_vol.values()[0])
if method != 'quantile':
granularity = 2
else:
assert granularity >=1, 'Invalid granularity (%i)' % granularity
# INSIDE LOOP ON THE INDICATORS => we do the optimization for each indicator, store the results, and then aggregate the portfolio.
for i, indicator in enumerate(X_macro.columns.tolist()):
# signal & corresponding boundaries for the ptf optimization
si = signal_intensity(X_macro[indicator], macro_data[indicator], date, method, granularity, thresholds)
sd = signal_directions(asset_classes.columns[:-1], indicator) # exclude RFR when calling this function
bnds = signal_boundaries(si, sd, granularity)
# the optimization is very sensitive to the initial weights
init_weights = list(0.5 * si * sd) + [0.0]
# optimization and storage of the optimal weights
optimal_weights[i] = portfolio_optimize(init_weights, vol_method, bnds, Y_assets, date_shifted, freq, periods)
# reduces if it's a Business Cycle indicator (Business Cycle = 0.5 * Growth + 0.5 * Inflation)
if (reduce_indic != False) & (momentum_weighting == False):
assert type(reduce_indic) == dict, 'indicators to reduce are not in the form of a dict'
if indicator in reduce_indic:
optimal_weights[i] *= reduce_indic[indicator]
# shows the performance of the portfolio optimized with respect to the indicator
# print(portfolio_stats(optimal_weights[i], data_slice(Y_assets, date, periods), freq))
# aggregate the 4 strategies
if momentum_weighting == False:
# total weighting (1 if not in the reduce_indic dictionary)
sum_indic = nb_indics
if reduce_indic != False:
sum_indic += - len(reduce_indic) + sum(reduce_indic.values())
scaled_weights = optimal_weights.sum(axis=0) / sum_indic # normal weighting
# give more weights to indicators that recently performed better
else:
momentum_returns = [0.0] * nb_indics
# compute returns from previous period
for i in range(nb_indics):
momentum_returns[i] = np.dot(np.array(optimal_weights_previous[i]).T, Y_assets.loc[date_shifted].values)
# computes percentiles
momentum_scores = [scs.percentileofscore(momentum_returns, a, 'rank')/100.0 for a in momentum_returns]
# center on 0
#momentum_scores = [a - np.average(momentum_scores) for a in momentum_scores]
momentum_weights = momentum_weighting * np.array(momentum_scores) + (1 - momentum_weighting) * momentum_weights
scaled_weights = np.dot(momentum_weights.T, optimal_weights) / momentum_weights.sum()
if rescale_vol == True:
# in-sample volatility of the strategy
strategy_volatility = portfolio_stats(scaled_weights, data_slice(Y_assets, date_shifted, periods), freq)[1]
# we scale the portfolio such that the in-sample volatility is equal to target
scaled_weights = scaled_weights[:-1] * vol_target / strategy_volatility
scaled_weights = np.array(scaled_weights.tolist() + [1.0 - scaled_weights.sum()])
# weights of the strategy
strategy_returns.loc[date] = scaled_weights.tolist() + [(scaled_weights * Y_assets.loc[date]).sum()]
# for weighting momentum
optimal_weights_previous = optimal_weights
# returns the dataframe of the weights + returns of the strategy
return strategy_returns
def period_name(period):
"""Returns a string in the form '1980 - 1989'."""
year_start = period[0][:4]
year_end = period[1][:4]
return year_start + " - " + year_end
def period_names_list(periods):
"""Returns a list using function period_name."""
return [period_name(period) for period in periods]
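# Tiny sanity check (illustrative) for the two helpers above:
assert period_name(("1980 01 01", "1989 12 31")) == "1980 - 1989"
assert period_names_list([("1980 01 01", "1989 12 31"),
                          ("2010 01 01", "2017 12 31")]) == ["1980 - 1989", "2010 - 2017"]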
#%%
#==============================================================================
# PARAMETRIZATION OF THE OPTIMIZATION
#==============================================================================
# we try the optimization decade by decade
freq = "M"
optimization_periods = [
("1980 01 01", "1989 12 31"),
("1990 01 01", "1999 12 31"),
("2000 01 01", "2009 12 31"),
("2010 01 01", "2017 12 31")
]
# data treated for portfolio optimization
Y_assets = data_returns(asset_classes, first_date, last_date, freq, 1)
X_macro = data_lagged(macro_data, first_date, last_date, freq, 1)
# here we will store the results for each decade
strategy_results = []
# optimization parameters
target_vol = [0.1, 0.09, 0.08, 0.07] # scale the portfolio to get a volatility of 10% in sample
params = {
'print_date': True,
'start_date': first_date,
'end_date': last_date,
'freq': freq,
'X_macro': X_macro,
'Y_assets': Y_assets,
'target_vol': {'sharpe_ratio': None}, # {'target': target_vol[i]}, {'rolling': 120}, {'sharpe_ratio': None}, {'risk_aversion': 2}
'periods': 120, # 10Y => need for a large sample to compute robust volatility from monthly returns
'granularity': 2,
'method': "quantile", #zscore_robust
'thresholds': [-1.2, 1.2],
'reduce_indic': {"Growth": 0.5, "Inflation": 0.5}, # can be a dict or "False"
'rescale_vol': True, # Boolean / if used with different target_vol method, uses rolling as vol rescaler
'momentum_weighting': False
}
#%%
#==============================================================================
# OPTIMIZATION
#==============================================================================
#params['X_macro'] = X_macro[['Monetary Policy', 'Risk Sentiment', 'Inflation']]
#params['reduce_indic'] = False
for i, period in enumerate(optimization_periods):
params['start_date'], params['end_date'] = period
# to change parameters for different optimization periods:
# params['target_vol'] = target_vol[i]
strategy_results.append(optimization(**params))
#%%
def histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio'):
    # bar chart of the chosen indicator for each period; returns the analysis DataFrame so it can be reused below
    my_df = strategy_analysis(optimization_periods, strategy_results, Y_assets, freq)
    my_df.sort_index(axis=1).loc(axis=1)[:, indicator].plot.bar(figsize=(12,6))
    plt.show()
    return my_df
my_df = histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio')
#%%
# testing each indicator performance separately
mydict = {}
params['print_date'] = False
params['reduce_indic'] = False
params['nb_indic'] = 1
params['granularity'] = 4
for i, indicator in enumerate(X_macro.columns.tolist()):
|
#%%
# prints the portfolio composition over time
for i in range(4):
period = period_name(optimization_periods[i])
strategy_results[i].drop(["Return"], axis=1).plot.bar(stacked=True, figsize=(12,6))
plt.title("Portfolio composition for the period " + period)
plt.legend()
plt.show()
#%%
# compare to a static rebalancing
naive_strategy = [df.copy() for df in strategy_results]  # copy, so the optimized results are not overwritten in place
for i, period in enumerate(optimization_periods):
start_date, end_date = period
item = naive_strategy[i]
weight_eq = 0.4
weight_fi = 1 - weight_eq
item.Equities = weight_eq
item.Bonds = weight_fi
item.RFR = 0.0
item.Return = Y_assets.Equities * weight_eq + Y_assets.Bonds * weight_fi
naive_strategy_df = strategy_analysis(optimization_periods, naive_strategy, Y_assets, freq)
print(naive_strategy_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"])
print(my_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"])
#%%
"""
to do
- improve signal decision process (ratios, distance from mean with IQ / std...)
- improve boundaries / optimization
- improve portfolio aggregation
- improve ptf analytics
- add asset classes
- construct inflation forecasts
- interpretation of the ptf
- test the current signals + add new signals (consistency over time)
- add more granular asset classes
- improve the signal process / portfolio optimization + aggregation
- improve the portfolio analytics
"""
| print(indicator)
strategy_results = []
params['X_macro'] = pd.DataFrame(X_macro[indicator])
for j, period in enumerate(optimization_periods):
params['start_date'], params['end_date'] = period
strategy_results.append(optimization(**params))
mydict[indicator] = strategy_results
histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio') | conditional_block |
ccp_project.py | import datetime as dt
import scipy.optimize as sco
import scipy.stats as scs
import statsmodels.regression.linear_model as sm
import pandas as pd
import pandas.tseries.offsets as pdtso
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
################################
# our modules
# import sys
# sys.path.insert(0, path)
# from ccp_functions import *
#%%
#==============================================================================
# PORTFOLIO OPTIMIZATION
#==============================================================================
# optimization of the portfolio between start_date and end_date, at a frequency "freq"
# the signals used are X_macro and Y_assets (all the data available at the same frequency). Ex:
# Y_assets = data_returns(asset_classes, first_date, last_date, freq, 1)
# X_macro = data_lagged(macro_data, first_date, last_date, freq, 1)
# target vol is the volatility used for portfolio optimization
# periods is the number of historical returns used for portfolio optimization (i.e. estimating historical vol and returns)
# returns a dataframe over the period [start_date, end_date], with the weights of the portfolio and its returns
def optimization(print_date, start_date, end_date, freq,
X_macro, Y_assets, target_vol, periods, granularity, method,
thresholds, reduce_indic, rescale_vol, momentum_weighting):
# dates at which we optimize the portfolio
optimization_dates = pd.date_range(start=start_date, end=end_date, freq=freq)
# output of the function = dataframe of the returns of the strategy
# columns are the weights of each asset, plus the return for the corresponding period
    strategy_returns = pd.DataFrame(index=optimization_dates, columns=Y_assets.columns.tolist() + ["Return"], dtype=np.float64)
nb_indics = len(X_macro.columns)
momentum_weights = np.array([0.5] * nb_indics)
optimal_weights_previous = np.zeros((nb_indics, len(Y_assets.columns)))
# OUTSIDE LOOP ON THE OPTIMIZATION DATES
for date in optimization_dates:
# displays the date to show where we are in the optimization
if print_date == True:
            print(date)
# date t-1, on which we do the optimization
date_shifted = pd.DatetimeIndex(start=date, end=date, freq=freq).shift(n=-1, freq=freq)[0]
# optimal weights for each macro indicator will be stored in this np.array
optimal_weights = np.zeros((nb_indics, len(Y_assets.columns)))
# rolling target vol
if target_vol.keys()[0] == 'rolling':
vol_target = data_slice(Y_assets, date_shifted, target_vol.values()[0]).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = vol_target
elif target_vol.keys()[0] == 'target':
vol_target = target_vol.values()[0]
vol_method = vol_target
elif target_vol.keys()[0] == 'sharpe_ratio':
vol_target = data_slice(Y_assets, date_shifted, periods).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = 'sharpe_ratio'
elif target_vol.keys()[0] == 'risk_aversion':
vol_target = data_slice(Y_assets, date_shifted, periods).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = ('risk_aversion', target_vol.values()[0])
if method != 'quantile':
granularity = 2
else:
assert granularity >=1, 'Invalid granularity (%i)' % granularity
# INSIDE LOOP ON THE INDICATORS => we do the optimization for each indicator, store the results, and then aggregate the portfolio.
for i, indicator in enumerate(X_macro.columns.tolist()):
# signal & corresponding boundaries for the ptf optimization
si = signal_intensity(X_macro[indicator], macro_data[indicator], date, method, granularity, thresholds)
sd = signal_directions(asset_classes.columns[:-1], indicator) # exclude RFR when calling this function
bnds = signal_boundaries(si, sd, granularity)
# the optimization is very sensitive to the initial weights
init_weights = list(0.5 * si * sd) + [0.0]
# optimization and storage of the optimal weights
optimal_weights[i] = portfolio_optimize(init_weights, vol_method, bnds, Y_assets, date_shifted, freq, periods)
# reduces if it's a Business Cycle indicator (Business Cycle = 0.5 * Growth + 0.5 * Inflation)
if (reduce_indic != False) & (momentum_weighting == False):
assert type(reduce_indic) == dict, 'indicators to reduce are not in the form of a dict'
if indicator in reduce_indic:
optimal_weights[i] *= reduce_indic[indicator]
# shows the performance of the portfolio optimized with respect to the indicator
# print(portfolio_stats(optimal_weights[i], data_slice(Y_assets, date, periods), freq))
# aggregate the 4 strategies
if momentum_weighting == False:
# total weighting (1 if not in the reduce_indic dictionary)
sum_indic = nb_indics
if reduce_indic != False:
sum_indic += - len(reduce_indic) + sum(reduce_indic.values())
scaled_weights = optimal_weights.sum(axis=0) / sum_indic # normal weighting
# give more weights to indicators that recently performed better
else:
momentum_returns = [0.0] * nb_indics
# compute returns from previous period
for i in range(nb_indics):
momentum_returns[i] = np.dot(np.array(optimal_weights_previous[i]).T, Y_assets.loc[date_shifted].values)
# computes percentiles
momentum_scores = [scs.percentileofscore(momentum_returns, a, 'rank')/100.0 for a in momentum_returns]
| # center on 0
#momentum_scores = [a - np.average(momentum_scores) for a in momentum_scores]
momentum_weights = momentum_weighting * np.array(momentum_scores) + (1 - momentum_weighting) * momentum_weights
scaled_weights = np.dot(momentum_weights.T, optimal_weights) / momentum_weights.sum()
if rescale_vol == True:
# in-sample volatility of the strategy
strategy_volatility = portfolio_stats(scaled_weights, data_slice(Y_assets, date_shifted, periods), freq)[1]
# we scale the portfolio such that the in-sample volatility is equal to target
scaled_weights = scaled_weights[:-1] * vol_target / strategy_volatility
scaled_weights = np.array(scaled_weights.tolist() + [1.0 - scaled_weights.sum()])
# weights of the strategy
strategy_returns.loc[date] = scaled_weights.tolist() + [(scaled_weights * Y_assets.loc[date]).sum()]
# for weighting momentum
optimal_weights_previous = optimal_weights
# returns the dataframe of the weights + returns of the strategy
return strategy_returns
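# Illustrative arithmetic for the rescale_vol step above (all numbers made up): the risky
# weights are scaled by vol_target / in-sample vol, and the residual goes to the last column
# (RFR) so that the weights still sum to 1. The helper name is hypothetical.
def _rescale_vol_demo():
    weights = np.array([0.30, 0.50, 0.20])   # Equities, Bonds, RFR (made up)
    vol_target, strategy_vol = 0.10, 0.08    # annualized target vs realized in-sample vol (made up)
    risky = weights[:-1] * vol_target / strategy_vol           # scale the risky sleeve up to the target vol
    return np.array(risky.tolist() + [1.0 - risky.sum()])      # put the remainder in the risk-free asset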
def period_name(period):
"""Returns a string in the form '1980 - 1989'."""
year_start = period[0][:4]
year_end = period[1][:4]
return year_start + " - " + year_end
def period_names_list(periods):
"""Returns a list using function period_name."""
return [period_name(period) for period in periods]
#%%
#==============================================================================
# PARAMETRIZATION OF THE OPTIMIZATION
#==============================================================================
# we try the optimization decade by decade
freq = "M"
optimization_periods = [
("1980 01 01", "1989 12 31"),
("1990 01 01", "1999 12 31"),
("2000 01 01", "2009 12 31"),
("2010 01 01", "2017 12 31")
]
# data treated for portfolio optimization
Y_assets = data_returns(asset_classes, first_date, last_date, freq, 1)
X_macro = data_lagged(macro_data, first_date, last_date, freq, 1)
# here we will store the results for each decade
strategy_results = []
# optimization parameters
target_vol = [0.1, 0.09, 0.08, 0.07] # scale the portfolio to get a volatility of 10% in sample
params = {
'print_date': True,
'start_date': first_date,
'end_date': last_date,
'freq': freq,
'X_macro': X_macro,
'Y_assets': Y_assets,
'target_vol': {'sharpe_ratio': None}, # {'target': target_vol[i]}, {'rolling': 120}, {'sharpe_ratio': None}, {'risk_aversion': 2}
'periods': 120, # 10Y => need for a large sample to compute robust volatility from monthly returns
'granularity': 2,
'method': "quantile", #zscore_robust
'thresholds': [-1.2, 1.2],
'reduce_indic': {"Growth": 0.5, "Inflation": 0.5}, # can be a dict or "False"
'rescale_vol': True, # Boolean / if used with different target_vol method, uses rolling as vol rescaler
'momentum_weighting': False
}
#%%
#==============================================================================
# OPTIMIZATION
#==============================================================================
#params['X_macro'] = X_macro[['Monetary Policy', 'Risk Sentiment', 'Inflation']]
#params['reduce_indic'] = False
for i, period in enumerate(optimization_periods):
params['start_date'], params['end_date'] = period
# to change parameters for different optimization periods:
# params['target_vol'] = target_vol[i]
strategy_results.append(optimization(**params))
#%%
def histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio'):
    # bar chart of the chosen indicator for each period; returns the analysis DataFrame so it can be reused below
    my_df = strategy_analysis(optimization_periods, strategy_results, Y_assets, freq)
    my_df.sort_index(axis=1).loc(axis=1)[:, indicator].plot.bar(figsize=(12,6))
    plt.show()
    return my_df
my_df = histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio')
#%%
# testing each indicator performance separately
mydict = {}
params['print_date'] = False
params['reduce_indic'] = False
params['nb_indic'] = 1
params['granularity'] = 4
for i, indicator in enumerate(X_macro.columns.tolist()):
print(indicator)
strategy_results = []
params['X_macro'] = pd.DataFrame(X_macro[indicator])
for j, period in enumerate(optimization_periods):
params['start_date'], params['end_date'] = period
strategy_results.append(optimization(**params))
mydict[indicator] = strategy_results
histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio')
#%%
# prints the portfolio composition over time
for i in range(4):
period = period_name(optimization_periods[i])
strategy_results[i].drop(["Return"], axis=1).plot.bar(stacked=True, figsize=(12,6))
plt.title("Portfolio composition for the period " + period)
plt.legend()
plt.show()
#%%
# compare to a static rebalancing
naive_strategy = [df.copy() for df in strategy_results]  # copy, so the optimized results are not overwritten in place
for i, period in enumerate(optimization_periods):
start_date, end_date = period
item = naive_strategy[i]
weight_eq = 0.4
weight_fi = 1 - weight_eq
item.Equities = weight_eq
item.Bonds = weight_fi
item.RFR = 0.0
item.Return = Y_assets.Equities * weight_eq + Y_assets.Bonds * weight_fi
naive_strategy_df = strategy_analysis(optimization_periods, naive_strategy, Y_assets, freq)
print(naive_strategy_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"])
print(my_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"])
#%%
"""
to do
- improve signal decision process (ratios, distance from mean with IQ / std...)
- improve boundaries / optimization
- improve portfolio aggregation
- improve ptf analytics
- add asset classes
- construct inflation forecasts
- interpretation of the ptf
- test the current signals + add new signals (consistency over time)
- add more granular asset classes
- improve the signal process / portfolio optimization + aggregation
- improve the portfolio analytics
""" | random_line_split |
|
ccp_project.py | import datetime as dt
import scipy.optimize as sco
import scipy.stats as scs
import statsmodels.regression.linear_model as sm
import pandas as pd
import pandas.tseries.offsets as pdtso
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
################################
# our modules
# import sys
# sys.path.insert(0, path)
# from ccp_functions import *
#%%
#==============================================================================
# PORTFOLIO OPTIMIZATION
#==============================================================================
# optimization of the portfolio between start_date and end_date, at a frequency "freq"
# the signals used are X_macro and Y_assets (all the data available at the same frequency). Ex:
# Y_assets = data_returns(asset_classes, first_date, last_date, freq, 1)
# X_macro = data_lagged(macro_data, first_date, last_date, freq, 1)
# target vol is the volatility used for portfolio optimization
# periods is the number of historical returns used for portfolio optimization (ie. estimating historical vol and returns)
# returns a dataframe over the period [start_date, end_date], with the weights of the portfolio and its returns
def optimization(print_date, start_date, end_date, freq,
X_macro, Y_assets, target_vol, periods, granularity, method,
thresholds, reduce_indic, rescale_vol, momentum_weighting):
# dates at which we optimize the portfolio
optimization_dates = pd.date_range(start=start_date, end=end_date, freq=freq)
# output of the function = dataframe of the returns of the strategy
# columns are the weights of each asset, plus the return for the corresponding period
strategy_returns = pd.DataFrame(index=optimization_dates, columns=[Y_assets.columns.tolist() + ["Return"]], dtype=np.float64)
nb_indics = len(X_macro.columns)
momentum_weights = np.array([0.5] * nb_indics)
optimal_weights_previous = np.zeros((nb_indics, len(Y_assets.columns)))
# OUTSIDE LOOP ON THE OPTIMIZATION DATES
for date in optimization_dates:
# displays the date to show where we are in the optimization
if print_date == True:
            print(date)
# date t-1, on which we do the optimization
date_shifted = pd.DatetimeIndex(start=date, end=date, freq=freq).shift(n=-1, freq=freq)[0]
# optimal weights for each macro indicator will be stored in this np.array
optimal_weights = np.zeros((nb_indics, len(Y_assets.columns)))
# rolling target vol
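        # note: target_vol.keys()[0] / .values()[0] below is Python 2 style; under Python 3 this would need list(target_vol)[0] and list(target_vol.values())[0]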
if target_vol.keys()[0] == 'rolling':
vol_target = data_slice(Y_assets, date_shifted, target_vol.values()[0]).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = vol_target
elif target_vol.keys()[0] == 'target':
vol_target = target_vol.values()[0]
vol_method = vol_target
elif target_vol.keys()[0] == 'sharpe_ratio':
vol_target = data_slice(Y_assets, date_shifted, periods).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = 'sharpe_ratio'
elif target_vol.keys()[0] == 'risk_aversion':
vol_target = data_slice(Y_assets, date_shifted, periods).std().mean() * np.sqrt(annualization_factor(freq))
vol_method = ('risk_aversion', target_vol.values()[0])
if method != 'quantile':
granularity = 2
else:
assert granularity >=1, 'Invalid granularity (%i)' % granularity
# INSIDE LOOP ON THE INDICATORS => we do the optimization for each indicator, store the results, and then aggregate the portfolio.
for i, indicator in enumerate(X_macro.columns.tolist()):
# signal & corresponding boundaries for the ptf optimization
si = signal_intensity(X_macro[indicator], macro_data[indicator], date, method, granularity, thresholds)
sd = signal_directions(asset_classes.columns[:-1], indicator) # exclude RFR when calling this function
bnds = signal_boundaries(si, sd, granularity)
# the optimization is very sensitive to the initial weights
init_weights = list(0.5 * si * sd) + [0.0]
# optimization and storage of the optimal weights
optimal_weights[i] = portfolio_optimize(init_weights, vol_method, bnds, Y_assets, date_shifted, freq, periods)
# reduces if it's a Business Cycle indicator (Business Cycle = 0.5 * Growth + 0.5 * Inflation)
if (reduce_indic != False) & (momentum_weighting == False):
assert type(reduce_indic) == dict, 'indicators to reduce are not in the form of a dict'
if indicator in reduce_indic:
optimal_weights[i] *= reduce_indic[indicator]
# shows the performance of the portfolio optimized with respect to the indicator
# print(portfolio_stats(optimal_weights[i], data_slice(Y_assets, date, periods), freq))
# aggregate the 4 strategies
if momentum_weighting == False:
# total weighting (1 if not in the reduce_indic dictionary)
sum_indic = nb_indics
if reduce_indic != False:
sum_indic += - len(reduce_indic) + sum(reduce_indic.values())
scaled_weights = optimal_weights.sum(axis=0) / sum_indic # normal weighting
# give more weights to indicators that recently performed better
else:
momentum_returns = [0.0] * nb_indics
# compute returns from previous period
for i in range(nb_indics):
momentum_returns[i] = np.dot(np.array(optimal_weights_previous[i]).T, Y_assets.loc[date_shifted].values)
# computes percentiles
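            # e.g. scs.percentileofscore([0.01, -0.02, 0.03], 0.01, 'rank') / 100.0 is about 0.67, so indicators with better recent returns get scores closer to 1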
momentum_scores = [scs.percentileofscore(momentum_returns, a, 'rank')/100.0 for a in momentum_returns]
# center on 0
#momentum_scores = [a - np.average(momentum_scores) for a in momentum_scores]
momentum_weights = momentum_weighting * np.array(momentum_scores) + (1 - momentum_weighting) * momentum_weights
scaled_weights = np.dot(momentum_weights.T, optimal_weights) / momentum_weights.sum()
if rescale_vol == True:
# in-sample volatility of the strategy
strategy_volatility = portfolio_stats(scaled_weights, data_slice(Y_assets, date_shifted, periods), freq)[1]
# we scale the portfolio such that the in-sample volatility is equal to target
scaled_weights = scaled_weights[:-1] * vol_target / strategy_volatility
scaled_weights = np.array(scaled_weights.tolist() + [1.0 - scaled_weights.sum()])
# weights of the strategy
strategy_returns.loc[date] = scaled_weights.tolist() + [(scaled_weights * Y_assets.loc[date]).sum()]
# for weighting momentum
optimal_weights_previous = optimal_weights
# returns the dataframe of the weights + returns of the strategy
return strategy_returns
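# Illustrative sketch (not executed): how a single call to optimization() looks for one decade,
# reusing the data helpers defined elsewhere in this project (data_returns, data_lagged,
# asset_classes, macro_data); the parameter values below are examples only.
# Y_assets_example = data_returns(asset_classes, first_date, last_date, "M", 1)
# X_macro_example = data_lagged(macro_data, first_date, last_date, "M", 1)
# example_run = optimization(print_date=False, start_date="2000 01 01", end_date="2009 12 31",
#                            freq="M", X_macro=X_macro_example, Y_assets=Y_assets_example,
#                            target_vol={'rolling': 120}, periods=120, granularity=2,
#                            method="quantile", thresholds=[-1.2, 1.2], reduce_indic=False,
#                            rescale_vol=True, momentum_weighting=False)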
def period_name(period):
"""Returns a string in the form '1980 - 1989'."""
year_start = period[0][:4]
year_end = period[1][:4]
return year_start + " - " + year_end
def period_names_list(periods):
"""Returns a list using function period_name."""
return [period_name(period) for period in periods]
#%%
#==============================================================================
# PARAMETRIZATION OF THE OPTIMIZATION
#==============================================================================
# we try the optimization decade by decade
freq = "M"
optimization_periods = [
("1980 01 01", "1989 12 31"),
("1990 01 01", "1999 12 31"),
("2000 01 01", "2009 12 31"),
("2010 01 01", "2017 12 31")
]
# data treated for portfolio optimization
Y_assets = data_returns(asset_classes, first_date, last_date, freq, 1)
X_macro = data_lagged(macro_data, first_date, last_date, freq, 1)
# here we will store the results for each decade
strategy_results = []
# optimization parameters
target_vol = [0.1, 0.09, 0.08, 0.07] # scale the portfolio to get a volatility of 10% in sample
params = {
'print_date': True,
'start_date': first_date,
'end_date': last_date,
'freq': freq,
'X_macro': X_macro,
'Y_assets': Y_assets,
'target_vol': {'sharpe_ratio': None}, # {'target': target_vol[i]}, {'rolling': 120}, {'sharpe_ratio': None}, {'risk_aversion': 2}
'periods': 120, # 10Y => need for a large sample to compute robust volatility from monthly returns
'granularity': 2,
'method': "quantile", #zscore_robust
'thresholds': [-1.2, 1.2],
'reduce_indic': {"Growth": 0.5, "Inflation": 0.5}, # can be a dict or "False"
'rescale_vol': True, # Boolean / if used with different target_vol method, uses rolling as vol rescaler
'momentum_weighting': False
}
#%%
#==============================================================================
# OPTIMIZATION
#==============================================================================
#params['X_macro'] = X_macro[['Monetary Policy', 'Risk Sentiment', 'Inflation']]
#params['reduce_indic'] = False
for i, period in enumerate(optimization_periods):
params['start_date'], params['end_date'] = period
# to change parameters for different optimization periods:
# params['target_vol'] = target_vol[i]
strategy_results.append(optimization(**params))
#%%
def | (optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio'):
# histograms for analysis
my_df = strategy_analysis(optimization_periods, strategy_results, Y_assets, freq)
    my_df.sort_index(axis=1).loc(axis=1)[:, indicator].plot.bar(figsize=(12,6))
    plt.show()
    return my_df
my_df = histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio')
#%%
# testing each indicator performance separately
mydict = {}
params['print_date'] = False
params['reduce_indic'] = False
#params['nb_indic'] = 1  # note: optimization() takes no nb_indic argument (it infers the indicator count from X_macro), so passing it via **params would raise a TypeError
params['granularity'] = 4
for i, indicator in enumerate(X_macro.columns.tolist()):
print(indicator)
strategy_results = []
params['X_macro'] = pd.DataFrame(X_macro[indicator])
for j, period in enumerate(optimization_periods):
params['start_date'], params['end_date'] = period
strategy_results.append(optimization(**params))
mydict[indicator] = strategy_results
histogram_analysis(optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio')
#%%
# prints the portfolio composition over time
for i in range(4):
period = period_name(optimization_periods[i])
strategy_results[i].drop(["Return"], axis=1).plot.bar(stacked=True, figsize=(12,6))
plt.title("Portfolio composition for the period " + period)
plt.legend()
plt.show()
#%%
# compare to a static rebalancing
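# note: naive_strategy below is bound to the same list object as strategy_results, so the loop also overwrites the optimized results in place (deep-copy strategy_results first if they are still needed)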
naive_strategy = strategy_results
for i, period in enumerate(optimization_periods):
start_date, end_date = period
item = naive_strategy[i]
weight_eq = 0.4
weight_fi = 1 - weight_eq
item.Equities = weight_eq
item.Bonds = weight_fi
item.RFR = 0.0
item.Return = Y_assets.Equities * weight_eq + Y_assets.Bonds * weight_fi
naive_strategy_df = strategy_analysis(optimization_periods, naive_strategy, Y_assets, freq)
print(naive_strategy_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"])
print(my_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].loc["Strategy"])
#%%
"""
to do
- improve signal decision process (ratios, distance from mean with IQ / std...)
- improve boundaries / optimization
- improve portfolio aggregation
- improve ptf analytics
- add asset classes
- construct inflation forecasts
- interpretation of the ptf
- test the current signals + add new signals (consistency over time)
- add more granular asset classes
- improve the signal process / portfolio optimization + aggregation
- improve the portfolio analytics
"""
| histogram_analysis | identifier_name |
index.js | //Boilerplate code set up correctly - used to accept input from user
const readline = require("readline");
const rl = readline.createInterface(process.stdin, process.stdout);
function ask(questionText) {
return new Promise((resolve, reject) => {
rl.question(questionText, resolve);
});
}
//Guess The Number game - the user picks a number and the computer guesses
async function computerGuesses() {
//returns a guess that is half way between the min and max range
function makeSmartGuess(min, max) {
return min + Math.floor((max - min) / 2);
}
//cheat detector function that will return true if there is an issue with the response based on known range (true ==> lying, false ==> not lying)
function cheatDetector(min, max, guess, secretNumber, modifyRange) {
//if the computer's guess is the secret number but the user has said no, the computer calls them out for cheating
if (guess === secretNumber) {
console.log(
`\nHmmm, is your name Mufasa, 'cause I think you're a-lying...\n`
);
return true;
} else {
if (modifyRange === "h" || modifyRange === "higher") {
//if the user indicates the number is higher but the guess is already the max included value ==> returns true
if (guess + 1 > max) {
console.log(
`\nLiar, liar pants on fire! You said the number was lower than ${
max + 1
}, so it can't also be higher than ${guess}...\n`
);
return true;
}
}
if (modifyRange === "l" || modifyRange === "lower") {
//if the user indicates the number is lower but the guess is already the min included value ==> returns true
if (guess - 1 < min) {
console.log(
`\nCheater, cheater pumpkin eater! You said the number was higher than ${
min - 1
}, so it can't also be lower than ${guess}!\n`
);
return true;
}
}
return false;
}
}
//intros the game
console.log(
"Let's play a game where you (human) pick a number between 1 and a maximum, and I (computer) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = "y";
//while wantToPlay is yes the game will continue to run. If the user selects no the game ends
while (wantToPlay === "y" || wantToPlay === "yes") {
//keep track of min & max for range of guesses. Default values are 1 and 100
let min = 1;
let max = 100;
//allow the user to set the high range
max = await ask("\nWhat would you like the maximum number to be? ");
//makes sure number submitted is a valid number
while (isNaN(max)) {
max = await ask(
"\nLet's try this again. Please enter a number you'd like to use as the maximum. "
);
}
//confirms the user is ready to play
let readyToPlay = await ask(
`\nHave you decided on a random number between 1 and ${max}? (y/n): `
);
//sanitizes readyToPlay
readyToPlay = readyToPlay.trim().toLowerCase();
//waits until the player is ready to play by entering 'y' or 'yes'
while (readyToPlay !== "y" && readyToPlay !== "yes") {
readyToPlay = await ask(
`\nOk, I'll wait, please pick a number between 1 and ${max}. Are you ready now? (y/n): `
);
//sanitizes readyToPlay
readyToPlay = readyToPlay.trim().toLowerCase();
}
//declares a variable to store the user's number to be used to detect cheating
let secretNumber = await ask(
"\nWhat is your secret number? I won't peak, I promise..."
);
//sanitizes input into a number if a string has been entered
secretNumber = +secretNumber;
//guard clause to check that the secret number entered is a number and within the range
while (isNaN(secretNumber) || secretNumber > max || secretNumber < 1) {
//if the input entered is not a number prompts user to re-enter secret number
if (isNaN(secretNumber)) {
secretNumber = await ask(
`\nYou must enter a number. Please enter your secret number - remember it should be between 1 and ${max}. `
);
}
//if the input is outside of the range 1 to max, prompts the user to re-enter the secret number
else {
secretNumber = await ask(
`\nRemember, the number must be between 1 and ${max}. Please choose a different secret number that is within the correct range. `
);
}
//sanitizes input into a number if a string has been entered
secretNumber = +secretNumber;
}
//returns the secret number the user input
console.log(
`\nYou entered ${secretNumber}. \n\nBeep. Boop. Beep. Erasing from my memory.`
);
//starts the game
console.log("\nNow I will try to guess your secret number!");
// declares the variable that will store the users response if the computer's guess is correct or not
let response = "n";
//declares the numOfGuess variable to keep track of the number of guesses
let numOfGuess = 0;
//while the user has not responded 'y' to indicate that the computer has correctly guessed, the computer will continue making guesses
while ((response === "n") | (response === "no")) {
//sets the computer up to make a smart guess within the current range
let guess = makeSmartGuess(min, max);
//stores the users response if the computer's guess is correct or not
response = await ask(`\nIs the number ${guess}? (y/n): `);
//sanitizes response
response = response.trim().toLowerCase();
      //the computer has made another guess - increment the number of guesses made by 1
numOfGuess += 1;
// if the computer guessed the correct number ==> user responds 'y' and game gives victory message
if (response === "y" || response === "yes") {
console.log(
`\nAha! Your number was ${guess}! I win!\nIt only took me ${numOfGuess} tries to correctly guess your number.`
);
//prompts the user if they'd like to play again
wantToPlay = await ask("\nWould you like to play again? (y/n): ");
//sanitizes wantToPlay
wantToPlay = wantToPlay.trim().toLowerCase();
//if the user does not want to play again the game exits
if (wantToPlay === "n" || wantToPlay === "no") {
console.log("\nGoodbye, thanks for playing!");
process.exit();
}
}
//if the computer guessed wrong the user answers 'n' and computer asks if the number is higher or lower
else {
//if the min, max and guess are all equal then the computer has correctly narrowed down the number and the user is cheating
if (cheatDetector(min, max, guess, secretNumber, "")) {
console.log("Please be honest this time....");
//since numOfGuess will iterate once more when the computer prompts the users again we need to walk it down by 1 to correctly indicate the number of guesses taken
numOfGuess -= 1;
} else {
console.log("\nBummer.");
//declare the variable modifyRange that will hold h/l
let modifyRange = "";
while (!modifyRange) {
//stores the h/l response from the user in modifyRange
modifyRange = await ask(
`Is the number higher (h) or lower (l) than ${guess}? `
);
//sanitizes modifyRange
modifyRange = modifyRange.trim().toLowerCase();
// if the number is higher, the guess+1 is the new min of the range
if (modifyRange === "h" || modifyRange === "higher") {
if (cheatDetector(min, max, guess, secretNumber, modifyRange)) {
console.log("Please tell me the truth this time...");
modifyRange = "";
} else {
min = guess + 1;
}
}
//if the number is lower, the guess-1 is the new max of the range
else if (modifyRange === "l" || modifyRange === "lower") {
if (cheatDetector(min, max, guess, secretNumber, modifyRange)) {
console.log("Please tell me the truth this time...");
modifyRange = "";
} else {
max = guess - 1;
}
}
}
}
}
}
}
}
//Guess The Number game - the computer picks a number and the user guesses
async function userGuesses() {
//returns a random guess between the given min and max range
function | (min, max) {
return Math.floor(Math.random() * (max - min + 1) + min);
}
//intros the game
console.log(
"Let's play a game where I (computer) pick a number between 1 and 100, and you (human) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = "y";
//while wantToPlay is yes the game will continue to run. If the user selects no the game ends
while (wantToPlay === "y" || wantToPlay === "yes") {
//the computer picks a random number between 1 and 100
let randomNumber = chooseRandomNumber(1, 100);
//declare the variable to hold the user's guess
let guess = 0;
console.log("\nI have picked a random number between 1 and 100.");
//declares the numOfGuess variable to keep track of the number of guesses
let numOfGuess = 0;
//allows the user to guess as long as guess is not equal to the chosen randomNumber
while (randomNumber !== +guess) {
      //prompts the user for a guess
guess = await ask("\nPlease make a guess: ");
//makes sure guess submitted is a valid number
while (isNaN(guess)) {
guess = await ask(
"Let's try this again. Please enter a number as your guess. "
);
}
numOfGuess += 1;
//if the guess is less than the randomNumber indicates that the guess is too low
if (randomNumber > +guess) {
console.log("\nYou guessed too low.");
}
//if the guess is greater than the random number indicates that the guess is too high
else if (randomNumber < +guess) {
console.log("\nYou guessed too high.");
}
//only triggered if the guess is equal to the randomNumber
else {
console.log(
`\nCongratulations! You correctly guessed that the number is ${randomNumber}.`
);
//reports the number of guesses it took the user to correctly guess
if (numOfGuess >= 7) {
console.log(
`It took you ${numOfGuess} tries to correctly guess my number. You better keep practicing....`
);
} else {
console.log(`It only took you ${numOfGuess} tries. You're AMAZING!`);
}
//prompts the user if they'd like to play again
wantToPlay = await ask("\nWould you like to play again? (y/n): ");
//sanitizes wantToPlay
wantToPlay = wantToPlay.trim().toLowerCase();
//if the user does not want to play again the game exits
if (wantToPlay === "n" || wantToPlay === "no") {
console.log("\nGoodbye, thanks for playing!");
process.exit();
}
}
}
}
}
//lets the user choose which version of the guess the number game they'd like to play
async function playGame() {
console.log("Hello and Welcome to the Guess The Number Game!");
//asks the user which version of the game they'd like to play
let gameChoice = await ask(
"\nWhich game would you like to play?\n\t[1] You (human) pick a number, and I (computer) try to guess it.\n\t[2] I (computer) pick a number, and you (human) try to guess it.\nPlease select which game you'd like to play by entering either 1 or 2: "
);
//declares an array to hold the valid users input choices
let choices = ["1", "2"];
//sanitizes gameChoice
gameChoice = gameChoice.trim();
//checks that the choice is actually a number
while (isNaN(gameChoice) || !choices.includes(gameChoice)) {
gameChoice = await ask(
"Let's try this again. Please enter either 1 or 2 to choose the game you'd like to play. "
);
}
//sanitizes gameChoice
gameChoice = gameChoice.trim();
//triggers correct game to run once valid game choice has been made
if (gameChoice === "1") {
console.log("\nYou picked game 1, have fun!\n");
computerGuesses();
} else {
console.log("\nYou picked game 2, good luck!\n");
userGuesses();
}
}
playGame();
| chooseRandomNumber | identifier_name |
index.js | //Boilerplate code set up correctly - used to accept input from user
const readline = require("readline");
const rl = readline.createInterface(process.stdin, process.stdout);
function ask(questionText) {
return new Promise((resolve, reject) => {
rl.question(questionText, resolve);
});
}
//Guess The Number game - the user picks a number and the computer guesses
async function computerGuesses() {
//returns a guess that is half way between the min and max range
function makeSmartGuess(min, max) {
return min + Math.floor((max - min) / 2);
}
//cheat detector function that will return true if there is an issue with the response based on known range (true ==> lying, false ==> not lying)
function cheatDetector(min, max, guess, secretNumber, modifyRange) {
//if the computer's guess is the secret number but the user has said no, the computer calls them out for cheating
if (guess === secretNumber) {
console.log(
`\nHmmm, is your name Mufasa, 'cause I think you're a-lying...\n`
);
return true;
} else {
if (modifyRange === "h" || modifyRange === "higher") {
//if the user indicates the number is higher but the guess is already the max included value ==> returns true
if (guess + 1 > max) {
console.log(
`\nLiar, liar pants on fire! You said the number was lower than ${
max + 1
}, so it can't also be higher than ${guess}...\n`
);
return true;
}
}
if (modifyRange === "l" || modifyRange === "lower") {
//if the user indicates the number is lower but the guess is already the min included value ==> returns true
if (guess - 1 < min) |
}
return false;
}
}
//intros the game
console.log(
"Let's play a game where you (human) pick a number between 1 and a maximum, and I (computer) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = "y";
//while wantToPlay is yes the game will continue to run. If the user selects no the game ends
while (wantToPlay === "y" || wantToPlay === "yes") {
//keep track of min & max for range of guesses. Default values are 1 and 100
let min = 1;
let max = 100;
//allow the user to set the high range
max = await ask("\nWhat would you like the maximum number to be? ");
//makes sure number submitted is a valid number
while (isNaN(max)) {
max = await ask(
"\nLet's try this again. Please enter a number you'd like to use as the maximum. "
);
}
//confirms the user is ready to play
let readyToPlay = await ask(
`\nHave you decided on a random number between 1 and ${max}? (y/n): `
);
//sanitizes readyToPlay
readyToPlay = readyToPlay.trim().toLowerCase();
//waits until the player is ready to play by entering 'y' or 'yes'
while (readyToPlay !== "y" && readyToPlay !== "yes") {
readyToPlay = await ask(
`\nOk, I'll wait, please pick a number between 1 and ${max}. Are you ready now? (y/n): `
);
//sanitizes readyToPlay
readyToPlay = readyToPlay.trim().toLowerCase();
}
//declares a variable to store the user's number to be used to detect cheating
let secretNumber = await ask(
"\nWhat is your secret number? I won't peak, I promise..."
);
//sanitizes input into a number if a string has been entered
secretNumber = +secretNumber;
//guard clause to check that the secret number entered is a number and within the range
while (isNaN(secretNumber) || secretNumber > max || secretNumber < 1) {
//if the input entered is not a number prompts user to re-enter secret number
if (isNaN(secretNumber)) {
secretNumber = await ask(
`\nYou must enter a number. Please enter your secret number - remember it should be between 1 and ${max}. `
);
}
//if the input is outside of the range 1 to max, prompts the user to re-enter the secret number
else {
secretNumber = await ask(
`\nRemember, the number must be between 1 and ${max}. Please choose a different secret number that is within the correct range. `
);
}
//sanitizes input into a number if a string has been entered
secretNumber = +secretNumber;
}
//returns the secret number the user input
console.log(
`\nYou entered ${secretNumber}. \n\nBeep. Boop. Beep. Erasing from my memory.`
);
//starts the game
console.log("\nNow I will try to guess your secret number!");
// declares the variable that will store the users response if the computer's guess is correct or not
let response = "n";
//declares the numOfGuess variable to keep track of the number of guesses
let numOfGuess = 0;
//while the user has not responded 'y' to indicate that the computer has correctly guessed, the computer will continue making guesses
while ((response === "n") | (response === "no")) {
//sets the computer up to make a smart guess within the current range
let guess = makeSmartGuess(min, max);
//stores the users response if the computer's guess is correct or not
response = await ask(`\nIs the number ${guess}? (y/n): `);
//sanitizes response
response = response.trim().toLowerCase();
      //the computer has made another guess - increment the number of guesses made by 1
numOfGuess += 1;
// if the computer guessed the correct number ==> user responds 'y' and game gives victory message
if (response === "y" || response === "yes") {
console.log(
`\nAha! Your number was ${guess}! I win!\nIt only took me ${numOfGuess} tries to correctly guess your number.`
);
//prompts the user if they'd like to play again
wantToPlay = await ask("\nWould you like to play again? (y/n): ");
//sanitizes wantToPlay
wantToPlay = wantToPlay.trim().toLowerCase();
//if the user does not want to play again the game exits
if (wantToPlay === "n" || wantToPlay === "no") {
console.log("\nGoodbye, thanks for playing!");
process.exit();
}
}
//if the computer guessed wrong the user answers 'n' and computer asks if the number is higher or lower
else {
//if the min, max and guess are all equal then the computer has correctly narrowed down the number and the user is cheating
if (cheatDetector(min, max, guess, secretNumber, "")) {
console.log("Please be honest this time....");
//since numOfGuess will iterate once more when the computer prompts the users again we need to walk it down by 1 to correctly indicate the number of guesses taken
numOfGuess -= 1;
} else {
console.log("\nBummer.");
//declare the variable modifyRange that will hold h/l
let modifyRange = "";
while (!modifyRange) {
//stores the h/l response from the user in modifyRange
modifyRange = await ask(
`Is the number higher (h) or lower (l) than ${guess}? `
);
//sanitizes modifyRange
modifyRange = modifyRange.trim().toLowerCase();
// if the number is higher, the guess+1 is the new min of the range
if (modifyRange === "h" || modifyRange === "higher") {
if (cheatDetector(min, max, guess, secretNumber, modifyRange)) {
console.log("Please tell me the truth this time...");
modifyRange = "";
} else {
min = guess + 1;
}
}
//if the number is lower, the guess-1 is the new max of the range
else if (modifyRange === "l" || modifyRange === "lower") {
if (cheatDetector(min, max, guess, secretNumber, modifyRange)) {
console.log("Please tell me the truth this time...");
modifyRange = "";
} else {
max = guess - 1;
}
}
}
}
}
}
}
}
//Guess The Number game - the computer picks a number and the user guesses
async function userGuesses() {
//returns a random guess between the given min and max range
function chooseRandomNumber(min, max) {
return Math.floor(Math.random() * (max - min + 1) + min);
}
//intros the game
console.log(
"Let's play a game where I (computer) pick a number between 1 and 100, and you (human) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = "y";
//while wantToPlay is yes the game will continue to run. If the user selects no the game ends
while (wantToPlay === "y" || wantToPlay === "yes") {
//the computer picks a random number between 1 and 100
let randomNumber = chooseRandomNumber(1, 100);
//declare the variable to hold the user's guess
let guess = 0;
console.log("\nI have picked a random number between 1 and 100.");
//declares the numOfGuess variable to keep track of the number of guesses
let numOfGuess = 0;
//allows the user to guess as long as guess is not equal to the chosen randomNumber
while (randomNumber !== +guess) {
      //prompts the user for a guess
guess = await ask("\nPlease make a guess: ");
//makes sure guess submitted is a valid number
while (isNaN(guess)) {
guess = await ask(
"Let's try this again. Please enter a number as your guess. "
);
}
numOfGuess += 1;
//if the guess is less than the randomNumber indicates that the guess is too low
if (randomNumber > +guess) {
console.log("\nYou guessed too low.");
}
//if the guess is greater than the random number indicates that the guess is too high
else if (randomNumber < +guess) {
console.log("\nYou guessed too high.");
}
//only triggered if the guess is equal to the randomNumber
else {
console.log(
`\nCongratulations! You correctly guessed that the number is ${randomNumber}.`
);
//reports the number of guesses it took the user to correctly guess
if (numOfGuess >= 7) {
console.log(
`It took you ${numOfGuess} tries to correctly guess my number. You better keep practicing....`
);
} else {
console.log(`It only took you ${numOfGuess} tries. You're AMAZING!`);
}
//prompts the user if they'd like to play again
wantToPlay = await ask("\nWould you like to play again? (y/n): ");
//sanitizes wantToPlay
wantToPlay = wantToPlay.trim().toLowerCase();
//if the user does not want to play again the game exits
if (wantToPlay === "n" || wantToPlay === "no") {
console.log("\nGoodbye, thanks for playing!");
process.exit();
}
}
}
}
}
//lets the user choose which version of the guess the number game they'd like to play
async function playGame() {
console.log("Hello and Welcome to the Guess The Number Game!");
//asks the user which version of the game they'd like to play
let gameChoice = await ask(
"\nWhich game would you like to play?\n\t[1] You (human) pick a number, and I (computer) try to guess it.\n\t[2] I (computer) pick a number, and you (human) try to guess it.\nPlease select which game you'd like to play by entering either 1 or 2: "
);
//declares an array to hold the valid users input choices
let choices = ["1", "2"];
//sanitizes gameChoice
gameChoice = gameChoice.trim();
//checks that the choice is actually a number
while (isNaN(gameChoice) || !choices.includes(gameChoice)) {
gameChoice = await ask(
"Let's try this again. Please enter either 1 or 2 to choose the game you'd like to play. "
);
}
//sanitizes gameChoice
gameChoice = gameChoice.trim();
//triggers correct game to run once valid game choice has been made
if (gameChoice === "1") {
console.log("\nYou picked game 1, have fun!\n");
computerGuesses();
} else {
console.log("\nYou picked game 2, good luck!\n");
userGuesses();
}
}
playGame();
| {
console.log(
`\nCheater, cheater pumpkin eater! You said the number was higher than ${
min - 1
}, so it can't also be lower than ${guess}!\n`
);
return true;
} | conditional_block |
index.js | //Boilerplate code set up correctly - used to accept input from user
const readline = require("readline");
const rl = readline.createInterface(process.stdin, process.stdout);
function ask(questionText) {
return new Promise((resolve, reject) => {
rl.question(questionText, resolve);
});
}
//Guess The Number game - the user picks a number and the computer guesses
async function computerGuesses() {
//returns a guess that is half way between the min and max range
function makeSmartGuess(min, max) {
return min + Math.floor((max - min) / 2);
}
//cheat detector function that will return true if there is an issue with the response based on known range (true ==> lying, false ==> not lying)
function cheatDetector(min, max, guess, secretNumber, modifyRange) {
//if the computer's guess is the secret number but the user has said no, the computer calls them out for cheating
if (guess === secretNumber) {
console.log(
`\nHmmm, is your name Mufasa, 'cause I think you're a-lying...\n`
);
return true;
} else {
if (modifyRange === "h" || modifyRange === "higher") {
//if the user indicates the number is higher but the guess is already the max included value ==> returns true
if (guess + 1 > max) {
console.log(
`\nLiar, liar pants on fire! You said the number was lower than ${
max + 1
}, so it can't also be higher than ${guess}...\n`
);
return true;
}
}
if (modifyRange === "l" || modifyRange === "lower") {
//if the user indicates the number is lower but the guess is already the min included value ==> returns true
if (guess - 1 < min) {
console.log(
`\nCheater, cheater pumpkin eater! You said the number was higher than ${
min - 1
}, so it can't also be lower than ${guess}!\n`
);
return true;
}
}
return false;
}
}
//intros the game
console.log(
"Let's play a game where you (human) pick a number between 1 and a maximum, and I (computer) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = "y";
//while wantToPlay is yes the game will continue to run. If the user selects no the game ends
while (wantToPlay === "y" || wantToPlay === "yes") {
//keep track of min & max for range of guesses. Default values are 1 and 100
let min = 1;
let max = 100;
//allow the user to set the high range
max = await ask("\nWhat would you like the maximum number to be? ");
//makes sure number submitted is a valid number
while (isNaN(max)) {
max = await ask(
"\nLet's try this again. Please enter a number you'd like to use as the maximum. "
);
}
//confirms the user is ready to play
let readyToPlay = await ask(
`\nHave you decided on a random number between 1 and ${max}? (y/n): `
);
//sanitizes readyToPlay
readyToPlay = readyToPlay.trim().toLowerCase();
//waits until the player is ready to play by entering 'y' or 'yes'
while (readyToPlay !== "y" && readyToPlay !== "yes") {
readyToPlay = await ask(
`\nOk, I'll wait, please pick a number between 1 and ${max}. Are you ready now? (y/n): `
);
//sanitizes readyToPlay
readyToPlay = readyToPlay.trim().toLowerCase();
}
//declares a variable to store the user's number to be used to detect cheating
let secretNumber = await ask(
"\nWhat is your secret number? I won't peak, I promise..."
);
//sanitizes input into a number if a string has been entered
secretNumber = +secretNumber;
//guard clause to check that the secret number entered is a number and within the range
while (isNaN(secretNumber) || secretNumber > max || secretNumber < 1) {
//if the input entered is not a number prompts user to re-enter secret number
if (isNaN(secretNumber)) {
secretNumber = await ask(
`\nYou must enter a number. Please enter your secret number - remember it should be between 1 and ${max}. `
);
}
//if the input is outside of the range 1 to max, prompts the user to re-enter the secret number
else {
secretNumber = await ask(
`\nRemember, the number must be between 1 and ${max}. Please choose a different secret number that is within the correct range. `
);
}
//sanitizes input into a number if a string has been entered
secretNumber = +secretNumber;
}
//returns the secret number the user input
console.log(
`\nYou entered ${secretNumber}. \n\nBeep. Boop. Beep. Erasing from my memory.`
);
//starts the game
console.log("\nNow I will try to guess your secret number!");
// declares the variable that will store the users response if the computer's guess is correct or not
let response = "n";
//declares the numOfGuess variable to keep track of the number of guesses
let numOfGuess = 0;
//while the user has not responded 'y' to indicate that the computer has correctly guessed, the computer will continue making guesses
while ((response === "n") | (response === "no")) {
//sets the computer up to make a smart guess within the current range
let guess = makeSmartGuess(min, max);
//stores the users response if the computer's guess is correct or not
response = await ask(`\nIs the number ${guess}? (y/n): `);
//sanitizes response
response = response.trim().toLowerCase();
      //the computer has made another guess - increment the number of guesses made by 1
numOfGuess += 1;
// if the computer guessed the correct number ==> user responds 'y' and game gives victory message
if (response === "y" || response === "yes") {
console.log(
`\nAha! Your number was ${guess}! I win!\nIt only took me ${numOfGuess} tries to correctly guess your number.`
);
//prompts the user if they'd like to play again
wantToPlay = await ask("\nWould you like to play again? (y/n): "); | //sanitizes wantToPlay
wantToPlay = wantToPlay.trim().toLowerCase();
//if the user does not want to play again the game exits
if (wantToPlay === "n" || wantToPlay === "no") {
console.log("\nGoodbye, thanks for playing!");
process.exit();
}
}
//if the computer guessed wrong the user answers 'n' and computer asks if the number is higher or lower
else {
//if the min, max and guess are all equal then the computer has correctly narrowed down the number and the user is cheating
if (cheatDetector(min, max, guess, secretNumber, "")) {
console.log("Please be honest this time....");
//since numOfGuess will iterate once more when the computer prompts the users again we need to walk it down by 1 to correctly indicate the number of guesses taken
numOfGuess -= 1;
} else {
console.log("\nBummer.");
//declare the variable modifyRange that will hold h/l
let modifyRange = "";
while (!modifyRange) {
//stores the h/l response from the user in modifyRange
modifyRange = await ask(
`Is the number higher (h) or lower (l) than ${guess}? `
);
//sanitizes modifyRange
modifyRange = modifyRange.trim().toLowerCase();
// if the number is higher, the guess+1 is the new min of the range
if (modifyRange === "h" || modifyRange === "higher") {
if (cheatDetector(min, max, guess, secretNumber, modifyRange)) {
console.log("Please tell me the truth this time...");
modifyRange = "";
} else {
min = guess + 1;
}
}
//if the number is lower, the guess-1 is the new max of the range
else if (modifyRange === "l" || modifyRange === "lower") {
if (cheatDetector(min, max, guess, secretNumber, modifyRange)) {
console.log("Please tell me the truth this time...");
modifyRange = "";
} else {
max = guess - 1;
}
}
}
}
}
}
}
}
//Guess The Number game - the computer picks a number and the user guesses
async function userGuesses() {
//returns a random guess between the given min and max range
function chooseRandomNumber(min, max) {
return Math.floor(Math.random() * (max - min + 1) + min);
}
//intros the game
console.log(
"Let's play a game where I (computer) pick a number between 1 and 100, and you (human) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = "y";
//while wantToPlay is yes the game will continue to run. If the user selects no the game ends
while (wantToPlay === "y" || wantToPlay === "yes") {
//the computer picks a random number between 1 and 100
let randomNumber = chooseRandomNumber(1, 100);
//declare the variable to hold the user's guess
let guess = 0;
console.log("\nI have picked a random number between 1 and 100.");
//declares the numOfGuess variable to keep track of the number of guesses
let numOfGuess = 0;
//allows the user to guess as long as guess is not equal to the chosen randomNumber
while (randomNumber !== +guess) {
      //prompts the user for a guess
guess = await ask("\nPlease make a guess: ");
//makes sure guess submitted is a valid number
while (isNaN(guess)) {
guess = await ask(
"Let's try this again. Please enter a number as your guess. "
);
}
numOfGuess += 1;
//if the guess is less than the randomNumber indicates that the guess is too low
if (randomNumber > +guess) {
console.log("\nYou guessed too low.");
}
//if the guess is greater than the random number indicates that the guess is too high
else if (randomNumber < +guess) {
console.log("\nYou guessed too high.");
}
//only triggered if the guess is equal to the randomNumber
else {
console.log(
`\nCongratulations! You correctly guessed that the number is ${randomNumber}.`
);
//reports the number of guesses it took the user to correctly guess
if (numOfGuess >= 7) {
console.log(
`It took you ${numOfGuess} tries to correctly guess my number. You better keep practicing....`
);
} else {
console.log(`It only took you ${numOfGuess} tries. You're AMAZING!`);
}
//prompts the user if they'd like to play again
wantToPlay = await ask("\nWould you like to play again? (y/n): ");
//sanitizes wantToPlay
wantToPlay = wantToPlay.trim().toLowerCase();
//if the user does not want to play again the game exits
if (wantToPlay === "n" || wantToPlay === "no") {
console.log("\nGoodbye, thanks for playing!");
process.exit();
}
}
}
}
}
//lets the user choose which version of the guess the number game they'd like to play
async function playGame() {
console.log("Hello and Welcome to the Guess The Number Game!");
//asks the user which version of the game they'd like to play
let gameChoice = await ask(
"\nWhich game would you like to play?\n\t[1] You (human) pick a number, and I (computer) try to guess it.\n\t[2] I (computer) pick a number, and you (human) try to guess it.\nPlease select which game you'd like to play by entering either 1 or 2: "
);
//declares an array to hold the valid users input choices
let choices = ["1", "2"];
//sanitizes gameChoice
gameChoice = gameChoice.trim();
//checks that the choice is actually a number
while (isNaN(gameChoice) || !choices.includes(gameChoice)) {
gameChoice = await ask(
"Let's try this again. Please enter either 1 or 2 to choose the game you'd like to play. "
);
}
//sanitizes gameChoice
gameChoice = gameChoice.trim();
//triggers correct game to run once valid game choice has been made
if (gameChoice === "1") {
console.log("\nYou picked game 1, have fun!\n");
computerGuesses();
} else {
console.log("\nYou picked game 2, good luck!\n");
userGuesses();
}
}
playGame(); | random_line_split |
|
index.js | //Boilerplate code set up correctly - used to accept input from user
const readline = require("readline");
const rl = readline.createInterface(process.stdin, process.stdout);
function ask(questionText) {
return new Promise((resolve, reject) => {
rl.question(questionText, resolve);
});
}
//Guess The Number game - the user picks a number and the computer guesses
async function computerGuesses() {
//returns a guess that is half way between the min and max range
function makeSmartGuess(min, max) |
//cheat detector function that will return true if there is an issue with the response based on known range (true ==> lying, false ==> not lying)
function cheatDetector(min, max, guess, secretNumber, modifyRange) {
//if the computer's guess is the secret number but the user has said no, the computer calls them out for cheating
if (guess === secretNumber) {
console.log(
`\nHmmm, is your name Mufasa, 'cause I think you're a-lying...\n`
);
return true;
} else {
if (modifyRange === "h" || modifyRange === "higher") {
//if the user indicates the number is higher but the guess is already the max included value ==> returns true
if (guess + 1 > max) {
console.log(
`\nLiar, liar pants on fire! You said the number was lower than ${
max + 1
}, so it can't also be higher than ${guess}...\n`
);
return true;
}
}
if (modifyRange === "l" || modifyRange === "lower") {
//if the user indicates the number is lower but the guess is already the min included value ==> returns true
if (guess - 1 < min) {
console.log(
`\nCheater, cheater pumpkin eater! You said the number was higher than ${
min - 1
}, so it can't also be lower than ${guess}!\n`
);
return true;
}
}
return false;
}
}
//intros the game
console.log(
"Let's play a game where you (human) pick a number between 1 and a maximum, and I (computer) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = "y";
//while wantToPlay is yes the game will continue to run. If the user selects no the game ends
while (wantToPlay === "y" || wantToPlay === "yes") {
//keep track of min & max for range of guesses. Default values are 1 and 100
let min = 1;
let max = 100;
//allow the user to set the high range
max = await ask("\nWhat would you like the maximum number to be? ");
//makes sure number submitted is a valid number
while (isNaN(max)) {
max = await ask(
"\nLet's try this again. Please enter a number you'd like to use as the maximum. "
);
}
//confirms the user is ready to play
let readyToPlay = await ask(
`\nHave you decided on a random number between 1 and ${max}? (y/n): `
);
//sanitizes readyToPlay
readyToPlay = readyToPlay.trim().toLowerCase();
//waits until the player is ready to play by entering 'y' or 'yes'
while (readyToPlay !== "y" && readyToPlay !== "yes") {
readyToPlay = await ask(
`\nOk, I'll wait, please pick a number between 1 and ${max}. Are you ready now? (y/n): `
);
//sanitizes readyToPlay
readyToPlay = readyToPlay.trim().toLowerCase();
}
//declares a variable to store the user's number to be used to detect cheating
let secretNumber = await ask(
"\nWhat is your secret number? I won't peak, I promise..."
);
//sanitizes input into a number if a string has been entered
secretNumber = +secretNumber;
//guard clause to check that the secret number entered is a number and within the range
while (isNaN(secretNumber) || secretNumber > max || secretNumber < 1) {
//if the input entered is not a number prompts user to re-enter secret number
if (isNaN(secretNumber)) {
secretNumber = await ask(
`\nYou must enter a number. Please enter your secret number - remember it should be between 1 and ${max}. `
);
}
//if the input is outside of the range 1 to max, prompts the user to re-enter the secret number
else {
secretNumber = await ask(
`\nRemember, the number must be between 1 and ${max}. Please choose a different secret number that is within the correct range. `
);
}
//sanitizes input into a number if a string has been entered
secretNumber = +secretNumber;
}
//returns the secret number the user input
console.log(
`\nYou entered ${secretNumber}. \n\nBeep. Boop. Beep. Erasing from my memory.`
);
//starts the game
console.log("\nNow I will try to guess your secret number!");
// declares the variable that will store the users response if the computer's guess is correct or not
let response = "n";
//declares the numOfGuess variable to keep track of the number of guesses
let numOfGuess = 0;
//while the user has not responded 'y' to indicate that the computer has correctly guessed, the computer will continue making guesses
while ((response === "n") | (response === "no")) {
//sets the computer up to make a smart guess within the current range
let guess = makeSmartGuess(min, max);
//stores the users response if the computer's guess is correct or not
response = await ask(`\nIs the number ${guess}? (y/n): `);
//sanitizes response
response = response.trim().toLowerCase();
      //the computer has made another guess - increment the number of guesses made by 1
numOfGuess += 1;
// if the computer guessed the correct number ==> user responds 'y' and game gives victory message
if (response === "y" || response === "yes") {
console.log(
`\nAha! Your number was ${guess}! I win!\nIt only took me ${numOfGuess} tries to correctly guess your number.`
);
//prompts the user if they'd like to play again
wantToPlay = await ask("\nWould you like to play again? (y/n): ");
//sanitizes wantToPlay
wantToPlay = wantToPlay.trim().toLowerCase();
//if the user does not want to play again the game exits
if (wantToPlay === "n" || wantToPlay === "no") {
console.log("\nGoodbye, thanks for playing!");
process.exit();
}
}
//if the computer guessed wrong the user answers 'n' and computer asks if the number is higher or lower
else {
//if the min, max and guess are all equal then the computer has correctly narrowed down the number and the user is cheating
if (cheatDetector(min, max, guess, secretNumber, "")) {
console.log("Please be honest this time....");
//since numOfGuess will iterate once more when the computer prompts the users again we need to walk it down by 1 to correctly indicate the number of guesses taken
numOfGuess -= 1;
} else {
console.log("\nBummer.");
//declare the variable modifyRange that will hold h/l
let modifyRange = "";
while (!modifyRange) {
//stores the h/l response from the user in modifyRange
modifyRange = await ask(
`Is the number higher (h) or lower (l) than ${guess}? `
);
//sanitizes modifyRange
modifyRange = modifyRange.trim().toLowerCase();
// if the number is higher, the guess+1 is the new min of the range
if (modifyRange === "h" || modifyRange === "higher") {
if (cheatDetector(min, max, guess, secretNumber, modifyRange)) {
console.log("Please tell me the truth this time...");
modifyRange = "";
} else {
min = guess + 1;
}
}
//if the number is lower, the guess-1 is the new max of the range
else if (modifyRange === "l" || modifyRange === "lower") {
if (cheatDetector(min, max, guess, secretNumber, modifyRange)) {
console.log("Please tell me the truth this time...");
modifyRange = "";
} else {
max = guess - 1;
}
}
}
}
}
}
}
}
//Guess The Number game - the computer picks a number and the user guesses
async function userGuesses() {
//returns a random guess between the given min and max range
function chooseRandomNumber(min, max) {
return Math.floor(Math.random() * (max - min + 1) + min);
}
//intros the game
console.log(
"Let's play a game where I (computer) pick a number between 1 and 100, and you (human) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = "y";
//while wantToPlay is yes the game will continue to run. If the user selects no the game ends
while (wantToPlay === "y" || wantToPlay === "yes") {
//the computer picks a random number between 1 and 100
let randomNumber = chooseRandomNumber(1, 100);
//declare the variable to hold the user's guess
let guess = 0;
console.log("\nI have picked a random number between 1 and 100.");
//declares the numOfGuess variable to keep track of the number of guesses
let numOfGuess = 0;
//allows the user to guess as long as guess is not equal to the chosen randomNumber
while (randomNumber !== +guess) {
      //prompts the user for a guess
guess = await ask("\nPlease make a guess: ");
//makes sure guess submitted is a valid number
while (isNaN(guess)) {
guess = await ask(
"Let's try this again. Please enter a number as your guess. "
);
}
numOfGuess += 1;
//if the guess is less than the randomNumber indicates that the guess is too low
if (randomNumber > +guess) {
console.log("\nYou guessed too low.");
}
//if the guess is greater than the random number indicates that the guess is too high
else if (randomNumber < +guess) {
console.log("\nYou guessed too high.");
}
//only triggered if the guess is equal to the randomNumber
else {
console.log(
`\nCongratulations! You correctly guessed that the number is ${randomNumber}.`
);
//reports the number of guesses it took the user to correctly guess
if (numOfGuess >= 7) {
console.log(
`It took you ${numOfGuess} tries to correctly guess my number. You better keep practicing....`
);
} else {
console.log(`It only took you ${numOfGuess} tries. You're AMAZING!`);
}
//prompts the user if they'd like to play again
wantToPlay = await ask("\nWould you like to play again? (y/n): ");
//sanitizes wantToPlay
wantToPlay = wantToPlay.trim().toLowerCase();
//if the user does not want to play again the game exits
if (wantToPlay === "n" || wantToPlay === "no") {
console.log("\nGoodbye, thanks for playing!");
process.exit();
}
}
}
}
}
//lets the user choose which version of the guess the number game they'd like to play
async function playGame() {
console.log("Hello and Welcome to the Guess The Number Game!");
//asks the user which version of the game they'd like to play
let gameChoice = await ask(
"\nWhich game would you like to play?\n\t[1] You (human) pick a number, and I (computer) try to guess it.\n\t[2] I (computer) pick a number, and you (human) try to guess it.\nPlease select which game you'd like to play by entering either 1 or 2: "
);
//declares an array to hold the valid users input choices
let choices = ["1", "2"];
//sanitizes gameChoice
gameChoice = gameChoice.trim();
//checks that the choice is actually a number
while (isNaN(gameChoice) || !choices.includes(gameChoice)) {
gameChoice = await ask(
"Let's try this again. Please enter either 1 or 2 to choose the game you'd like to play. "
);
}
//sanitizes gameChoice
gameChoice = gameChoice.trim();
//triggers correct game to run once valid game choice has been made
if (gameChoice === "1") {
console.log("\nYou picked game 1, have fun!\n");
computerGuesses();
} else {
console.log("\nYou picked game 2, good luck!\n");
userGuesses();
}
}
playGame();
| {
return min + Math.floor((max - min) / 2);
} | identifier_body |
codec.rs | //! encode and decode the frames for the mux protocol.
//! The frames include the length of a PDU as well as an identifier
//! that informs us how to decode it. The length, ident and serial
//! number are encoded using a variable length integer encoding.
//! Rather than rely solely on serde to serialize and deserialize an
//! enum, we encode the enum variants with a version/identifier tag
//! for ourselves. This will make it a little easier to manage
//! client and server instances that are built from different versions
//! of this code; in this way the client and server can more gracefully
//! manage unknown enum variants.
#![allow(dead_code)]
use crate::mux::domain::DomainId;
use crate::mux::tab::TabId;
use crate::mux::window::WindowId;
use anyhow::{bail, Error};
use leb128;
use log::debug;
use portable_pty::{CommandBuilder, PtySize};
use serde_derive::*;
use std::io::Cursor;
use std::sync::Arc;
use term::selection::SelectionRange;
use termwiz::hyperlink::Hyperlink;
use termwiz::surface::{Change, SequenceNo};
use varbincode;
/// Returns the encoded length of the leb128 representation of value
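/// e.g. values 0..=127 encode to 1 byte, 128..=16383 to 2 bytes, and so on (7 bits per byte).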
fn | (value: u64) -> usize {
struct NullWrite {};
impl std::io::Write for NullWrite {
fn write(&mut self, buf: &[u8]) -> std::result::Result<usize, std::io::Error> {
Ok(buf.len())
}
fn flush(&mut self) -> std::result::Result<(), std::io::Error> {
Ok(())
}
};
leb128::write::unsigned(&mut NullWrite {}, value).unwrap()
}
const COMPRESSED_MASK: u64 = 1 << 63;
/// Encode a frame. If the data is compressed, the high bit of the length
/// is set to indicate that. The data written out has the format:
/// tagged_len: leb128 (u64 msb is set if data is compressed)
/// serial: leb128
/// ident: leb128
/// data bytes
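/// For example, an uncompressed frame with serial=1, ident=2 and a 3-byte payload
/// is written as 0x05 (= 3 + 1 + 1), 0x01 (serial), 0x02 (ident), then the 3 payload bytes.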
fn encode_raw<W: std::io::Write>(
ident: u64,
serial: u64,
data: &[u8],
is_compressed: bool,
mut w: W,
) -> Result<(), std::io::Error> {
let len = data.len() + encoded_length(ident) + encoded_length(serial);
let masked_len = if is_compressed {
(len as u64) | COMPRESSED_MASK
} else {
len as u64
};
// Double-buffer the data; since we run with nodelay enabled, it is
// desirable for the write to be a single packet (or at least, for
// the header portion to go out in a single packet)
let mut buffer = Vec::with_capacity(len + encoded_length(masked_len));
leb128::write::unsigned(&mut buffer, masked_len)?;
leb128::write::unsigned(&mut buffer, serial)?;
leb128::write::unsigned(&mut buffer, ident)?;
buffer.extend_from_slice(data);
w.write_all(&buffer)
}
/// Read a single leb128 encoded value from the stream
fn read_u64<R: std::io::Read>(mut r: R) -> Result<u64, std::io::Error> {
leb128::read::unsigned(&mut r).map_err(|err| match err {
leb128::read::Error::IoError(ioerr) => ioerr,
err => std::io::Error::new(std::io::ErrorKind::Other, format!("{}", err)),
})
}
#[derive(Debug)]
struct Decoded {
ident: u64,
serial: u64,
data: Vec<u8>,
is_compressed: bool,
}
/// Decode a frame.
/// See encode_raw() for the frame format.
fn decode_raw<R: std::io::Read>(mut r: R) -> Result<Decoded, std::io::Error> {
let len = read_u64(r.by_ref())?;
let (len, is_compressed) = if (len & COMPRESSED_MASK) != 0 {
(len & !COMPRESSED_MASK, true)
} else {
(len, false)
};
let serial = read_u64(r.by_ref())?;
let ident = read_u64(r.by_ref())?;
let data_len = len as usize - (encoded_length(ident) + encoded_length(serial));
let mut data = vec![0u8; data_len];
r.read_exact(&mut data)?;
Ok(Decoded {
ident,
serial,
data,
is_compressed,
})
}
#[derive(Debug, PartialEq)]
pub struct DecodedPdu {
pub serial: u64,
pub pdu: Pdu,
}
/// If the serialized size is larger than this, then we'll consider compressing it
const COMPRESS_THRESH: usize = 32;
fn serialize<T: serde::Serialize>(t: &T) -> Result<(Vec<u8>, bool), Error> {
let mut uncompressed = Vec::new();
let mut encode = varbincode::Serializer::new(&mut uncompressed);
t.serialize(&mut encode)?;
if uncompressed.len() <= COMPRESS_THRESH {
return Ok((uncompressed, false));
}
// It's a little heavy; let's try compressing it
let mut compressed = Vec::new();
let mut compress = zstd::Encoder::new(&mut compressed, zstd::DEFAULT_COMPRESSION_LEVEL)?;
let mut encode = varbincode::Serializer::new(&mut compress);
t.serialize(&mut encode)?;
drop(encode);
compress.finish()?;
debug!(
"serialized+compress len {} vs {}",
compressed.len(),
uncompressed.len()
);
if compressed.len() < uncompressed.len() {
Ok((compressed, true))
} else {
Ok((uncompressed, false))
}
}
fn deserialize<T: serde::de::DeserializeOwned, R: std::io::Read>(
mut r: R,
is_compressed: bool,
) -> Result<T, Error> {
if is_compressed {
let mut decompress = zstd::Decoder::new(r)?;
let mut decode = varbincode::Deserializer::new(&mut decompress);
serde::Deserialize::deserialize(&mut decode).map_err(Into::into)
} else {
let mut decode = varbincode::Deserializer::new(&mut r);
serde::Deserialize::deserialize(&mut decode).map_err(Into::into)
}
}
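// Editor's sketch (not part of the original file): serialize/deserialize are
// expected to round-trip any of the PDU structs defined below. Ping is an
// empty struct, so its payload stays under COMPRESS_THRESH and the zstd path
// is never taken here.
#[test]
fn serialize_roundtrip_sketch() {
    let (bytes, is_compressed) = serialize(&Ping {}).unwrap();
    assert!(!is_compressed);
    let decoded: Ping = deserialize(bytes.as_slice(), is_compressed).unwrap();
    assert_eq!(decoded, Ping {});
}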
macro_rules! pdu {
($( $name:ident:$vers:expr),* $(,)?) => {
#[derive(PartialEq, Debug)]
pub enum Pdu {
Invalid{ident: u64},
$(
$name($name)
,)*
}
impl Pdu {
pub fn encode<W: std::io::Write>(&self, w: W, serial: u64) -> Result<(), Error> {
match self {
Pdu::Invalid{..} => bail!("attempted to serialize Pdu::Invalid"),
$(
Pdu::$name(s) => {
let (data, is_compressed) = serialize(s)?;
encode_raw($vers, serial, &data, is_compressed, w)?;
Ok(())
}
,)*
}
}
pub fn decode<R: std::io::Read>(r:R) -> Result<DecodedPdu, Error> {
let decoded = decode_raw(r)?;
match decoded.ident {
$(
$vers => {
Ok(DecodedPdu {
serial: decoded.serial,
pdu: Pdu::$name(deserialize(decoded.data.as_slice(), decoded.is_compressed)?)
})
}
,)*
_ => Ok(DecodedPdu {
serial: decoded.serial,
pdu: Pdu::Invalid{ident:decoded.ident}
}),
}
}
}
}
}
// Defines the Pdu enum.
// Each struct has an explicit identifying number.
// This allows removal of obsolete structs,
// and defining newer structs as the protocol evolves.
pdu! {
ErrorResponse: 0,
Ping: 1,
Pong: 2,
ListTabs: 3,
ListTabsResponse: 4,
Spawn: 7,
SpawnResponse: 8,
WriteToTab: 9,
UnitResponse: 10,
SendKeyDown: 11,
SendMouseEvent: 12,
SendPaste: 13,
Resize: 14,
SendMouseEventResponse: 17,
GetTabRenderChanges: 18,
GetTabRenderChangesResponse: 19,
SetClipboard: 20,
OpenURL: 21,
}
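// Editor's note (illustrative): extending the protocol means adding a new
// serde-derived struct and listing it above with the next unused ident, for
// example `GetClipboard: 22,` (a hypothetical message, not part of this
// protocol). Peers built without that variant decode such frames as
// Pdu::Invalid instead of failing, which is the compatibility behaviour
// described in the module docs.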
impl Pdu {
pub fn stream_decode(buffer: &mut Vec<u8>) -> anyhow::Result<Option<DecodedPdu>> {
let mut cursor = Cursor::new(buffer.as_slice());
match Self::decode(&mut cursor) {
Ok(decoded) => {
let consumed = cursor.position() as usize;
let remain = buffer.len() - consumed;
// Remove `consumed` bytes from the start of the vec.
// This is safe because the vec is just bytes and we have
// constrained the offsets accordingly.
unsafe {
std::ptr::copy_nonoverlapping(
buffer.as_ptr().add(consumed),
buffer.as_mut_ptr(),
remain,
);
}
buffer.truncate(remain);
Ok(Some(decoded))
}
Err(err) => {
if let Some(ioerr) = err.downcast_ref::<std::io::Error>() {
match ioerr.kind() {
std::io::ErrorKind::UnexpectedEof | std::io::ErrorKind::WouldBlock => {
return Ok(None);
}
_ => {}
}
}
Err(err)
}
}
}
pub fn try_read_and_decode<R: std::io::Read>(
r: &mut R,
buffer: &mut Vec<u8>,
) -> anyhow::Result<Option<DecodedPdu>> {
loop {
if let Some(decoded) = Self::stream_decode(buffer)? {
return Ok(Some(decoded));
}
let mut buf = [0u8; 4096];
let size = match r.read(&mut buf) {
Ok(size) => size,
Err(err) => {
if err.kind() == std::io::ErrorKind::WouldBlock {
return Ok(None);
}
return Err(err.into());
}
};
if size == 0 {
return Err(
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "End Of File").into(),
);
}
buffer.extend_from_slice(&buf[0..size]);
}
}
pub fn tab_id(&self) -> Option<TabId> {
match self {
Pdu::GetTabRenderChangesResponse(GetTabRenderChangesResponse { tab_id, .. }) => {
Some(*tab_id)
}
Pdu::SetClipboard(SetClipboard { tab_id, .. }) => Some(*tab_id),
Pdu::OpenURL(OpenURL { tab_id, .. }) => Some(*tab_id),
_ => None,
}
}
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct UnitResponse {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct ErrorResponse {
pub reason: String,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Ping {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Pong {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct ListTabs {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct WindowAndTabEntry {
pub window_id: WindowId,
pub tab_id: TabId,
pub title: String,
pub size: PtySize,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct ListTabsResponse {
pub tabs: Vec<WindowAndTabEntry>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Spawn {
pub domain_id: DomainId,
/// If None, create a new window for this new tab
pub window_id: Option<WindowId>,
pub command: Option<CommandBuilder>,
pub size: PtySize,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SpawnResponse {
pub tab_id: TabId,
pub window_id: WindowId,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct WriteToTab {
pub tab_id: TabId,
pub data: Vec<u8>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendPaste {
pub tab_id: TabId,
pub data: String,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendKeyDown {
pub tab_id: TabId,
pub event: termwiz::input::KeyEvent,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendMouseEvent {
pub tab_id: TabId,
pub event: term::input::MouseEvent,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendMouseEventResponse {
pub selection_range: Option<SelectionRange>,
pub highlight: Option<Arc<Hyperlink>>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SetClipboard {
pub tab_id: TabId,
pub clipboard: Option<String>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct OpenURL {
pub tab_id: TabId,
pub url: String,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Resize {
pub tab_id: TabId,
pub size: PtySize,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct GetTabRenderChanges {
pub tab_id: TabId,
pub sequence_no: SequenceNo,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct GetTabRenderChangesResponse {
pub tab_id: TabId,
pub sequence_no: SequenceNo,
pub changes: Vec<Change>,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_frame() {
let mut encoded = Vec::new();
encode_raw(0x81, 0x42, b"hello", false, &mut encoded).unwrap();
assert_eq!(&encoded, b"\x08\x42\x81\x01hello");
let decoded = decode_raw(encoded.as_slice()).unwrap();
assert_eq!(decoded.ident, 0x81);
assert_eq!(decoded.serial, 0x42);
assert_eq!(decoded.data, b"hello");
}
#[test]
fn test_frame_lengths() {
let mut serial = 1;
for target_len in &[128, 247, 256, 65536, 16777216] {
let mut payload = Vec::with_capacity(*target_len);
payload.resize(*target_len, b'a');
let mut encoded = Vec::new();
encode_raw(0x42, serial, payload.as_slice(), false, &mut encoded).unwrap();
let decoded = decode_raw(encoded.as_slice()).unwrap();
assert_eq!(decoded.ident, 0x42);
assert_eq!(decoded.serial, serial);
assert_eq!(decoded.data, payload);
serial += 1;
}
}
#[test]
fn test_pdu_ping() {
let mut encoded = Vec::new();
Pdu::Ping(Ping {}).encode(&mut encoded, 0x40).unwrap();
assert_eq!(&encoded, &[2, 0x40, 1]);
assert_eq!(
DecodedPdu {
serial: 0x40,
pdu: Pdu::Ping(Ping {})
},
Pdu::decode(encoded.as_slice()).unwrap()
);
}
#[test]
fn stream_decode() {
let mut encoded = Vec::new();
Pdu::Ping(Ping {}).encode(&mut encoded, 0x1).unwrap();
Pdu::Pong(Pong {}).encode(&mut encoded, 0x2).unwrap();
assert_eq!(encoded.len(), 6);
let mut cursor = Cursor::new(encoded.as_slice());
let mut read_buffer = Vec::new();
assert_eq!(
Pdu::try_read_and_decode(&mut cursor, &mut read_buffer).unwrap(),
Some(DecodedPdu {
serial: 1,
pdu: Pdu::Ping(Ping {})
})
);
assert_eq!(
Pdu::try_read_and_decode(&mut cursor, &mut read_buffer).unwrap(),
Some(DecodedPdu {
serial: 2,
pdu: Pdu::Pong(Pong {})
})
);
let err = Pdu::try_read_and_decode(&mut cursor, &mut read_buffer).unwrap_err();
assert_eq!(
err.downcast_ref::<std::io::Error>().unwrap().kind(),
std::io::ErrorKind::UnexpectedEof
);
}
#[test]
fn test_pdu_ping_base91() {
let mut encoded = Vec::new();
{
let mut encoder = base91::Base91Encoder::new(&mut encoded);
Pdu::Ping(Ping {}).encode(&mut encoder, 0x41).unwrap();
}
assert_eq!(&encoded, &[60, 67, 75, 65]);
let decoded = base91::decode(&encoded);
assert_eq!(
DecodedPdu {
serial: 0x41,
pdu: Pdu::Ping(Ping {})
},
Pdu::decode(decoded.as_slice()).unwrap()
);
}
#[test]
fn test_pdu_pong() {
let mut encoded = Vec::new();
Pdu::Pong(Pong {}).encode(&mut encoded, 0x42).unwrap();
assert_eq!(&encoded, &[2, 0x42, 2]);
assert_eq!(
DecodedPdu {
serial: 0x42,
pdu: Pdu::Pong(Pong {})
},
Pdu::decode(encoded.as_slice()).unwrap()
);
}
#[test]
fn test_bogus_pdu() {
let mut encoded = Vec::new();
encode_raw(0xdeadbeef, 0x42, b"hello", false, &mut encoded).unwrap();
assert_eq!(
DecodedPdu {
serial: 0x42,
pdu: Pdu::Invalid { ident: 0xdeadbeef }
},
Pdu::decode(encoded.as_slice()).unwrap()
);
}
}
macro.rs | #![crate_name = "docopt_macros"]
#![crate_type = "dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
//! This crate defines the `docopt!` macro. It is documented in the
//! documentation of the `docopt` crate.
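// Editor's usage sketch (illustrative; the authoritative syntax is described
// in the docopt crate's documentation). A call along the lines of
//
//     docopt!(Args derive Debug, "
//     Usage: prog [--verbose] <name>
//     ", arg_name: String);
//
// expands to a struct `Args` with one field per usage-pattern atom plus an
// `Args::docopt()` constructor; MacParser and Parsed below implement exactly
// that expansion.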
extern crate syntax;
extern crate rustc_plugin;
extern crate docopt;
use std::borrow::Borrow;
use std::collections::HashMap;
use rustc_plugin::Registry;
use syntax::{ast, codemap, tokenstream};
use syntax::errors::DiagnosticBuilder;
use syntax::ext::base::{ExtCtxt, MacResult, MacEager, DummyResult};
use syntax::ext::build::AstBuilder;
use syntax::fold::Folder;
use syntax::parse::common::SeqSep;
use syntax::parse::parser::Parser;
use syntax::parse::token;
use syntax::print::pprust;
use syntax::ptr::P;
use syntax::symbol;
use syntax::util::small_vector::SmallVector;
use docopt::{Docopt, ArgvMap};
use docopt::parse::{Options, Atom, Positional, Zero, One};
type PResult<'a, T> = Result<T, DiagnosticBuilder<'a>>;
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("docopt", expand);
}
fn expand(cx: &mut ExtCtxt, span: codemap::Span, tts: &[tokenstream::TokenTree])
-> Box<MacResult+'static> {
let parsed = match MacParser::new(cx, tts).parse() {
Ok(parsed) => parsed,
Err(_) => return DummyResult::any(span),
};
parsed.items(cx)
}
/// Parsed corresponds to the result of parsing a `docopt` macro call.
/// It can be used to write a corresponding struct.
struct Parsed {
struct_info: StructInfo,
doc: Docopt,
/// Overridden type annotations for struct members. May be empty.
/// When a type annotation for an atom doesn't exist, then one is
/// inferred automatically. It is one of: `bool`, `usize`, `String` or
/// `Vec<String>`.
types: HashMap<Atom, P<ast::Ty>>,
}
impl Parsed {
/// Returns a macro result suitable for expansion.
/// Contains two items: one for the struct and one for the struct impls.
fn items(&self, cx: &ExtCtxt) -> Box<MacResult+'static> {
let mut its = vec!();
its.push(self.struct_decl(cx));
let struct_name = self.struct_info.name;
let full_doc = &*self.doc.parser().full_doc;
its.push(quote_item!(cx,
impl $struct_name {
#[allow(dead_code)]
fn docopt() -> docopt::Docopt {
// The unwrap is justified here because this code
// gen only happens if the Docopt usage string is valid.
docopt::Docopt::new($full_doc).unwrap()
}
}
).unwrap());
MacEager::items(SmallVector::many(its))
}
/// Returns an item for the struct definition.
fn struct_decl(&self, cx: &ExtCtxt) -> P<ast::Item> {
let name = self.struct_info.name.clone();
let vis = if self.struct_info.public { ast::Visibility::Public }
else { ast::Visibility::Inherited };
let def = ast::VariantData::Struct(
self.struct_fields(cx), ast::DUMMY_NODE_ID);
let mut traits = vec!["RustcDecodable".to_string()];
traits.extend(self.struct_info.deriving.iter().cloned());
let attrs = vec![attribute(cx, "allow", vec!["non_snake_case"]),
attribute(cx, "derive", traits)];
let st = cx.item_struct(codemap::DUMMY_SP, name.clone(), def);
cx.item(codemap::DUMMY_SP, name, attrs, st.node.clone()).map(|mut it| {
it.vis = vis;
it
})
}
/// Returns a list of fields for the struct definition.
/// Handles type annotations.
fn struct_fields(&self, cx: &ExtCtxt) -> Vec<ast::StructField> {
let mut fields: Vec<ast::StructField> = vec!();
for (atom, opts) in self.doc.parser().descs.iter() {
let name = ArgvMap::key_to_struct_field(&*atom.to_string());
let ty = match self.types.get(atom) {
None => self.pat_type(cx, atom, opts),
Some(ty) => ty.clone(),
};
fields.push(self.mk_struct_field(&*name, ty));
}
fields
}
/// Returns an inferred type for a usage pattern.
/// This is only invoked when a type annotation is not present.
fn pat_type(&self, cx: &ExtCtxt, atom: &Atom, opts: &Options) -> P<ast::Ty> {
let sp = codemap::DUMMY_SP;
match (opts.repeats, &opts.arg) {
(false, &Zero) => {
match atom {
&Positional(_) => cx.ty_ident(sp, ident("String")),
_ => cx.ty_ident(sp, ident("bool")),
}
}
(true, &Zero) => {
match atom {
&Positional(_) => ty_vec_string(cx),
_ => cx.ty_ident(sp, ident("usize")),
}
}
(false, &One(_)) => cx.ty_ident(sp, ident("String")),
(true, &One(_)) => ty_vec_string(cx),
}
}
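// Editor's note (summarising the match arms above): the inferred field types
// are
//   --flag                 -> bool        repeated flag (-v...)        -> usize
//   positional <arg>       -> String      repeated positional <arg>... -> Vec<String>
//   --opt=<val>            -> String      repeated --opt=<val>...      -> Vec<String>
// and an explicit `field: Type` annotation in the macro call overrides this
// inference.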
/// Creates a struct field from a member name and type.
fn mk_struct_field(&self, name: &str, ty: P<ast::Ty>) -> ast::StructField {
ast::StructField {
span: codemap::DUMMY_SP,
ident: Some(ident(name)),
vis: ast::Visibility::Public,
id: ast::DUMMY_NODE_ID,
ty: ty,
attrs: vec!(),
}
}
}
/// State for parsing a `docopt` macro invocation.
struct MacParser<'a, 'b:'a> {
cx: &'a mut ExtCtxt<'b>,
p: Parser<'b>,
}
impl<'a, 'b> MacParser<'a, 'b> {
fn new(cx: &'a mut ExtCtxt<'b>, tts: &[tokenstream::TokenTree]) -> MacParser<'a, 'b> {
let p = cx.new_parser_from_tts(tts);
MacParser { cx: cx, p: p }
}
/// Main entry point for parsing arguments to `docopt` macro.
/// First looks for an identifier for the struct name.
/// Second, a string containing the docopt usage patterns.
/// Third, an optional list of type annotations.
fn parse(&mut self) -> PResult<'b, Parsed> {
if self.p.token == token::Eof {
let err = self.cx.struct_span_err(
self.cx.call_site(), "macro expects arguments");
return Err(err);
}
let struct_info = try!(self.parse_struct_info());
let docstr = try!(self.parse_str());
let mut types = HashMap::new();
if !self.p.check(&token::Eof) {
let sep = SeqSep {
sep: Some(token::Comma),
trailing_sep_allowed: true,
};
types = self.p.parse_seq_to_before_end(
&token::Eof, sep, |p| MacParser::parse_type_annotation(p)
).into_iter()
.map(|(ident, ty)| {
let field_name = ident.to_string();
let key = ArgvMap::struct_field_to_key(&*field_name);
(Atom::new(&*key), ty)
})
.collect::<HashMap<Atom, P<ast::Ty>>>();
try!(self.p.expect(&token::Eof));
}
// This config does not matter because we're only asking for the
// usage patterns in the Docopt string. The configuration does not
// affect the retrieval of usage patterns.
let doc = match Docopt::new(docstr) {
Ok(doc) => doc,
Err(err) => {
let err = self.cx.struct_span_err(
self.cx.call_site(),
&format!("Invalid Docopt usage: {}", err));
return Err(err);
}
};
Ok(Parsed {
struct_info: struct_info,
doc: doc,
types: types,
})
}
/// Parses a single string literal. On failure, a diagnostic error is
/// returned.
fn parse_str(&mut self) -> PResult<'b, String> {
fn lit_is_str(lit: &ast::Lit) -> bool {
match lit.node {
ast::LitKind::Str(_, _) => true,
_ => false,
}
}
fn lit_to_string(lit: &ast::Lit) -> String {
match lit.node {
ast::LitKind::Str(ref s, _) => s.to_string(),
_ => panic!("BUG: expected string literal"),
}
}
let exp = self.cx.expander().fold_expr(self.p.parse_expr().unwrap());
let s = match exp.node {
ast::ExprKind::Lit(ref lit) if lit_is_str(&**lit) => {
lit_to_string(&**lit)
}
_ => {
let err = format!("Expected string literal but got {}",
pprust::expr_to_string(&*exp));
let err = self.cx.struct_span_err(exp.span, &*err);
return Err(err);
}
};
self.p.bump();
Ok(s)
}
/// Parses a type annotation in a `docopt` invocation of the form
/// `ident: Ty`.
/// Note that this is a static method as it is used as a HOF.
fn parse_type_annotation(p: &mut Parser<'b>)
-> PResult<'b, (ast::Ident, P<ast::Ty>)> {
let ident = try!(p.parse_ident());
try!(p.expect(&token::Colon));
let ty = p.parse_ty().unwrap();
Ok((ident, ty))
}
/// Parses struct information, like visibility, name and deriving.
fn parse_struct_info(&mut self) -> PResult<'b, StructInfo> {
let public = self.p.eat_keyword(symbol::keywords::Pub);
let mut info = StructInfo {
name: try!(self.p.parse_ident()),
public: public,
deriving: vec![],
};
if self.p.eat(&token::Comma) { return Ok(info); }
let deriving = try!(self.p.parse_ident());
if *deriving.name.as_str() != *"derive" {
let err = format!("Expected 'derive' keyword but got '{}'",
deriving);
let err = self.cx.struct_span_err(self.cx.call_site(), &*err);
return Err(err);
}
while !self.p.eat(&token::Comma) {
info.deriving.push(
try!(self.p.parse_ident()).name.to_string());
}
Ok(info)
}
}
struct StructInfo {
name: ast::Ident,
public: bool,
deriving: Vec<String>,
}
// Convenience functions for building intermediate values.
fn ident(s: &str) -> ast::Ident {
ast::Ident::with_empty_ctxt(symbol::Symbol::intern(s))
}
fn attribute<S, T>(cx: &ExtCtxt, name: S, items: Vec<T>) -> ast::Attribute
where S: Borrow<str>, T: Borrow<str> {
let sp = codemap::DUMMY_SP;
let its = items.into_iter().map(|s| meta_item(cx, s.borrow())).collect();
let mi = cx.meta_list(sp, intern(name.borrow()), its);
cx.attribute(sp, mi)
}
fn meta_item(cx: &ExtCtxt, s: &str) -> codemap::Spanned<ast::NestedMetaItemKind> {
codemap::Spanned {
node: ast::NestedMetaItemKind::MetaItem(cx.meta_word(codemap::DUMMY_SP, intern(s))),
span: cx.call_site(),
}
}
fn intern(s: &str) -> symbol::Symbol {
symbol::Symbol::intern(s)
}
fn ty_vec_string(cx: &ExtCtxt) -> P<ast::Ty> {
let sp = codemap::DUMMY_SP;
let tystr = ast::AngleBracketedParameterData {
lifetimes: vec![],
types: vec![cx.ty_ident(sp, ident("String"))],
bindings: vec![],
};
cx.ty_path(ast::Path {
span: sp,
segments: vec![ast::PathSegment {
identifier: ident("Vec"),
parameters: Some(P(ast::PathParameters::AngleBracketed(tystr))),
}]
})
}
program.py | # Importing the details of the spy
from spy_details import spy, Spy, friends, ChatMessage
# Importing steganography module
from steganography.steganography import Steganography
# colored() is used for the closing messages below; it comes from termcolor
from termcolor import colored
# List of status messages
STATUS_MESSAGES = ['Having Fun', 'Sunny Day', "Busy",
"Feeling Lazy", "Damm it it feels good to be a gangster", "Message only"]
print("Hello!")
print ("Let\'s get started!")
Choice = raw_input("Do you want to continue as the default user-" + spy.salutation + " " + spy.name + " or create a new user? (Y/N):")
#Function to add a status
def add_status():
updated_status_message = None
if spy.current_status_message is not None:
print ('Your current status message is %s \n' % spy.current_status_message)
else:
print ('You don\'t have any status message currently \n')
default = raw_input("Do you want to select from the older status (y/n)? ")
if default.upper() == "N":
new_status_message = raw_input("What status message do you want to see?: ")
if len(new_status_message) > 0:
STATUS_MESSAGES.append(new_status_message)
updated_status_message = new_status_message
elif default.upper() == 'Y':
item_position = 1
for message in STATUS_MESSAGES:
print ('%d. %s' % (item_position, message))
item_position = item_position + 1
message_selection = int(raw_input("\nChoose the index of the status: "))
if len(STATUS_MESSAGES) >= message_selection:
updated_status_message = STATUS_MESSAGES[message_selection - 1]
else:
print ('The option you chose is not valid! Press either Y or N.')
if updated_status_message:
print ('Your updated status message is:'),
print(updated_status_message)
else:
print('You did not update your status message')
return updated_status_message
#Function to add a friend
def add_friend():
new_friend = Spy(" ", " ", 0, 0.0)
new_friend.name = raw_input("Please add your friend's name: ")
new_friend.salutation = raw_input("Are they Mr or Ms?: ")
new_friend.age = input("Age?: ")
new_friend.age = int(new_friend.age)
new_friend.rating = input("Spy rating?: ")
new_friend.rating = float(new_friend.rating)
if len(new_friend.name) > 0 and new_friend.name.isdigit() == False and 12 < new_friend.age < 50 and new_friend.salutation.isalpha() == True and new_friend.rating >= spy.rating :
friends.append(new_friend)
print('Friend Added!')
else:
print("Sorry, the friend cannot be a spy!")
return len(friends)
#Function to select a friend
def select_a_friend():
item_number = 0
for friend in friends:
print ('%d. %s %s aged %d with rating %.2f is online' % (item_number + 1, friend.salutation, friend.name, friend.age, friend.rating))
item_number = item_number + 1
friend_choice = raw_input("Choose the index of the friend: ")
friend_choice_position = int(friend_choice) - 1
if friend_choice_position + 1 > len(friends):
print("Sorry,This friend is not present.")
exit()
else:
return friend_choice_position
#Function to send a message
def send_a_message():
friend_choice = select_a_friend()
original_image = raw_input("What is the name of the image?: ")
output_path = "output.jpg"
text = raw_input("What do you want to say? ")
Steganography.encode(original_image, output_path, text)
new_chat = ChatMessage(text)
friends[friend_choice].chats.append(new_chat)
print("Your secret message image is ready!")
#Function to read a message
def read_a_message():
sender = select_a_friend()
output_path = raw_input("What is the name of the image file?: ")
try:
secret_text = Steganography.decode(output_path)
print ("The secret message you read is"),
print (secret_text)
words = secret_text.split()
new = (secret_text.upper()).split()
new_chat = ChatMessage(secret_text)
# Adds the mesaage to the list of chats
friends[sender].chats.append(new_chat)
print("Your secret message has been saved!")
# If any error occurs during decoding
except TypeError:
print("Nothing to decode from the image as it contains no secret message.")
#Function to read chat history
def read_chat_history():
read_for = select_a_friend()
print ('\n')
for chat in friends[read_for].chats:
if chat.sent_by_me:
print(str(chat.time.strftime("%d %B %Y %A %H:%M")) + ","),
print("You said:"),
print(str(chat.message))
else:
print(str(chat.time.strftime("%d %B %Y %A %H:%M")) + ","),
print(str(friends[read_for].name) + " said:"),
print(str(chat.message))
#Function to select the default user
def start_chat(spy):
spy.name = spy.salutation + " " + spy.name
if 12 < spy.age < 50:
print("Authentication complete.")
print("Welcome " + str(spy.name))
print("Your age:" + str(spy.age))
print("Your rating:"+str(spy.rating))
print("Bravo!Proud to have you on board.")
show_menu = True
while show_menu:
menu_choices = "What do you want to do? \n 1. Add a status update \n" \
" 2. Add a friend \n 3. Send a secret message \n " \
"4. Read a secret message \n 5. Read Chats from a user \n" \
" 6. Close Application \n"
menu_choice = raw_input(menu_choices)
if len(menu_choice) > 0:
menu_choice = int(menu_choice)
if menu_choice == 1:
spy.current_status_message = add_status()
elif menu_choice == 2:
number_of_friends = add_friend()
print ('You have %d friends' % number_of_friends)
elif menu_choice == 3:
send_a_message()
elif menu_choice == 4:
read_a_message()
elif menu_choice == 5:
read_chat_history()
elif menu_choice == 6:
print("Successfully closed")
show_menu = False
else:
print("That was a wrong choice.")
exit()
else:
if spy.age <= 12:
print("Sorry, you are too young to become a spy!")
elif spy.age >= 50:
print("Sorry, you are too old to be a spy!")
#Block of code from which execution starts
if Choice.upper() == "Y":
#Function call for default user
start_chat(spy)
elif Choice.upper() == "N":
spy = Spy(" ", " ", 0, 0.0)
#
spy.name = raw_input("Welcome to spy chat, you must tell me your spy name first: ")
if len(spy.name) > 0 and spy.name.isdigit() == False:
spy.salutation = raw_input("What should we call you? Mr. or Ms.?")
if len(spy.salutation) > 0:
spy.age = raw_input("Please enter your age: ")
if len(spy.age) > 0:
spy.age = int(spy.age)
if 12 <= spy.age < 50:
print("Welcome to Spy community")
spy.rating = raw_input("Please enter your spy rating: ")
if len(spy.rating) > 0:
spy.rating = float(spy.rating)
if spy.rating > 4.5:
print("Great Ace!")
elif 3.5 <= spy.rating <= 4.5:
print("You are one of the good ones!")
elif 2.5 <= spy.rating <= 3.5:
print("You can always do better.")
else:
print("We will get someone to help you.")
spy.is_online = True
start_chat(spy)
else:
print ("Enter a valid spy rating")
else:
if spy.age <= 12:
print("Sorry, you are too young to become a spy!")
elif spy.age >= 50:
print("Sorry, you are too old to be a spy!")
else:
print("Please enter a valid age")
else:
print("Please enter your age")
else:
print("Please enter a valid salutation")
else:
print("Please enter a valid name")
else:
print(colored("You did not reply with a yes(Y) or no(N)!", 'green'))
print(colored("Need to run the program again.", 'green'))
exit() | print("Sorry, the friend cannot be a spy!") | conditional_block |
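Each row above ends with its held-out middle and a fim_type tag (here conditional_block). As a hedged illustration of how such a row maps back to source code, the sketch below recombines a prefix, middle, and suffix by plain concatenation; the helper name and the miniature strings are invented for the example and are not taken from the dataset.

# Minimal sketch (not part of the dataset tooling): a row stores one source
# file as prefix/middle/suffix, so simple concatenation recovers the original.
def assemble_fim(prefix, middle, suffix):
    """Recombine a fill-in-the-middle example into the original source."""
    return prefix + middle + suffix

# Made-up miniature row echoing the conditional_block middle shown above.
prefix = 'else:\n'
middle = 'print("Sorry, the friend cannot be a spy!")'
suffix = '\nreturn len(friends)'
print(assemble_fim(prefix, middle, suffix))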
program.py | # Importing the details of the spy
from spy_details import spy, Spy, friends, ChatMessage
# Importing steganography module
from steganography.steganography import Steganography
# List of status messages
STATUS_MESSAGES = ['Having Fun', 'Sunny Day', "Busy",
"Feeling Lazy", "Damm it it feels good to be a gangster", "Message only"]
print("Hello!")
print ("Let\'s get started!")
Choice = raw_input("Do you want to continue as the default user-" + spy.salutation + " " + spy.name + " or create a new user? (Y/N):")
#Function to add a status
def add_status():
updated_status_message = None
if spy.current_status_message is not None:
print ('Your current status message is %s \n' % spy.current_status_message)
else:
print ('You don\'t have any status message currently \n')
default = raw_input("Do you want to select from the older status (y/n)? ")
if default.upper() == "N":
new_status_message = raw_input("What status message do you want to see?: ")
if len(new_status_message) > 0:
STATUS_MESSAGES.append(new_status_message)
updated_status_message = new_status_message
elif default.upper() == 'Y':
item_position = 1
for message in STATUS_MESSAGES:
print ('%d. %s' % (item_position, message))
item_position = item_position + 1
message_selection = int(raw_input("\nChoose the index of the status: "))
if len(STATUS_MESSAGES) >= message_selection:
updated_status_message = STATUS_MESSAGES[message_selection - 1]
else:
print ('The option you chose is not valid! Press either Y or N.')
if updated_status_message:
print ('Your updated status message is:'),
print(updated_status_message)
else:
print('You did not update your status message')
return updated_status_message
#Function to add a friend
def add_friend():
new_friend = Spy(" ", " ", 0, 0.0)
new_friend.name = raw_input("Please add your friend's name: ")
new_friend.salutation = raw_input("Are they Mr or Ms?: ")
new_friend.age = input("Age?: ")
new_friend.age = int(new_friend.age)
new_friend.rating = input("Spy rating?: ")
new_friend.rating = float(new_friend.rating)
if len(new_friend.name) > 0 and new_friend.name.isdigit() == False and 12 < new_friend.age < 50 and new_friend.salutation.isalpha() == True and new_friend.rating >= spy.rating :
friends.append(new_friend)
print('Friend Added!')
else:
print("Sorry, the friend cannot be a spy!")
return len(friends)
#Function to select a friend
def | ():
item_number = 0
for friend in friends:
print ('%d. %s %s aged %d with rating %.2f is online' % (item_number + 1, friend.salutation, friend.name, friend.age, friend.rating))
item_number = item_number + 1
friend_choice = raw_input("Choose the index of the friend: ")
friend_choice_position = int(friend_choice) - 1
if friend_choice_position + 1 > len(friends):
print("Sorry,This friend is not present.")
exit()
else:
return friend_choice_position
#Function to send a message
def send_a_message():
friend_choice = select_a_friend()
original_image = raw_input("What is the name of the image?: ")
output_path = "output.jpg"
text = raw_input("What do you want to say? ")
Steganography.encode(original_image, output_path, text)
new_chat = ChatMessage(text)
friends[friend_choice].chats.append(new_chat)
print("Your secret message image is ready!")
#Function to read a message
def read_a_message():
sender = select_a_friend()
output_path = raw_input("What is the name of the image file?: ")
try:
secret_text = Steganography.decode(output_path)
print ("The secret message you read is"),
print (secret_text)
words = secret_text.split()
new = (secret_text.upper()).split()
new_chat = ChatMessage(secret_text)
# Adds the message to the list of chats
friends[sender].chats.append(new_chat)
print("Your secret message has been saved!")
# If any error occurs during decoding
except TypeError:
print("Nothing to decode from the image as it contains no secret message.")
#Function to read chat history
def read_chat_history():
read_for = select_a_friend()
print ('\n')
for chat in friends[read_for].chats:
if chat.sent_by_me:
print(str(chat.time.strftime("%d %B %Y %A %H:%M")) + ","),
print("You said:"),
print(str(chat.message))
else:
print(str(chat.time.strftime("%d %B %Y %A %H:%M")) + ","),
print(str(friends[read_for].name) + " said:"),
print(str(chat.message))
#Function to select the default user
def start_chat(spy):
spy.name = spy.salutation + " " + spy.name
if 12 < spy.age < 50:
print("Authentication complete.")
print("Welcome " + str(spy.name))
print("Your age:" + str(spy.age))
print("Your rating:"+str(spy.rating))
print("Bravo!Proud to have you on board.")
show_menu = True
while show_menu:
menu_choices = "What do you want to do? \n 1. Add a status update \n" \
" 2. Add a friend \n 3. Send a secret message \n " \
"4. Read a secret message \n 5. Read Chats from a user \n" \
" 6. Close Application \n"
menu_choice = raw_input(menu_choices)
if len(menu_choice) > 0:
menu_choice = int(menu_choice)
if menu_choice == 1:
spy.current_status_message = add_status()
elif menu_choice == 2:
number_of_friends = add_friend()
print ('You have %d friends' % number_of_friends)
elif menu_choice == 3:
send_a_message()
elif menu_choice == 4:
read_a_message()
elif menu_choice == 5:
read_chat_history()
elif menu_choice == 6:
print("Successfully closed")
show_menu = False
else:
print("That was a wrong choice.")
exit()
else:
if spy.age <= 12:
print("Sorry, you are too young to become a spy!")
elif spy.age >= 50:
print("Sorry, you are too old to be a spy!")
#Block of code from which execution starts
if Choice.upper() == "Y":
#Function call for default user
start_chat(spy)
elif Choice.upper() == "N":
spy = Spy(" ", " ", 0, 0.0)
#
spy.name = raw_input("Welcome to spy chat, you must tell me your spy name first: ")
if len(spy.name) > 0 and spy.name.isdigit() == False:
spy.salutation = raw_input("What should we call you? Mr. or Ms.?")
if len(spy.salutation) > 0:
spy.age = raw_input("Please enter your age: ")
if len(spy.age) > 0:
spy.age = int(spy.age)
if 12 <= spy.age < 50:
print("Welcome to Spy community")
spy.rating = raw_input("Please enter your spy rating: ")
if len(spy.rating) > 0:
spy.rating = float(spy.rating)
if spy.rating > 4.5:
print("Great Ace!")
elif 3.5 <= spy.rating <= 4.5:
print("You are one of the good ones!")
elif 2.5 <= spy.rating <= 3.5:
print("You can always do better.")
else:
print("We will get someone to help you.")
spy.is_online = True
start_chat(spy)
else:
print ("Enter a valid spy rating")
else:
if spy.age <= 12:
print("Sorry, you are too young to become a spy!")
elif spy.age >= 50:
print("Sorry, you are too old to be a spy!")
else:
print("Please enter a valid age")
else:
print("Please enter your age")
else:
print("Please enter a valid salutation")
else:
print("Please enter a valid name")
else:
print(colored("You did not reply with a yes(Y) or no(N)!", 'green'))
print(colored("Need to run the program again.", 'green'))
exit() | select_a_friend | identifier_name |
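In the row above the fim_type is identifier_name and the held-out middle is just the function name select_a_friend; the surrounding file is Python 2 (raw_input, print statements with trailing commas). For comparison only, the sketch below is a rough Python 3 rendering of the same friend-selection logic; it is an illustrative rewrite, not part of the dataset.

# Rough Python 3 sketch of select_a_friend (assumed equivalent, untested
# against the original program): list the friends, read an index, validate it.
def select_a_friend(friends):
    for i, friend in enumerate(friends, start=1):
        print(f"{i}. {friend.salutation} {friend.name} aged {friend.age} "
              f"with rating {friend.rating:.2f} is online")
    choice = int(input("Choose the index of the friend: ")) - 1
    if choice < 0 or choice >= len(friends):
        raise SystemExit("Sorry, this friend is not present.")
    return choice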
program.py | # Importing the details of the spy
from spy_details import spy, Spy, friends, ChatMessage
# Importing steganography module
from steganography.steganography import Steganography
# List of status messages
STATUS_MESSAGES = ['Having Fun', 'Sunny Day', "Busy",
"Feeling Lazy", "Damm it it feels good to be a gangster", "Message only"]
print("Hello!")
print ("Let\'s get started!")
Choice = raw_input("Do you want to continue as the default user-" + spy.salutation + " " + spy.name + " or create a new user? (Y/N):")
#Function to add a status
def add_status():
updated_status_message = None
if spy.current_status_message is not None:
print ('Your current status message is %s \n' % spy.current_status_message)
else:
print ('You don\'t have any status message currently \n')
default = raw_input("Do you want to select from the older status (y/n)? ")
if default.upper() == "N":
new_status_message = raw_input("What status message do you want to see?: ")
if len(new_status_message) > 0:
STATUS_MESSAGES.append(new_status_message)
updated_status_message = new_status_message
elif default.upper() == 'Y':
item_position = 1
for message in STATUS_MESSAGES:
print ('%d. %s' % (item_position, message))
item_position = item_position + 1
message_selection = int(raw_input("\nChoose the index of the status: "))
if len(STATUS_MESSAGES) >= message_selection:
updated_status_message = STATUS_MESSAGES[message_selection - 1]
else:
print ('The option you chose is not valid! Press either Y or N.')
if updated_status_message:
print ('Your updated status message is:'),
print(updated_status_message)
else:
print('You did not update your status message')
return updated_status_message
#Function to add a friend
def add_friend():
new_friend = Spy(" ", " ", 0, 0.0)
new_friend.name = raw_input("Please add your friend's name: ")
new_friend.salutation = raw_input("Are they Mr or Ms?: ")
new_friend.age = input("Age?: ")
new_friend.age = int(new_friend.age)
new_friend.rating = input("Spy rating?: ")
new_friend.rating = float(new_friend.rating)
if len(new_friend.name) > 0 and new_friend.name.isdigit() == False and 12 < new_friend.age < 50 and new_friend.salutation.isalpha() == True and new_friend.rating >= spy.rating :
friends.append(new_friend)
print('Friend Added!')
else:
print("Sorry, the friend cannot be a spy!")
return len(friends)
#Function to select a friend
def select_a_friend():
item_number = 0
for friend in friends:
print ('%d. %s %s aged %d with rating %.2f is online' % (item_number + 1, friend.salutation, friend.name, friend.age, friend.rating))
item_number = item_number + 1
friend_choice = raw_input("Choose the index of the friend: ")
friend_choice_position = int(friend_choice) - 1
if friend_choice_position + 1 > len(friends):
print("Sorry,This friend is not present.")
exit()
else:
return friend_choice_position
#Function to send a message
def send_a_message():
friend_choice = select_a_friend()
original_image = raw_input("What is the name of the image?: ")
output_path = "output.jpg"
text = raw_input("What do you want to say? ")
Steganography.encode(original_image, output_path, text)
new_chat = ChatMessage(text)
friends[friend_choice].chats.append(new_chat)
print("Your secret message image is ready!")
#Function to read a message
def read_a_message():
sender = select_a_friend()
output_path = raw_input("What is the name of the image file?: ")
try:
secret_text = Steganography.decode(output_path)
print ("The secret message you read is"),
print (secret_text)
words = secret_text.split()
new = (secret_text.upper()).split()
new_chat = ChatMessage(secret_text)
# Adds the message to the list of chats
friends[sender].chats.append(new_chat)
print("Your secret message has been saved!")
# If any error occurs during decoding
except TypeError:
print("Nothing to decode from the image as it contains no secret message.")
#Function to read chat history
def read_chat_history():
read_for = select_a_friend()
print ('\n')
for chat in friends[read_for].chats:
if chat.sent_by_me:
print(str(chat.time.strftime("%d %B %Y %A %H:%M")) + ","),
print("You said:"),
print(str(chat.message))
else:
print(str(chat.time.strftime("%d %B %Y %A %H:%M")) + ","),
print(str(friends[read_for].name) + " said:"),
print(str(chat.message))
#Function to select the default user
def start_chat(spy):
spy.name = spy.salutation + " " + spy.name
if 12 < spy.age < 50:
print("Authentication complete.")
print("Welcome " + str(spy.name))
print("Your age:" + str(spy.age))
print("Your rating:"+str(spy.rating))
print("Bravo!Proud to have you on board.")
show_menu = True
while show_menu:
menu_choices = "What do you want to do? \n 1. Add a status update \n" \
" 2. Add a friend \n 3. Send a secret message \n " \
"4. Read a secret message \n 5. Read Chats from a user \n" \
" 6. Close Application \n"
menu_choice = raw_input(menu_choices)
if len(menu_choice) > 0:
menu_choice = int(menu_choice)
if menu_choice == 1:
spy.current_status_message = add_status()
elif menu_choice == 2:
number_of_friends = add_friend()
print ('You have %d friends' % number_of_friends)
elif menu_choice == 3:
send_a_message()
elif menu_choice == 4:
read_a_message()
elif menu_choice == 5:
read_chat_history()
elif menu_choice == 6:
print("Successfully closed")
show_menu = False
else:
print("That was a wrong choice.")
exit()
else:
if spy.age <= 12:
print("Sorry, you are too young to become a spy!")
elif spy.age >= 50:
print("Sorry, you are too old to be a spy!")
#Block of code from which execution starts
if Choice.upper() == "Y":
#Function call for default user
start_chat(spy)
elif Choice.upper() == "N":
spy = Spy(" ", " ", 0, 0.0)
#
spy.name = raw_input("Welcome to spy chat, you must tell me your spy name first: ")
if len(spy.name) > 0 and spy.name.isdigit() == False:
spy.salutation = raw_input("What should we call you? Mr. or Ms.?")
if len(spy.salutation) > 0:
spy.age = raw_input("Please enter your age: ")
if len(spy.age) > 0:
spy.age = int(spy.age)
if 12 <= spy.age < 50:
print("Welcome to Spy community")
spy.rating = raw_input("Please enter your spy rating: ")
if len(spy.rating) > 0:
spy.rating = float(spy.rating)
if spy.rating > 4.5:
print("Great Ace!")
elif 3.5 <= spy.rating <= 4.5:
print("You are one of the good ones!")
elif 2.5 <= spy.rating <= 3.5:
print("You can always do better.")
else:
print("We will get someone to help you.")
spy.is_online = True
| else:
print ("Enter a valid spy rating")
else:
if spy.age <= 12:
print("Sorry, you are too young to become a spy!")
elif spy.age >= 50:
print("Sorry, you are too old to be a spy!")
else:
print("Please enter a valid age")
else:
print("Please enter your age")
else:
print("Please enter a valid salutation")
else:
print("Please enter a valid name")
else:
print(colored("You did not reply with a yes(Y) or no(N)!", 'green'))
print(colored("Need to run the program again.", 'green'))
exit() |
start_chat(spy)
| random_line_split |
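Here the fim_type is random_line_split and the middle is a single whole line, the start_chat(spy) call near the end of the file. A plausible way to produce such a split is sketched below; this is an assumed reconstruction of the idea, not the generator actually used for this dataset.

import random

# Assumed sketch of a random line split: pick one line as the middle;
# everything before it is the prefix, everything after it is the suffix.
def random_line_split(source, rng):
    lines = source.splitlines(keepends=True)
    i = rng.randrange(len(lines))
    return "".join(lines[:i]), lines[i], "".join(lines[i + 1:])

prefix, middle, suffix = random_line_split("a = 1\nb = 2\nc = a + b\n",
                                           random.Random(0))
assert prefix + middle + suffix == "a = 1\nb = 2\nc = a + b\n"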
program.py | # Importing the details of the spy
from spy_details import spy, Spy, friends, ChatMessage
# Importing steganography module
from steganography.steganography import Steganography
# List of status messages
STATUS_MESSAGES = ['Having Fun', 'Sunny Day', "Busy",
"Feeling Lazy", "Damm it it feels good to be a gangster", "Message only"]
print("Hello!")
print ("Let\'s get started!")
Choice = raw_input("Do you want to continue as the default user-" + spy.salutation + " " + spy.name + " or create a new user? (Y/N):")
#Function to add a status
def add_status():
updated_status_message = None
if spy.current_status_message is not None:
print ('Your current status message is %s \n' % spy.current_status_message)
else:
print ('You don\'t have any status message currently \n')
default = raw_input("Do you want to select from the older status (y/n)? ")
if default.upper() == "N":
new_status_message = raw_input("What status message do you want to see?: ")
if len(new_status_message) > 0:
STATUS_MESSAGES.append(new_status_message)
updated_status_message = new_status_message
elif default.upper() == 'Y':
item_position = 1
for message in STATUS_MESSAGES:
print ('%d. %s' % (item_position, message))
item_position = item_position + 1
message_selection = int(raw_input("\nChoose the index of the status: "))
if len(STATUS_MESSAGES) >= message_selection:
updated_status_message = STATUS_MESSAGES[message_selection - 1]
else:
print ('The option you chose is not valid! Press either Y or N.')
if updated_status_message:
print ('Your updated status message is:'),
print(updated_status_message)
else:
print('You did not update your status message')
return updated_status_message
#Function to add a friend
def add_friend():
new_friend = Spy(" ", " ", 0, 0.0)
new_friend.name = raw_input("Please add your friend's name: ")
new_friend.salutation = raw_input("Are they Mr or Ms?: ")
new_friend.age = input("Age?: ")
new_friend.age = int(new_friend.age)
new_friend.rating = input("Spy rating?: ")
new_friend.rating = float(new_friend.rating)
if len(new_friend.name) > 0 and new_friend.name.isdigit() == False and 12 < new_friend.age < 50 and new_friend.salutation.isalpha() == True and new_friend.rating >= spy.rating :
friends.append(new_friend)
print('Friend Added!')
else:
print("Sorry, the friend cannot be a spy!")
return len(friends)
#Function to select a friend
def select_a_friend():
|
#Function to send a message
def send_a_message():
friend_choice = select_a_friend()
original_image = raw_input("What is the name of the image?: ")
output_path = "output.jpg"
text = raw_input("What do you want to say? ")
Steganography.encode(original_image, output_path, text)
new_chat = ChatMessage(text)
friends[friend_choice].chats.append(new_chat)
print("Your secret message image is ready!")
#Function to read a message
def read_a_message():
sender = select_a_friend()
output_path = raw_input("What is the name of the image file?: ")
try:
secret_text = Steganography.decode(output_path)
print ("The secret message you read is"),
print (secret_text)
words = secret_text.split()
new = (secret_text.upper()).split()
new_chat = ChatMessage(secret_text)
# Adds the message to the list of chats
friends[sender].chats.append(new_chat)
print("Your secret message has been saved!")
# If any error occurs during decoding
except TypeError:
print("Nothing to decode from the image as it contains no secret message.")
#Function to read chat history
def read_chat_history():
read_for = select_a_friend()
print ('\n')
for chat in friends[read_for].chats:
if chat.sent_by_me:
print(str(chat.time.strftime("%d %B %Y %A %H:%M")) + ","),
print("You said:"),
print(str(chat.message))
else:
print(str(chat.time.strftime("%d %B %Y %A %H:%M")) + ","),
print(str(friends[read_for].name) + " said:"),
print(str(chat.message))
#Function to select the default user
def start_chat(spy):
spy.name = spy.salutation + " " + spy.name
if 12 < spy.age < 50:
print("Authentication complete.")
print("Welcome " + str(spy.name))
print("Your age:" + str(spy.age))
print("Your rating:"+str(spy.rating))
print("Bravo!Proud to have you on board.")
show_menu = True
while show_menu:
menu_choices = "What do you want to do? \n 1. Add a status update \n" \
" 2. Add a friend \n 3. Send a secret message \n " \
"4. Read a secret message \n 5. Read Chats from a user \n" \
" 6. Close Application \n"
menu_choice = raw_input(menu_choices)
if len(menu_choice) > 0:
menu_choice = int(menu_choice)
if menu_choice == 1:
spy.current_status_message = add_status()
elif menu_choice == 2:
number_of_friends = add_friend()
print ('You have %d friends' % number_of_friends)
elif menu_choice == 3:
send_a_message()
elif menu_choice == 4:
read_a_message()
elif menu_choice == 5:
read_chat_history()
elif menu_choice == 6:
print("Successfully closed")
show_menu = False
else:
print("That was a wrong choice.")
exit()
else:
if spy.age <= 12:
print("Sorry, you are too young to become a spy!")
elif spy.age >= 50:
print("Sorry, you are too old to be a spy!")
#Block of code from which execution starts
if Choice.upper() == "Y":
#Function call for default user
start_chat(spy)
elif Choice.upper() == "N":
spy = Spy(" ", " ", 0, 0.0)
#
spy.name = raw_input("Welcome to spy chat, you must tell me your spy name first: ")
if len(spy.name) > 0 and spy.name.isdigit() == False:
spy.salutation = raw_input("What should we call you? Mr. or Ms.?")
if len(spy.salutation) > 0:
spy.age = raw_input("Please enter your age: ")
if len(spy.age) > 0:
spy.age = int(spy.age)
if 12 <= spy.age < 50:
print("Welcome to Spy community")
spy.rating = raw_input("Please enter your spy rating: ")
if len(spy.rating) > 0:
spy.rating = float(spy.rating)
if spy.rating > 4.5:
print("Great Ace!")
elif 3.5 <= spy.rating <= 4.5:
print("You are one of the good ones!")
elif 2.5 <= spy.rating <= 3.5:
print("You can always do better.")
else:
print("We will get someone to help you.")
spy.is_online = True
start_chat(spy)
else:
print ("Enter a valid spy rating")
else:
if spy.age <= 12:
print("Sorry, you are too young to become a spy!")
elif spy.age >= 50:
print("Sorry, you are too old to be a spy!")
else:
print("Please enter a valid age")
else:
print("Please enter your age")
else:
print("Please enter a valid salutation")
else:
print("Please enter a valid name")
else:
print(colored("You did not reply with a yes(Y) or no(N)!", 'green'))
print(colored("Need to run the program again.", 'green'))
exit() | item_number = 0
for friend in friends:
print ('%d. %s %s aged %d with rating %.2f is online' % (item_number + 1, friend.salutation, friend.name, friend.age, friend.rating))
item_number = item_number + 1
friend_choice = raw_input("Choose the index of the friend: ")
friend_choice_position = int(friend_choice) - 1
if friend_choice_position + 1 > len(friends):
print("Sorry,This friend is not present.")
exit()
else:
return friend_choice_position | identifier_body |
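This row's fim_type is identifier_body: the middle is the entire body of select_a_friend. One way such spans can be located is with Python's ast module, as in the sketch below; this is an illustrative guess at the technique, not the dataset's own code, and it assumes Python 3.8+ for end_lineno.

import ast

# Assumed sketch: find a named function and return the source lines of its
# body, which is roughly what an identifier_body split holds out.
def function_body(source, name):
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.FunctionDef) and node.name == name:
            lines = source.splitlines(keepends=True)
            return "".join(lines[node.body[0].lineno - 1:node.body[-1].end_lineno])
    return None

src = "def f(x):\n    y = x + 1\n    return y\n"
print(function_body(src, "f"))  # prints the two body lines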
scr.py | from PyLnD.loads.rf_functions import rf_mdof
from PyLnD.loads.pfile import modal_p
from PyLnD.loads.phi import PHI
from PyLnD.loads.hwlist import HWLIST
from PyLnD.loads.ltm import LTM
from PyLnD.loads.eig import EIG
from PyLnD.loads.pfile import PFILE
from pylab import *
class SCR:
"""Screening Class to do Modal Transient Analysis."""
def __init__(self, name):
"""Initialize the SCR Object."""
self.name = name
self.phi = []
self.hwlist = []
self.ltm = []
self.eig = []
self.pfile = []
self.zeta = []
self.u = {}
self.eta = {}
self.time = {}
def load_phi(self, **kwargs):
"""Method to load the Normal Modes Matrix (PHI) into the analysis.
Procedure to load the f12 and corresponding f06.
scr.load_phi(msf12='ms_xp93s1gl.f12', msf06='ms_xp93s1gl.f06')
"""
msf12 = kwargs['msf12']
msf06 = kwargs['msf06']
self.phi = PHI(msf12, msf06)
self.zeta = 0.01 * np.ones([self.phi.num_modes])
def load_hwlist(self, **kwargs):
"""Method to load the Hardware List (HWLIST) into the analysis.
Ex: scr.load_hwlist(hwlist='xp_hwlist.xls')
"""
hwlist = kwargs['hwlist']
self.hwlist = HWLIST(hwlist)
def load_ltm(self, **kwargs):
"""Method to load the LTM into the analysis.
Ex: scr.load_ltm(ltm='xp93zz_scr.pch')"""
ltm = kwargs['ltm']
self.ltm = LTM(ltm)
self.ltm.label_ltm(self.hwlist)
def load_eig(self, **kwargs):
"""Method to load the eigenvalue file into the analysis.
Ex: scr.load_eig(eig='xp93zz.eig')"""
eig = kwargs['eig']
self.eig = EIG(eig)
def load_pfile(self, **kwargs):
"""Method to load the forcing function (PFILE) into the analysis.
* Auto time step skips time steps that do not contribute above some threshold.
Ex: scr.load_pfile(pfile='ff_xp93s1sp0001.dat', filetype=['pfile' or 'matfile'])
Ex: Parse the text data into numbers.
scr.pfile.parse_pfile(case=[1,100])
Ex: Sync and set the run to have a timestep of 0.01 sec.
scr.pfile.sync(case=[1,100], tstep=0.01)
Ex: Sync and set the run to have an auto time step, defaults to 0.01 sec.
scr.pfile.sync(case=76, auto='yes')
Ex: Sync and set the run to have an auto time step with tstep = 0.02 sec where force exists
scr.pfile.sync(case=76, auto='yes', tstep=0.02)
"""
pfile = kwargs['pfile']
filetype = kwargs['filetype']
# Loads the pfile and finds the indices, still need to sync and parse.
self.pfile = PFILE(pfile, filetype=filetype)
# self.pfile.sync(tstep='auto')
def load_zeta(self, **kwargs):
"""Method to load the damping file.
Ex: scr.load_zeta(damp='xp93s1/DAMPINGFILE')
"""
dampfile = kwargs['damp']
with open(dampfile) as f:
for line in f:
if line[0] != '$' and line[0] != 'i':
row = line.split()
row = list(map(float, row))
self.zeta[int(row[0] - 1)] = 0.01 * row[1]
def save2mat(self, outfile):
"""Method to save the scr object to a Matlab mat file.
Ex: scr.save2mat('xp93zz/sc_xp93zzsp0001.mat')
"""
from matlab.mat_utilities import save2mat
from matlab.mat_utilities import tuple2list as t2l
doflist = {'acron_dofs': t2l(self.ltm.acron_dofs)}
outlist = [self.eta, self.u, self.time, doflist]
keylist = ['eta', 'u', 'time', 'ltm']
save2mat(key=keylist, olist=outlist, ofile=outfile)
def plot_u(self, **kwargs):
"""Method to plot the response in the time domain.
Ex: Plot this dof for case 1 and 2, and label the window "u test"
scr.plot_u(items=[(1, 'N1PN3', 'TOR'), (2, 'N1PN3', 'TOR')], desc='u test')
"""
# Get the kwargs.
items = kwargs['items']
if type(items) is not list:
items = [items]
if 'desc' in kwargs.keys():
desc = kwargs['desc']
else:
desc = ''
# Loop and plot each requested dof.
fig = figure()
ax = subplot(111)
for item in items:
if item.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = item[0]
if c not in self.u.keys():
raise Exception('!!! Case {0} has not been run or does not exist !!!'.format(c))
dof = (item[1], item[2])
# Find the dof tuple in the acron_dof list or the dof list from the ltm object.
if dof in self.ltm.acron_dofs:
i_dof = self.ltm.acron_dofs.index(dof)
elif dof in self.ltm.dofs:
i_dof = self.ltm.dofs.index(dof)
else:
raise Exception("!!! DOF " + dof.__str__() + " not in LTM " + self.ltm.name)
# Plot the requested time history.
label = '({0}, {1}) case: {2}'.format(dof[0], dof[1], c)
ax.plot(self.time[c], self.u[c][i_dof, :], label=label)
ax.legend()
title('Response of FF: %s' % (self.pfile.name))
xlabel('Time (s)')
fig.canvas.set_window_title('{0} {1}'.format(self.name, desc))
show()
def plot_eta(self, **kwargs):
"""Method to plot the modal displacements.
Ex: Plot mode 7 for case 1 and case 100, and label the window "eta sp0001".
scr.plot_eta(items=[(1, 7), (100, 7)], desc='eta sp0001')
"""
# Get the kwargs.
items = kwargs['items']
if type(items) is not list:
items = [items]
if 'desc' in kwargs.keys():
desc = kwargs['desc']
else:
desc = ''
fig = plt.figure()
ax = plt.subplot(111)
for item in items:
c = item[0]
mode = item[1]
if mode > self.phi.num_modes:
raise Exception("!!! Only %s modes in analysis !!!" % self.phi.num_modes.__str__())
# Plot the requested modal displacement.
label = 'Mode {0} case: {1}'.format(mode, c)
ax.plot(self.time[c], self.eta[c][mode - 1, :], label=label)
ax.legend()
plt.title('Modal Response of FF: %s' % self.pfile.name)
plt.xlabel('Time (s)')
fig.canvas.set_window_title('{0} {1}'.format(self.name, desc))
plt.show()
def amx(self, **kwargs):
"""Method to find the max/mins for one or all output DOF.\n
Ex: Find the max/mins and times for this DOF in case 1.
scr.amx(item=(1, 'N2LAB', 'TOR'))
"""
# Determine the keyword arguments.
if 'item' in kwargs.keys():
item = kwargs['item']
if not type(item) is tuple:
raise Exception('Requested dof {0} is not a tuple (case, "acron", "dof").'.format(dof))
dof = (item[1], item[2])
case = item[0]
else:
raise Exception('You must request a dof: scr.amx(item=(case, "acron", "dof")).')
# Determine the location of the requested dof.
loc = [x for x, y in enumerate(self.ltm.acron_dofs) if y == dof][0]
# Determine the max/min and the time at which they occurred.
dof_res = self.u[case][loc, :]
max_val = np.max(dof_res)
min_val = np.min(dof_res)
max_loc = np.argmax(dof_res)
min_loc = np.argmin(dof_res)
max_time = self.time[case][max_loc]
min_time = self.time[case][min_loc]
# Print to the screen.
print('Case {0}- \t{1}\tMax: {2:.4f} (@ {3:.4f} sec)\tMin: {4:.4f} (@ {5:.4f} sec)\n'.format(
case, dof, max_val, max_time, min_val, min_time
))
def fft(self, **kwargs):
"""Method to perform fft on a signal.
Ex: Plot fft of several responses.
scr.fft(u_out=[(1, 'SSSIEA', 'FX'), (1, 'SSSIEA', 'FY')])
Ex: Plot fft of several applied forces.
scr.fft(f_in=[(1, 100012, 1), (1, 100012, 2), (1, 100012, 3)])
"""
from PyLnD.loads.freq_domain import FFT
u_out = []
f_in = []
# Obtain the keyword arguments.
if 'u_out' in kwargs.keys():
u_out = kwargs['u_out']
if type(u_out) is not list:
u_out = [u_out]
if 'f_in' in kwargs.keys():
f_in = kwargs['f_in']
if type(f_in) is not list:
f_in = [f_in]
if 'desc' in kwargs.keys():
desc = kwargs['desc']
else:
desc = ''
# Loop, perform fft, and plot each requested response.
if u_out:
for resp in u_out:
if resp.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = resp[0]
if c not in self.u.keys():
raise Exception('!!! Case {0} has not been run or does not exist !!!'.format(c))
dof = (resp[1], resp[2])
# Find the dof tuple in the acron_dof list or the dof list from the ltm object.
| else:
raise Exception("!!! DOF " + dof.__str__() + " not in LTM " + self.ltm.name)
# Create FFT object.
u_fft = FFT(resp, x=self.u[c][i_dof, :], time=self.time[c])
# Plot the requested response fft.
fig = plt.figure(1)
u_fft.plot_fft()
for load in f_in:
if load.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = load[0]
if c not in self.u.keys():
raise Exception('!!! Case {0} has not been run or does not exist !!!'.format(c))
grid_id = load[1]
dir = load[2]
# Create FFT object.
p_fft = FFT(load, x=self.pfile.case[c][grid_id][:, dir], time=self.pfile.case[c][grid_id][:, 0])
# Plot the requested response fft.
p_fft.plot_fft()
def rss(self, case):
"""Method to RSS responses as dictated by the HWLIST.
Ex: Perform the rss on case 1.
scr.rss(1)
"""
# Loop over each HWLIST RSS items, determine if available in the LTM, perform RSS on u.
for item in self.hwlist.hw_rss:
acron = item[0]
eid = item[1]
dof = item[2]
rss_list = []
rss_idx = []
rss_dofs = []
if acron in self.hwlist.hw.keys():
if eid in self.hwlist.hw[acron].keys():
if dof in self.hwlist.hw[acron][eid].keys():
rss_dofs.append((acron, dof))
for d in self.hwlist.hw[acron][eid][dof]['dofs']:
eid_d = (eid, d)
rss_list.append(eid_d)
if eid_d in self.ltm.dofs:
rss_idx.append(self.ltm.dofs.index(eid_d))
else:
raise Exception('Missing {0} in {1} and {2} in ltm.'.format(eid_d, acron, dof))
rss_sum = np.zeros_like(self.u[case][0, :])
for idx in rss_idx:
rss_sum = rss_sum + np.square(self.u[case][idx, :])
rss = np.sqrt(rss_sum)
# Add the RSS to the u and ltm.acron_dof.
self.ltm.acron_dofs.append((acron, dof))
self.u[case] = np.vstack([self.u[case], rss])
def run(self, **kwargs):
"""Method to perform numerical integration of EOM via Recurrence Formulas.
Ex: Run case 1 and 2.
scr.run(case=[1, 2])
Ex: Run all cases.
scr.run(case='all')
"""
# Get the kwargs.
cases = kwargs['case']
if cases == 'all':
cases = self.pfile.case.keys()
elif type(cases) is not list:
cases = [cases]
if 'rbm' in kwargs.keys():
if kwargs['rbm'].lower() == 'yes':
rbm = 1
else:
rbm = 0
else:
rbm = 0
# Run all the requested cases.
for c in cases:
# Create the current case dictionary key.
if c not in self.time.keys():
self.time[c] = []
if c not in self.u.keys():
self.u[c] = []
if c not in self.eta.keys():
self.eta[c] = []
# Determine the modal force vector.
p_modal = modal_p(self.pfile.case[c], self.phi)
# Determine the time parameters in the forcing function.
grid = self.pfile.case[c]['grids'][0]
self.time[c] = self.pfile.case[c][grid][:, 0]
dt = self.pfile.case[c]['dt']
# Add 100 seconds at the end of the forcing function for ring down.
add_time = [(20, 0.01), (80, 0.5)]
for at in add_time:
new_time = np.arange(self.time[c][-1] + dt, self.time[c][-1] + at[0], at[1])
self.time[c] = np.append(self.time[c], new_time)
new_p_modal = np.zeros([self.phi.num_modes, new_time.size])
p_modal = np.append(p_modal, new_p_modal, axis=1)
# Integrate the modal EOM using Recurrence Formulas:
# etadd + 2 * zeta * omn * etad + omn**2 * eta = P
eta0 = np.zeros_like(p_modal)
etad0 = np.zeros_like(p_modal)
[self.eta[c], etad] = rf_mdof(self.time[c], p_modal, self.eig.eigenvalues,
np.multiply(2 * np.pi, self.eig.frequency), self.zeta,
eta0, etad0)
# Remove rigid body modes unless requested not to.
if rbm == 0:
self.eta[c][0:6, :] = 0.0
# Recover the desired responses with superposition of modes using the LTM
self.u[c] = self.ltm.dtm @ self.eta[c]
# Perform the required RSS set out in the HWLIST.
self.rss(c) | if dof in self.ltm.acron_dofs:
i_dof = self.ltm.acron_dofs.index(dof)
elif dof in self.ltm.dofs:
i_dof = self.ltm.dofs.index(dof)
| random_line_split |
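The scr.py rows define an SCR driver class whose docstrings spell out the intended workflow. Pieced together from those docstring examples, a hypothetical end-to-end session might look like the sketch below; the import path and the input file names are assumptions lifted from the docstrings, not verified files, and the HWLIST is loaded before the LTM because load_ltm labels the LTM with it.

# Hypothetical driver script assembled from the docstring examples above.
from PyLnD.loads.scr import SCR   # assumed module path for the SCR class

scr = SCR('xp93zz')
scr.load_hwlist(hwlist='xp_hwlist.xls')
scr.load_ltm(ltm='xp93zz_scr.pch')
scr.load_phi(msf12='ms_xp93s1gl.f12', msf06='ms_xp93s1gl.f06')
scr.load_eig(eig='xp93zz.eig')
scr.load_zeta(damp='xp93s1/DAMPINGFILE')
scr.load_pfile(pfile='ff_xp93s1sp0001.dat', filetype='pfile')
scr.pfile.parse_pfile(case=1)
scr.pfile.sync(case=1, tstep=0.01)
scr.run(case=1)
scr.plot_u(items=(1, 'N1PN3', 'TOR'), desc='u test')
scr.save2mat('xp93zz/sc_xp93zzsp0001.mat')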
scr.py | from PyLnD.loads.rf_functions import rf_mdof
from PyLnD.loads.pfile import modal_p
from PyLnD.loads.phi import PHI
from PyLnD.loads.hwlist import HWLIST
from PyLnD.loads.ltm import LTM
from PyLnD.loads.eig import EIG
from PyLnD.loads.pfile import PFILE
from pylab import *
class SCR:
"""Screening Class to do Modal Transient Analysis."""
def __init__(self, name):
"""Initialize the SCR Object."""
self.name = name
self.phi = []
self.hwlist = []
self.ltm = []
self.eig = []
self.pfile = []
self.zeta = []
self.u = {}
self.eta = {}
self.time = {}
def load_phi(self, **kwargs):
"""Method to load the Normal Modes Matrix (PHI) into the analysis.
Procedure to load the f12 and corresponding f06.
scr.load_phi(msf12='ms_xp93s1gl.f12', msf06='ms_xp93s1gl.f06')
"""
msf12 = kwargs['msf12']
msf06 = kwargs['msf06']
self.phi = PHI(msf12, msf06)
self.zeta = 0.01 * np.ones([self.phi.num_modes])
def load_hwlist(self, **kwargs):
|
def load_ltm(self, **kwargs):
"""Method to load the LTM into the analysis.
Ex: scr.load_ltm(ltm='xp93zz_scr.pch')"""
ltm = kwargs['ltm']
self.ltm = LTM(ltm)
self.ltm.label_ltm(self.hwlist)
def load_eig(self, **kwargs):
"""Method to load the eigenvalue file into the analysis.
Ex: scr.load_eig(eig='xp93zz.eig')"""
eig = kwargs['eig']
self.eig = EIG(eig)
def load_pfile(self, **kwargs):
"""Method to load the forcing function (PFILE) into the analysis.
* Auto time step skips time steps that do not contribute above some threshold.
Ex: scr.load_pfile(pfile='ff_xp93s1sp0001.dat', filetype=['pfile' or 'matfile'])
Ex: Parse the text data into numbers.
scr.pfile.parse_pfile(case=[1,100])
Ex: Sync and set the run to have a timestep of 0.01 sec.
scr.pfile.sync(case=[1,100], tstep=0.01)
Ex: Sync and set the run to have an auto time step, defaults to 0.01 sec.
scr.pfile.sync(case=76, auto='yes')
Ex: Sync and set the run to have an auto time step with tstep = 0.02 sec where force exists
scr.pfile.sync(case=76, auto='yes', tstep=0.02)
"""
pfile = kwargs['pfile']
filetype = kwargs['filetype']
# Loads the pfile and finds the indices, still need to sync and parse.
self.pfile = PFILE(pfile, filetype=filetype)
# self.pfile.sync(tstep='auto')
def load_zeta(self, **kwargs):
"""Method to load the damping file.
Ex: scr.load_zeta(damp='xp93s1/DAMPINGFILE')
"""
dampfile = kwargs['damp']
with open(dampfile) as f:
for line in f:
if line[0] != '$' and line[0] != 'i':
row = line.split()
row = list(map(float, row))
self.zeta[int(row[0] - 1)] = 0.01 * row[1]
def save2mat(self, outfile):
"""Method to save the scr object to a Matlab mat file.
Ex: scr.save2mat('xp93zz/sc_xp93zzsp0001.mat')
"""
from matlab.mat_utilities import save2mat
from matlab.mat_utilities import tuple2list as t2l
doflist = {'acron_dofs': t2l(self.ltm.acron_dofs)}
outlist = [self.eta, self.u, self.time, doflist]
keylist = ['eta', 'u', 'time', 'ltm']
save2mat(key=keylist, olist=outlist, ofile=outfile)
def plot_u(self, **kwargs):
"""Method to plot the response in the time domain.
Ex: Plot this dof for case 1 and 2, and label the window "u test"
scr.plot_u(items=[(1, 'N1PN3', 'TOR'), (2, 'N1PN3', 'TOR')], desc='u test')
"""
# Get the kwargs.
items = kwargs['items']
if type(items) is not list:
items = [items]
if 'desc' in kwargs.keys():
desc = kwargs['desc']
else:
desc = ''
# Loop and plot each requested dof.
fig = figure()
ax = subplot(111)
for item in items:
if item.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = item[0]
if c not in self.u.keys():
raise Exception('!!! Case {0} has not been run or does not exist !!!'.format(c))
dof = (item[1], item[2])
# Find the dof tuple in the acron_dof list or the dof list from the ltm object.
if dof in self.ltm.acron_dofs:
i_dof = self.ltm.acron_dofs.index(dof)
elif dof in self.ltm.dofs:
i_dof = self.ltm.dofs.index(dof)
else:
raise Exception("!!! DOF " + dof.__str__() + " not in LTM " + self.ltm.name)
# Plot the requested time history.
label = '({0}, {1}) case: {2}'.format(dof[0], dof[1], c)
ax.plot(self.time[c], self.u[c][i_dof, :], label=label)
ax.legend()
title('Response of FF: %s' % (self.pfile.name))
xlabel('Time (s)')
fig.canvas.set_window_title('{0} {1}'.format(self.name, desc))
show()
def plot_eta(self, **kwargs):
"""Method to plot the modal displacements.
Ex: Plot mode 7 for case 1 and case 100, and label the window "eta sp0001".
scr.plot_eta(items=[(1, 7), (100, 7)], desc='eta sp0001')
"""
# Get the kwargs.
items = kwargs['items']
if type(items) is not list:
items = [items]
if 'desc' in kwargs.keys():
desc = kwargs['desc']
else:
desc = ''
fig = plt.figure()
ax = plt.subplot(111)
for item in items:
c = item[0]
mode = item[1]
if mode > self.phi.num_modes:
raise Exception("!!! Only %s modes in analysis !!!" % self.phi.num_modes.__str__())
# Plot the requested modal displacement.
label = 'Mode {0} case: {1}'.format(mode, c)
ax.plot(self.time[c], self.eta[c][mode - 1, :], label=label)
ax.legend()
plt.title('Modal Response of FF: %s' % self.pfile.name)
plt.xlabel('Time (s)')
fig.canvas.set_window_title('{0} {1}'.format(self.name, desc))
plt.show()
def amx(self, **kwargs):
"""Method to find the max/mins for one or all output DOF.\n
Ex: Find the max/mins and times for this DOF in case 1.
scr.amx(item=(1, 'N2LAB', 'TOR'))
"""
# Determine the keyword arguments.
if 'item' in kwargs.keys():
item = kwargs['item']
if not type(item) is tuple:
raise Exception('Requested dof {0} is not a tuple (case, "acron", "dof").'.format(dof))
dof = (item[1], item[2])
case = item[0]
else:
raise Exception('You must request a dof: scr.amx(item=(case, "acron", "dof")).')
# Determine the location of the requested dof.
loc = [x for x, y in enumerate(self.ltm.acron_dofs) if y == dof][0]
# Determine the max/min and the time at which they occurred.
dof_res = self.u[case][loc, :]
max_val = np.max(dof_res)
min_val = np.min(dof_res)
max_loc = np.argmax(dof_res)
min_loc = np.argmin(dof_res)
max_time = self.time[case][max_loc]
min_time = self.time[case][min_loc]
# Print to the screen.
print('Case {0}- \t{1}\tMax: {2:.4f} (@ {3:.4f} sec)\tMin: {4:.4f} (@ {5:.4f} sec)\n'.format(
case, dof, max_val, max_time, min_val, min_time
))
def fft(self, **kwargs):
"""Method to perform fft on a signal.
Ex: Plot fft of several responses.
scr.fft(u_out=[(1, 'SSSIEA', 'FX'), (1, 'SSSIEA', 'FY')])
Ex: Plot fft of several applied forces.
scr.fft(f_in=[(1, 100012, 1), (1, 100012, 2), (1, 100012, 3)])
"""
from PyLnD.loads.freq_domain import FFT
u_out = []
f_in = []
# Obtain the keyword arguments.
if 'u_out' in kwargs.keys():
u_out = kwargs['u_out']
if type(u_out) is not list:
u_out = [u_out]
if 'f_in' in kwargs.keys():
f_in = kwargs['f_in']
if type(f_in) is not list:
f_in = [f_in]
if 'desc' in kwargs.keys():
desc = kwargs['desc']
else:
desc = ''
# Loop, perform fft, and plot each requested response.
if u_out:
for resp in u_out:
if resp.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = resp[0]
if c not in self.u.keys():
raise Exception('!!! Case {0} has not been run or does not exist !!!'.format(c))
dof = (resp[1], resp[2])
# Find the dof tuple in the acron_dof list or the dof list from the ltm object.
if dof in self.ltm.acron_dofs:
i_dof = self.ltm.acron_dofs.index(dof)
elif dof in self.ltm.dofs:
i_dof = self.ltm.dofs.index(dof)
else:
raise Exception("!!! DOF " + dof.__str__() + " not in LTM " + self.ltm.name)
# Create FFT object.
u_fft = FFT(resp, x=self.u[c][i_dof, :], time=self.time[c])
# Plot the requested response fft.
fig = plt.figure(1)
u_fft.plot_fft()
for load in f_in:
if load.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = load[0]
if c not in self.u.keys():
raise Exception('!!! Case {0} has not been run or does not exist !!!'.format(c))
grid_id = load[1]
dir = load[2]
# Create FFT object.
p_fft = FFT(load, x=self.pfile.case[c][grid_id][:, dir], time=self.pfile.case[c][grid_id][:, 0])
# Plot the requested response fft.
p_fft.plot_fft()
def rss(self, case):
"""Method to RSS responses as dictated by the HWLIST.
Ex: Perform the rss on case 1.
scr.rss(1)
"""
# Loop over each HWLIST RSS items, determine if available in the LTM, perform RSS on u.
for item in self.hwlist.hw_rss:
acron = item[0]
eid = item[1]
dof = item[2]
rss_list = []
rss_idx = []
rss_dofs = []
if acron in self.hwlist.hw.keys():
if eid in self.hwlist.hw[acron].keys():
if dof in self.hwlist.hw[acron][eid].keys():
rss_dofs.append((acron, dof))
for d in self.hwlist.hw[acron][eid][dof]['dofs']:
eid_d = (eid, d)
rss_list.append(eid_d)
if eid_d in self.ltm.dofs:
rss_idx.append(self.ltm.dofs.index(eid_d))
else:
raise Exception('Missing {0} in {1} and {2} in ltm.'.format(eid_d, acron, dof))
rss_sum = np.zeros_like(self.u[case][0, :])
for idx in rss_idx:
rss_sum = rss_sum + np.square(self.u[case][idx, :])
rss = np.sqrt(rss_sum)
# Add the RSS to the u and ltm.acron_dof.
self.ltm.acron_dofs.append((acron, dof))
self.u[case] = np.vstack([self.u[case], rss])
def run(self, **kwargs):
"""Method to perform numerical integration of EOM via Recurrence Formulas.
Ex: Run case 1 and 2.
scr.run(case=[1, 2])
Ex: Run all cases.
scr.run(case='all')
"""
# Get the kwargs.
cases = kwargs['case']
if cases == 'all':
cases = self.pfile.case.keys()
elif type(cases) is not list:
cases = [cases]
if 'rbm' in kwargs.keys():
if kwargs['rbm'].lower() == 'yes':
rbm = 1
else:
rbm = 0
else:
rbm = 0
# Run all the requested cases.
for c in cases:
# Create the current case dictionary key.
if c not in self.time.keys():
self.time[c] = []
if c not in self.u.keys():
self.u[c] = []
if c not in self.eta.keys():
self.eta[c] = []
# Determine the modal force vector.
p_modal = modal_p(self.pfile.case[c], self.phi)
# Determine the time parameters in the forcing function.
grid = self.pfile.case[c]['grids'][0]
self.time[c] = self.pfile.case[c][grid][:, 0]
dt = self.pfile.case[c]['dt']
# Add 100 seconds at the end of the forcing function for ring down.
add_time = [(20, 0.01), (80, 0.5)]
for at in add_time:
new_time = np.arange(self.time[c][-1] + dt, self.time[c][-1] + at[0], at[1])
self.time[c] = np.append(self.time[c], new_time)
new_p_modal = np.zeros([self.phi.num_modes, new_time.size])
p_modal = np.append(p_modal, new_p_modal, axis=1)
# Integrate the modal EOM using Recurrence Formulas:
# etadd + 2 * zeta * omn * etad + omn**2 * eta = P
eta0 = np.zeros_like(p_modal)
etad0 = np.zeros_like(p_modal)
[self.eta[c], etad] = rf_mdof(self.time[c], p_modal, self.eig.eigenvalues,
np.multiply(2 * np.pi, self.eig.frequency), self.zeta,
eta0, etad0)
# Remove rigid body modes unless requested not to.
if rbm == 0:
self.eta[c][0:6, :] = 0.0
# Recover the desired responses with superposition of modes using the LTM
self.u[c] = self.ltm.dtm @ self.eta[c]
# Perform the required RSS set out in the HWLIST.
self.rss(c)
| """Method to load the Hardware List (HWLIST) into the analysis.
Ex: scr.load_hwlist(hwlist='xp_hwlist.xls')
"""
hwlist = kwargs['hwlist']
self.hwlist = HWLIST(hwlist) | identifier_body |
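run() integrates the modal equation etadd + 2 * zeta * omn * etad + omn**2 * eta = P with recurrence formulas from rf_mdof, which is not shown in this file. As a stand-in only, the sketch below integrates a single mode with an explicit central-difference step; the frequency, damping, and forcing values are made up.

import numpy as np

# Illustrative single-mode integrator (central difference), not the rf_mdof
# recurrence itself: m*etadd + c*etad + k*eta = p with m = 1.
def sdof_central_difference(p, t, wn, zeta, eta0=0.0, etad0=0.0):
    dt = t[1] - t[0]
    m, c, k = 1.0, 2.0 * zeta * wn, wn ** 2
    eta = np.zeros_like(p, dtype=float)
    eta[0] = eta0
    etadd0 = (p[0] - c * etad0 - k * eta0) / m
    eta_prev = eta0 - dt * etad0 + 0.5 * dt ** 2 * etadd0   # fictitious step at t - dt
    a = m / dt ** 2 + c / (2.0 * dt)
    b = m / dt ** 2 - c / (2.0 * dt)
    for n in range(len(t) - 1):
        old = eta_prev if n == 0 else eta[n - 1]
        eta[n + 1] = (p[n] - (k - 2.0 * m / dt ** 2) * eta[n] - b * old) / a
    return eta

t = np.arange(0.0, 10.0, 0.01)
p = np.where(t < 1.0, 1.0, 0.0)                 # made-up rectangular pulse
eta = sdof_central_difference(p, t, wn=2.0 * np.pi, zeta=0.01)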
scr.py | from PyLnD.loads.rf_functions import rf_mdof
from PyLnD.loads.pfile import modal_p
from PyLnD.loads.phi import PHI
from PyLnD.loads.hwlist import HWLIST
from PyLnD.loads.ltm import LTM
from PyLnD.loads.eig import EIG
from PyLnD.loads.pfile import PFILE
from pylab import *
class SCR:
"""Screening Class to do Modal Transient Analysis."""
def __init__(self, name):
"""Initialize the SCR Object."""
self.name = name
self.phi = []
self.hwlist = []
self.ltm = []
self.eig = []
self.pfile = []
self.zeta = []
self.u = {}
self.eta = {}
self.time = {}
def load_phi(self, **kwargs):
"""Method to load the Normal Modes Matrix (PHI) into the analysis.
Procedure to load the f12 and corresponding f06.
scr.load_phi(msf12='ms_xp93s1gl.f12', msf06='ms_xp93s1gl.f06')
"""
msf12 = kwargs['msf12']
msf06 = kwargs['msf06']
self.phi = PHI(msf12, msf06)
self.zeta = 0.01 * np.ones([self.phi.num_modes])
def load_hwlist(self, **kwargs):
"""Method to load the Hardware List (HWLIST) into the analysis.
Ex: scr.load_hwlist(hwlist='xp_hwlist.xls')
"""
hwlist = kwargs['hwlist']
self.hwlist = HWLIST(hwlist)
def load_ltm(self, **kwargs):
"""Method to load the LTM into the analysis.
Ex: scr.load_ltm(ltm='xp93zz_scr.pch')"""
ltm = kwargs['ltm']
self.ltm = LTM(ltm)
self.ltm.label_ltm(self.hwlist)
def load_eig(self, **kwargs):
"""Method to load the eigenvalue file into the analysis.
Ex: scr.load_eig(eig='xp93zz.eig')"""
eig = kwargs['eig']
self.eig = EIG(eig)
def load_pfile(self, **kwargs):
"""Method to load the forcing function (PFILE) into the analysis.
* Auto time step skips time steps that do not contribute above some threshold.
Ex: scr.load_pfile(pfile='ff_xp93s1sp0001.dat', filetype=['pfile' or 'matfile'])
Ex: Parse the text data into numbers.
scr.pfile.parse_pfile(case=[1,100])
Ex: Sync and set the run to have a timestep of 0.01 sec.
scr.pfile.sync(case=[1,100], tstep=0.01)
Ex: Sync and set the run to have an auto time step, defaults to 0.01 sec.
scr.pfile.sync(case=76, auto='yes')
Ex: Sync and set the run to have an auto time step with tstep = 0.02 sec where force exists
scr.pfile.sync(case=76, auto='yes', tstep=0.02)
"""
pfile = kwargs['pfile']
filetype = kwargs['filetype']
# Loads the pfile and finds the indices, still need to sync and parse.
self.pfile = PFILE(pfile, filetype=filetype)
# self.pfile.sync(tstep='auto')
def load_zeta(self, **kwargs):
"""Method to load the damping file.
Ex: scr.load_zeta(damp='xp93s1/DAMPINGFILE')
"""
dampfile = kwargs['damp']
with open(dampfile) as f:
for line in f:
if line[0] != '$' and line[0] != 'i':
row = line.split()
row = list(map(float, row))
self.zeta[int(row[0] - 1)] = 0.01 * row[1]
def save2mat(self, outfile):
"""Method to save the scr object to a Matlab mat file.
Ex: scr.save2mat('xp93zz/sc_xp93zzsp0001.mat')
"""
from matlab.mat_utilities import save2mat
from matlab.mat_utilities import tuple2list as t2l
doflist = {'acron_dofs': t2l(self.ltm.acron_dofs)}
outlist = [self.eta, self.u, self.time, doflist]
keylist = ['eta', 'u', 'time', 'ltm']
save2mat(key=keylist, olist=outlist, ofile=outfile)
def plot_u(self, **kwargs):
"""Method to plot the response in the time domain.
Ex: Plot this dof for case 1 and 2, and label the window "u test"
scr.plot_u(items=[(1, 'N1PN3', 'TOR'), (2, 'N1PN3', 'TOR')], desc='u test')
"""
# Get the kwargs.
items = kwargs['items']
if type(items) is not list:
items = [items]
if 'desc' in kwargs.keys():
|
else:
desc = ''
# Loop and plot each requested dof.
fig = figure()
ax = subplot(111)
for item in items:
if item.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = item[0]
if c not in self.u.keys():
raise Exception('!!! Case {0} has not been run or does not exist !!!'.format(c))
dof = (item[1], item[2])
# Find the dof tuple in the acron_dof list or the dof list from the ltm object.
if dof in self.ltm.acron_dofs:
i_dof = self.ltm.acron_dofs.index(dof)
elif dof in self.ltm.dofs:
i_dof = self.ltm.dofs.index(dof)
else:
raise Exception("!!! DOF " + dof.__str__() + " not in LTM " + self.ltm.name)
# Plot the requested time history.
label = '({0}, {1}) case: {2}'.format(dof[0], dof[1], c)
ax.plot(self.time[c], self.u[c][i_dof, :], label=label)
ax.legend()
title('Response of FF: %s' % (self.pfile.name))
xlabel('Time (s)')
fig.canvas.set_window_title('{0} {1}'.format(self.name, desc))
show()
def plot_eta(self, **kwargs):
"""Method to plot the modal displacements.
Ex: Plot mode 7 for case 1 and case 100, and label the window "eta sp0001".
scr.plot_eta(items=[(1, 7), (100, 7)], desc='eta sp0001')
"""
# Get the kwargs.
items = kwargs['items']
if type(items) is not list:
items = [items]
if 'desc' in kwargs.keys():
desc = kwargs['desc']
else:
desc = ''
fig = plt.figure()
ax = plt.subplot(111)
for item in items:
c = item[0]
mode = item[1]
if mode > self.phi.num_modes:
raise Exception("!!! Only %s modes in analysis !!!" % self.phi.num_modes.__str__())
# Plot the requested modal displacement.
label = 'Mode {0} case: {1}'.format(mode, c)
ax.plot(self.time[c], self.eta[c][mode - 1, :], label=label)
ax.legend()
plt.title('Modal Response of FF: %s' % self.pfile.name)
plt.xlabel('Time (s)')
fig.canvas.set_window_title('{0} {1}'.format(self.name, desc))
plt.show()
def amx(self, **kwargs):
"""Method to find the max/mins for one or all output DOF.\n
Ex: Find the max/mins and times for this DOF in case 1.
scr.amx(item=(1, 'N2LAB', 'TOR'))
"""
# Determine the keyword arguments.
if 'item' in kwargs.keys():
item = kwargs['item']
if not type(item) is tuple:
raise Exception('Requested dof {0} is not a tuple (case, "acron", "dof").'.format(dof))
dof = (item[1], item[2])
case = item[0]
else:
raise Exception('You must request a dof: scr.amx(item=(case, "acron", "dof")).')
# Determine the location of the requested dof.
loc = [x for x, y in enumerate(self.ltm.acron_dofs) if y == dof][0]
# Determine the max/min and the time at which they occurred.
dof_res = self.u[case][loc, :]
max_val = np.max(dof_res)
min_val = np.min(dof_res)
max_loc = np.argmax(dof_res)
min_loc = np.argmin(dof_res)
max_time = self.time[case][max_loc]
min_time = self.time[case][min_loc]
# Print to the screen.
print('Case {0}- \t{1}\tMax: {2:.4f} (@ {3:.4f} sec)\tMin: {4:.4f} (@ {5:.4f} sec)\n'.format(
case, dof, max_val, max_time, min_val, min_time
))
def fft(self, **kwargs):
"""Method to perform fft on a signal.
Ex: Plot fft of several responses.
scr.fft(u_out=[(1, 'SSSIEA', 'FX'), (1, 'SSSIEA', 'FY')])
Ex: Plot fft of several applied forces.
scr.fft(f_in=[(1, 100012, 1), (1, 100012, 2), (1, 100012, 3)])
"""
from PyLnD.loads.freq_domain import FFT
u_out = []
f_in = []
# Obtain the keyword arguments.
if 'u_out' in kwargs.keys():
u_out = kwargs['u_out']
if type(u_out) is not list:
u_out = [u_out]
if 'f_in' in kwargs.keys():
f_in = kwargs['f_in']
if type(f_in) is not list:
f_in = [f_in]
if 'desc' in kwargs.keys():
desc = kwargs['desc']
else:
desc = ''
# Loop, perform fft, and plot each requested response.
if u_out:
for resp in u_out:
if resp.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = resp[0]
if c not in self.u.keys():
raise Exception('!!! Case {0} has not been run or does not exist !!!'.format(c))
dof = (resp[1], resp[2])
# Find the dof tuple in the acron_dof list or the dof list from the ltm object.
if dof in self.ltm.acron_dofs:
i_dof = self.ltm.acron_dofs.index(dof)
elif dof in self.ltm.dofs:
i_dof = self.ltm.dofs.index(dof)
else:
raise Exception("!!! DOF " + dof.__str__() + " not in LTM " + self.ltm.name)
# Create FFT object.
u_fft = FFT(resp, x=self.u[c][i_dof, :], time=self.time[c])
# Plot the requested response fft.
fig = plt.figure(1)
u_fft.plot_fft()
for load in f_in:
if load.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = load[0]
if c not in self.u.keys():
raise Exception('!!! Case {0} has not been run or does not exist !!!'.format(c))
grid_id = load[1]
dir = load[2]
# Create FFT object.
p_fft = FFT(load, x=self.pfile.case[c][grid_id][:, dir], time=self.pfile.case[c][grid_id][:, 0])
# Plot the requested response fft.
p_fft.plot_fft()
def rss(self, case):
"""Method to RSS responses as dictated by the HWLIST.
Ex: Perform the rss on case 1.
scr.rss(1)
"""
# Loop over each HWLIST RSS items, determine if available in the LTM, perform RSS on u.
for item in self.hwlist.hw_rss:
acron = item[0]
eid = item[1]
dof = item[2]
rss_list = []
rss_idx = []
rss_dofs = []
if acron in self.hwlist.hw.keys():
if eid in self.hwlist.hw[acron].keys():
if dof in self.hwlist.hw[acron][eid].keys():
rss_dofs.append((acron, dof))
for d in self.hwlist.hw[acron][eid][dof]['dofs']:
eid_d = (eid, d)
rss_list.append(eid_d)
if eid_d in self.ltm.dofs:
rss_idx.append(self.ltm.dofs.index(eid_d))
else:
raise Exception('Missing {0} in {1} and {2} in ltm.'.format(eid_d, acron, dof))
rss_sum = np.zeros_like(self.u[case][0, :])
for idx in rss_idx:
rss_sum = rss_sum + np.square(self.u[case][idx, :])
rss = np.sqrt(rss_sum)
# Add the RSS to the u and ltm.acron_dof.
self.ltm.acron_dofs.append((acron, dof))
self.u[case] = np.vstack([self.u[case], rss])
def run(self, **kwargs):
"""Method to perform numerical integration of EOM via Recurrence Formulas.
Ex: Run case 1 and 2.
scr.run(case=[1, 2])
Ex: Run all cases.
scr.run(case='all')
"""
# Get the kwargs.
cases = kwargs['case']
if cases == 'all':
cases = self.pfile.case.keys()
elif type(cases) is not list:
cases = [cases]
if 'rbm' in kwargs.keys():
if kwargs['rbm'].lower() == 'yes':
rbm = 1
else:
rbm = 0
else:
rbm = 0
# Run all the requested cases.
for c in cases:
# Create the current case dictionary key.
if c not in self.time.keys():
self.time[c] = []
if c not in self.u.keys():
self.u[c] = []
if c not in self.eta.keys():
self.eta[c] = []
# Determine the modal force vector.
p_modal = modal_p(self.pfile.case[c], self.phi)
# Determine the time parameters in the forcing function.
grid = self.pfile.case[c]['grids'][0]
self.time[c] = self.pfile.case[c][grid][:, 0]
dt = self.pfile.case[c]['dt']
# Add 100 seconds at the end of the forcing function for ring down.
add_time = [(20, 0.01), (80, 0.5)]
for at in add_time:
new_time = np.arange(self.time[c][-1] + dt, self.time[c][-1] + at[0], at[1])
self.time[c] = np.append(self.time[c], new_time)
new_p_modal = np.zeros([self.phi.num_modes, new_time.size])
p_modal = np.append(p_modal, new_p_modal, axis=1)
# Integrate the modal EOM using Recurrence Formulas:
# etadd + 2 * zeta * omn * etad + omn**2 * eta = P
eta0 = np.zeros_like(p_modal)
etad0 = np.zeros_like(p_modal)
[self.eta[c], etad] = rf_mdof(self.time[c], p_modal, self.eig.eigenvalues,
np.multiply(2 * np.pi, self.eig.frequency), self.zeta,
eta0, etad0)
# Remove rigid body modes unless requested not to.
if rbm == 0:
self.eta[c][0:6, :] = 0.0
# Recover the desired responses with superposition of modes using the LTM
self.u[c] = self.ltm.dtm @ self.eta[c]
# Perform the required RSS set out in the HWLIST.
self.rss(c)
| desc = kwargs['desc'] | conditional_block |
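rss() combines component responses into a resultant with a root-sum-square, as dictated by the HWLIST. The sketch below shows the same operation on a made-up two-component time history; the numbers have no physical meaning.

import numpy as np

# Root-sum-square across components (rows), one resultant value per time step.
components = np.array([[3.0, 0.0, 1.0],
                       [4.0, 2.0, 2.0]])
resultant = np.sqrt(np.square(components).sum(axis=0))
print(resultant)   # [5.0, 2.0, ~2.236]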